
Searched refs:split (Results 1 – 25 of 499) sorted by relevance


/linux-6.14.4/drivers/media/dvb-core/
dvb_ringbuffer.c
136 size_t split; in dvb_ringbuffer_read_user() local
138 split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0; in dvb_ringbuffer_read_user()
139 if (split > 0) { in dvb_ringbuffer_read_user()
140 if (copy_to_user(buf, rbuf->data+rbuf->pread, split)) in dvb_ringbuffer_read_user()
142 buf += split; in dvb_ringbuffer_read_user()
143 todo -= split; in dvb_ringbuffer_read_user()
162 size_t split; in dvb_ringbuffer_read() local
164 split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0; in dvb_ringbuffer_read()
165 if (split > 0) { in dvb_ringbuffer_read()
166 memcpy(buf, rbuf->data+rbuf->pread, split); in dvb_ringbuffer_read()
[all …]
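
The dvb_ringbuffer hits above show the classic wrap-around read: when pread + len would run past the end of the buffer, the tail chunk ("split") is copied first and the rest is taken from offset 0. A minimal user-space sketch of the same idea, using a hypothetical struct rather than the kernel API:

#include <stddef.h>
#include <string.h>

/* Hypothetical ring buffer mirroring the pread/size fields used above. */
struct ring {
    unsigned char *data;
    size_t size;    /* total capacity */
    size_t pread;   /* current read offset */
};

/* Read len bytes, handling wrap-around the same way dvb_ringbuffer_read()
 * does: copy the tail chunk ("split") first, then continue from offset 0. */
static void ring_read(struct ring *r, unsigned char *buf, size_t len)
{
    size_t split = (r->pread + len > r->size) ? r->size - r->pread : 0;
    size_t todo = len;

    if (split > 0) {
        memcpy(buf, r->data + r->pread, split);
        buf += split;
        todo -= split;
        r->pread = 0;
    }
    memcpy(buf, r->data + r->pread, todo);
    r->pread = (r->pread + todo) % r->size;
}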
/linux-6.14.4/lib/
sg_split.c
81 struct sg_splitter *split; in sg_split_phys() local
83 for (i = 0, split = splitters; i < nb_splits; i++, split++) { in sg_split_phys()
84 in_sg = split->in_sg0; in sg_split_phys()
85 out_sg = split->out_sg; in sg_split_phys()
86 for (j = 0; j < split->nents; j++, out_sg++) { in sg_split_phys()
89 out_sg->offset += split->skip_sg0; in sg_split_phys()
90 out_sg->length -= split->skip_sg0; in sg_split_phys()
96 out_sg[-1].length = split->length_last_sg; in sg_split_phys()
105 struct sg_splitter *split; in sg_split_mapped() local
107 for (i = 0, split = splitters; i < nb_splits; i++, split++) { in sg_split_mapped()
[all …]
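
sg_split_phys() fixes up the entries that sit on a split boundary: the first output entry is advanced by skip_sg0 and the last is trimmed to length_last_sg. A simplified, hypothetical two-way version of that boundary fixup on plain (offset, length) segments, not the scatterlist API itself:

#include <stddef.h>

/* Hypothetical segment descriptor standing in for a scatterlist entry. */
struct seg {
    size_t offset;
    size_t length;
};

/*
 * Cut a segment list at byte position pos.  The entry containing the cut
 * is shared by both halves with its offset/length adjusted -- the same
 * boundary fixup sg_split_phys() performs with skip_sg0/length_last_sg,
 * reduced here to a two-way split.
 */
static void split_segs(const struct seg *in, size_t n, size_t pos,
                       struct seg *lo, size_t *nlo,
                       struct seg *hi, size_t *nhi)
{
    size_t done = 0;

    *nlo = *nhi = 0;
    for (size_t i = 0; i < n; i++) {
        if (done + in[i].length <= pos) {
            lo[(*nlo)++] = in[i];            /* entirely below the cut */
        } else if (done >= pos) {
            hi[(*nhi)++] = in[i];            /* entirely above the cut */
        } else {
            size_t skip = pos - done;        /* boundary entry: share it */
            lo[*nlo] = in[i];
            lo[(*nlo)++].length = skip;      /* trim the low-side copy */
            hi[*nhi] = in[i];
            hi[*nhi].offset += skip;         /* skip into the high-side copy */
            hi[(*nhi)++].length -= skip;
        }
        done += in[i].length;
    }
}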
/linux-6.14.4/fs/jfs/
jfs_xtree.c
96 uint split; member
109 struct xtsplit * split, struct btstack * btstack);
111 static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
115 struct xtsplit * split, struct metapage ** rmpp);
524 struct xtsplit split; /* split information */ in xtInsert() local
586 split.mp = mp; in xtInsert()
587 split.index = index; in xtInsert()
588 split.flag = xflag; in xtInsert()
589 split.off = xoff; in xtInsert()
590 split.len = xlen; in xtInsert()
[all …]
jfs_dtree.c
141 struct dtsplit * split, struct btstack * btstack);
143 static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
147 struct dtsplit * split, struct btstack * btstack);
150 struct dtsplit * split, struct metapage ** rmpp);
823 struct dtsplit split; /* split information */ in dtInsert() local
866 split.mp = mp; in dtInsert()
867 split.index = index; in dtInsert()
868 split.nslot = n; in dtInsert()
869 split.key = name; in dtInsert()
870 split.data = &data; in dtInsert()
[all …]
/linux-6.14.4/tools/verification/dot2/
automata.py
55 line = dot_lines[cursor].split()
65 while self.__dot_lines[cursor].split()[0] != "{node":
71 while self.__dot_lines[cursor].split()[0] != "{node":
73 while self.__dot_lines[cursor].split()[0] == "{node":
88 while self.__dot_lines[cursor].split()[0] == "{node":
89 line = self.__dot_lines[cursor].split()
128 if self.__dot_lines[cursor].split()[1] == "->":
129 line = self.__dot_lines[cursor].split()
137 for i in event.split():
166 if self.__dot_lines[cursor].split()[1] == "->":
[all …]
/linux-6.14.4/fs/btrfs/
extent_map.c
803 struct extent_map *split; in btrfs_drop_extent_map_range() local
829 split = alloc_extent_map(); in btrfs_drop_extent_map_range()
877 if (!split) { in btrfs_drop_extent_map_range()
878 split = split2; in btrfs_drop_extent_map_range()
880 if (!split) in btrfs_drop_extent_map_range()
883 split->start = em->start; in btrfs_drop_extent_map_range()
884 split->len = start - em->start; in btrfs_drop_extent_map_range()
887 split->disk_bytenr = em->disk_bytenr; in btrfs_drop_extent_map_range()
888 split->disk_num_bytes = em->disk_num_bytes; in btrfs_drop_extent_map_range()
889 split->offset = em->offset; in btrfs_drop_extent_map_range()
[all …]
/linux-6.14.4/drivers/virtio/
virtio_ring.c
201 struct vring_virtqueue_split split; member
567 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
579 desc = vq->split.vring.desc; in virtqueue_add_split()
580 extra = vq->split.desc_extra; in virtqueue_add_split()
637 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= in virtqueue_add_split()
648 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
649 vq->split.desc_extra, in virtqueue_add_split()
660 vq->free_head = vq->split.desc_extra[head].next; in virtqueue_add_split()
665 vq->split.desc_state[head].data = data; in virtqueue_add_split()
667 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
[all …]
/linux-6.14.4/arch/x86/kernel/cpu/
cacheinfo.c
152 } split; member
161 } split; member
168 } split; member
298 eax->split.is_self_initializing = 1; in amd_cpuid4()
299 eax->split.type = types[leaf]; in amd_cpuid4()
300 eax->split.level = levels[leaf]; in amd_cpuid4()
301 eax->split.num_threads_sharing = 0; in amd_cpuid4()
302 eax->split.num_cores_on_die = topology_num_cores_per_package(); in amd_cpuid4()
306 eax->split.is_fully_associative = 1; in amd_cpuid4()
307 ebx->split.coherency_line_size = line_size - 1; in amd_cpuid4()
[all …]
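
In cacheinfo.c (and in perf_event.h further down) `split` is the conventional name for the bitfield view of a raw CPUID register inside a union, so the same value can be accessed field by field or as one word. An illustrative sketch of the pattern; the field names follow the leaf-4 EAX accesses quoted above, but the widths should be treated as an example rather than a normative layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: one raw 32-bit CPUID register overlaid with named
 * bitfields.  Field names mirror the amd_cpuid4() accesses seen above. */
union example_cpuid_eax {
    struct {
        uint32_t type                 : 5;
        uint32_t level                : 3;
        uint32_t is_self_initializing : 1;
        uint32_t is_fully_associative : 1;
        uint32_t reserved             : 4;
        uint32_t num_threads_sharing  : 12;
        uint32_t num_cores_on_die     : 6;
    } split;            /* field-by-field view */
    uint32_t full;      /* whole-register view */
};

int main(void)
{
    union example_cpuid_eax eax = { .full = 0 };

    /* Writes through .split compose the raw register value in .full,
     * which is how amd_cpuid4() synthesizes its leaf-4 answer. */
    eax.split.type = 2;
    eax.split.level = 1;
    eax.split.is_self_initializing = 1;
    printf("raw eax = %#x\n", (unsigned int)eax.full);
    return 0;
}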
/linux-6.14.4/tools/testing/selftests/vDSO/
vdso_test_chacha.c
98 for (unsigned int split = 0; split < BLOCKS; ++split) { in main() local
101 if (split) in main()
102 __arch_chacha20_blocks_nostack(output2, key, counter2, split); in main()
103 __arch_chacha20_blocks_nostack(output2 + split * BLOCK_SIZE, key, counter2, BLOCKS - split); in main()
105 ksft_exit_fail_msg("Main loop outputs do not match on trial %u, split %u\n", trial, split); in main()
107 ksft_exit_fail_msg("Main loop counters do not match on trial %u, split %u\n", trial, split); in main()
/linux-6.14.4/Documentation/mm/
split_page_table_lock.rst
8 scalability, split page table lock was introduced.
10 With split page table lock we have separate per-table lock to serialize
11 access to the table. At the moment we use split lock for PTE and PMD
42 If split lock is disabled, all tables are guarded by mm->page_table_lock.
47 Hugetlb and split page table lock
50 Hugetlb can support several page sizes. We use split lock only for PMD
56 takes pmd split lock for PMD_SIZE page, mm->page_table_lock
61 Support of split page table lock by an architecture
64 There's no need in special enabling of PTE split page table lock: everything
72 PMD split lock only makes sense if you have more than two page table
[all …]
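
The split_page_table_lock.rst hits describe per-table locks for PTE and PMD tables in place of a single mm->page_table_lock. A kernel-context sketch (not a standalone program) of the usual PTE-side pattern, using the pte_offset_map_lock()/pte_unmap_unlock() helpers that document describes:

#include <linux/mm.h>

/*
 * Kernel-context sketch: operate on one PTE under the split page table
 * lock.  When split locks are in use this takes the per-table lock;
 * otherwise the same helpers fall back to mm->page_table_lock, so the
 * caller is written the same way either way.
 */
static int touch_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
    spinlock_t *ptl;
    pte_t *pte;

    pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    if (!pte)
        return -EAGAIN;    /* the PTE table vanished under us */

    /* ... read or modify *pte here ... */

    pte_unmap_unlock(pte, ptl);
    return 0;
}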
/linux-6.14.4/security/apparmor/
lib.c
79 char *split = strnchr(&name[1], end - &name[1], ':'); in aa_splitn_fqname() local
83 if (split) { in aa_splitn_fqname()
84 *ns_len = split - *ns_name; in aa_splitn_fqname()
87 split++; in aa_splitn_fqname()
88 if (end - split > 1 && strncmp(split, "//", 2) == 0) in aa_splitn_fqname()
89 split += 2; in aa_splitn_fqname()
90 name = skipn_spaces(split, end - split); in aa_splitn_fqname()
policy_ns.c
177 const char *split; in __aa_lookupn_ns() local
179 for (split = strnstr(hname, "//", n); split; in __aa_lookupn_ns()
180 split = strnstr(hname, "//", n)) { in __aa_lookupn_ns()
181 ns = __aa_findn_ns(&ns->sub_ns, hname, split - hname); in __aa_lookupn_ns()
185 n -= split + 2 - hname; in __aa_lookupn_ns()
186 hname = split + 2; in __aa_lookupn_ns()
policy.c
447 char *split; in __lookup_parent() local
451 for (split = strstr(hname, "//"); split;) { in __lookup_parent()
453 split - hname); in __lookup_parent()
457 hname = split + 2; in __lookup_parent()
458 split = strstr(hname, "//"); in __lookup_parent()
482 char *split; in __create_missing_ancestors() local
489 for (split = strstr(hname, "//"); split;) { in __create_missing_ancestors()
492 split - hname); in __create_missing_ancestors()
494 const char *name = kstrndup(hname, split - hname, in __create_missing_ancestors()
506 hname = split + 2; in __create_missing_ancestors()
[all …]
/linux-6.14.4/arch/x86/events/amd/
lbr.c
46 } split; member
57 } split; member
179 if ((!entry.to.split.valid && !entry.to.split.spec) || in amd_pmu_lbr_read()
180 entry.to.split.reserved) in amd_pmu_lbr_read()
185 br[out].from = sign_ext_branch_ip(entry.from.split.ip); in amd_pmu_lbr_read()
186 br[out].to = sign_ext_branch_ip(entry.to.split.ip); in amd_pmu_lbr_read()
187 br[out].mispred = entry.from.split.mispredict; in amd_pmu_lbr_read()
206 idx = (entry.to.split.valid << 1) | entry.to.split.spec; in amd_pmu_lbr_read()
430 x86_pmu.lbr_nr = ebx.split.lbr_v2_stack_sz; in amd_pmu_lbr_init()
uncore.c
69 } split; member
398 return info->split.cid; in amd_uncore_ctx_cid()
405 return info->split.gid; in amd_uncore_ctx_gid()
412 return info->split.num_pmcs; in amd_uncore_ctx_num_pmcs()
643 info.split.aux_data = 0; in amd_uncore_df_ctx_scan()
644 info.split.num_pmcs = NUM_COUNTERS_NB; in amd_uncore_df_ctx_scan()
645 info.split.gid = 0; in amd_uncore_df_ctx_scan()
646 info.split.cid = topology_logical_package_id(cpu); in amd_uncore_df_ctx_scan()
650 info.split.num_pmcs = ebx.split.num_df_pmc; in amd_uncore_df_ctx_scan()
779 info.split.aux_data = 0; in amd_uncore_l3_ctx_scan()
[all …]
/linux-6.14.4/arch/x86/include/asm/
perf_event.h
158 } split; member
171 } split; member
182 } split; member
203 } split; member
214 } split; member
230 } split; member
242 } split; member
257 } split; member
275 } split; member
/linux-6.14.4/lib/zstd/compress/
zstd_ldm.c
291 BYTE const* const split = ip + splits[n] - minMatchLength; in ZSTD_ldm_fillHashTable() local
292 U64 const xxhash = xxh64(split, minMatchLength, 0); in ZSTD_ldm_fillHashTable()
296 entry.offset = (U32)(split - base); in ZSTD_ldm_fillHashTable()
369 BYTE const* const split = ip + splits[n] - minMatchLength; in ZSTD_ldm_generateSequences_internal() local
370 U64 const xxhash = xxh64(split, minMatchLength, 0); in ZSTD_ldm_generateSequences_internal()
373 candidates[n].split = split; in ZSTD_ldm_generateSequences_internal()
384 BYTE const* const split = candidates[n].split; in ZSTD_ldm_generateSequences_internal() local
392 newEntry.offset = (U32)(split - base); in ZSTD_ldm_generateSequences_internal()
398 if (split < anchor) { in ZSTD_ldm_generateSequences_internal()
418 ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr); in ZSTD_ldm_generateSequences_internal()
[all …]
/linux-6.14.4/Documentation/arch/x86/
buslock.rst
16 A split lock is any atomic operation whose operand crosses two cache lines.
20 A bus lock is acquired through either split locked access to writeback (WB)
29 mechanisms to detect split locks and bus locks. Some AMD processors also
32 #AC exception for split lock detection
35 Beginning with the Tremont Atom CPU split lock operations may raise an
36 Alignment Check (#AC) exception when a split lock operation is attempted.
52 |split_lock_detect=|#AC for split lock |#DB for bus lock |
61 | |split lock in parallel. | |
90 generating split lock and bus lock to block the hard real time code to
103 Disable checking for split lock and bus lock. This option can be useful if
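
buslock.rst defines a split lock as an atomic operation whose operand straddles two cache lines. A user-space sketch of an access that produces one (with split_lock_detect=fatal the process would be killed); the misaligned atomic is deliberate and is technically undefined behavior in C, but on x86 it compiles to a locked read-modify-write that crosses a cache-line boundary:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* 128 bytes starting on a 64-byte cache-line boundary. */
    unsigned char *buf = aligned_alloc(64, 128);
    if (!buf)
        return 1;

    /* A 4-byte operand placed 2 bytes before the next line boundary
     * straddles two cache lines; a locked RMW on it is a split lock. */
    uint32_t *straddler = (uint32_t *)(buf + 62);
    *straddler = 0;

    __atomic_fetch_add(straddler, 1, __ATOMIC_SEQ_CST);

    printf("value = %u\n", *straddler);
    free(buf);
    return 0;
}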
/linux-6.14.4/tools/hv/
vmbus_testing
154 f_name = f_path.split("/")[-1]
169 .format(device.split("/")[5]))
172 .format(device.split("/")[5]))
209 .format(state_path.split("/")[5]))
213 .format(state_path.split("/")[5]))
238 interrupt.split("/")[5]))
241 message.split("/")[5]))
263 print("ALL testing now OFF for {}".format(device.split("/")[-1]))
/linux-6.14.4/drivers/thermal/intel/
intel_hfi.c
64 } split; member
74 } split; member
337 info->index = edx.split.index; in init_hfi_cpu_index()
549 if (!edx.split.capabilities.split.performance) { in hfi_parse_features()
558 edx.split.capabilities.split.__reserved = 0; in hfi_parse_features()
559 nr_capabilities = hweight8(edx.split.capabilities.bits); in hfi_parse_features()
562 hfi_features.nr_table_pages = edx.split.table_pages + 1; in hfi_parse_features()
/linux-6.14.4/drivers/md/dm-vdo/indexer/
volume-index.c
319 struct split_config *split) in split_configuration() argument
325 split->hook_config = *config; in split_configuration()
326 split->hook_geometry = *config->geometry; in split_configuration()
327 split->hook_config.geometry = &split->hook_geometry; in split_configuration()
328 split->non_hook_config = *config; in split_configuration()
329 split->non_hook_geometry = *config->geometry; in split_configuration()
330 split->non_hook_config.geometry = &split->non_hook_geometry; in split_configuration()
338 split->hook_geometry.records_per_chapter = sample_records; in split_configuration()
339 split->non_hook_geometry.records_per_chapter -= sample_records; in split_configuration()
342 split->hook_geometry.sparse_chapters_per_volume = 0; in split_configuration()
[all …]
/linux-6.14.4/arch/x86/kvm/vmx/
pmu_intel.c
486 pmu->version = eax.split.version_id; in intel_pmu_refresh()
490 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, in intel_pmu_refresh()
492 eax.split.bit_width = min_t(int, eax.split.bit_width, in intel_pmu_refresh()
494 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; in intel_pmu_refresh()
495 eax.split.mask_length = min_t(int, eax.split.mask_length, in intel_pmu_refresh()
498 ((1ull << eax.split.mask_length) - 1); in intel_pmu_refresh()
503 pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed, in intel_pmu_refresh()
505 edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed, in intel_pmu_refresh()
508 ((u64)1 << edx.split.bit_width_fixed) - 1; in intel_pmu_refresh()
/linux-6.14.4/scripts/
jobserver-exec
23 opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
28 fds = opts[-1].split("=", 1)[1]
38 reader, writer = [int(x) for x in fds.split(",", 1)]
/linux-6.14.4/security/apparmor/include/
lib.h
160 char *split; in basename() local
163 for (split = strstr(hname, "//"); split; split = strstr(hname, "//")) in basename()
164 hname = split + 2; in basename()
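
The basename() hit strips every leading "component//" prefix, leaving the final component of an AppArmor hierarchical name. A standalone sketch of the same loop using the standard strstr() (the kernel version uses the length-bounded strnstr()):

#include <stdio.h>
#include <string.h>

/* Return the last component of a hierarchical name whose components are
 * separated by "//", e.g. "parent//child" -> "child". */
static const char *basename_hname(const char *hname)
{
    const char *split;

    for (split = strstr(hname, "//"); split; split = strstr(hname, "//"))
        hname = split + 2;
    return hname;
}

int main(void)
{
    printf("%s\n", basename_hname("profile//hat//subhat"));  /* subhat */
    return 0;
}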
/linux-6.14.4/tools/testing/selftests/turbostat/
added_perf_counters.py
157 actual_columns = proc_turbostat.stdout.split(b'\n')[0].split(b'\t')
176 actual_columns = proc_turbostat.stdout.split(b'\n')[0].split(b'\t')
