/linux-6.14.4/lib/crypto/ |
D | gf128mul.c |
     56  #define gf128mul_dat(q) { \
     57          q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
     58          q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
     59          q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
     60          q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
     61          q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
     62          q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
     63          q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
     64          q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
     65          q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
    [all …]
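The entry above is an x-macro: gf128mul_dat() takes a caller-supplied macro q and expands it once per byte value, so a single table layout can stamp out several 256-entry lookup tables with different entry expressions. A compilable miniature of the idiom (8 entries instead of 256; every name below is illustrative, not from gf128mul.c):

```c
#include <stdio.h>

/* Expand a caller-supplied macro q once per index, gf128mul.c-style.
 * The kernel table covers 0x00..0xff; 8 entries suffice here. */
#define TABLE_DAT(q) { q(0), q(1), q(2), q(3), q(4), q(5), q(6), q(7) }

#define SQUARE(i) ((i) * (i))
#define DOUBLE(i) ((i) * 2)

static const int squares[8] = TABLE_DAT(SQUARE); /* 0, 1, 4, 9, ... */
static const int doubles[8] = TABLE_DAT(DOUBLE); /* 0, 2, 4, 6, ... */

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("%d: square=%d double=%d\n", i, squares[i], doubles[i]);
	return 0;
}
```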
|
/linux-6.14.4/drivers/net/ethernet/fungible/funeth/ |
D | funeth_rx.c |
     50  static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
     52          struct funeth_rx_cache *c = &q->cache;
     58                  dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
     67  static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
     69          struct funeth_rx_cache *c = &q->cache;
     77                  dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
     88                  dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
     98  static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
    103          if (cache_get(q, rb))
    110          rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
    [all …]
|
D | funeth_tx.c |
     56  static void *txq_end(const struct funeth_txq *q)
     58          return (void *)q->hw_wb;
     64  static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
     66          return txq_end(q) - p;
     78  static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
     90               i < ngle && txq_to_end(q, gle); i++, gle++)
     93          if (txq_to_end(q, gle) == 0) {
     94                  gle = (struct fun_dataop_gl *)q->desc;
    107  static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
    132                  FUN_QSTAT_INC(q, tx_tls_fallback);
    [all …]
|
/linux-6.14.4/drivers/gpu/drm/xe/ |
D | xe_guc_submit.c |
     48  exec_queue_to_guc(struct xe_exec_queue *q)
     50          return &q->gt->uc.guc;
     71  static bool exec_queue_registered(struct xe_exec_queue *q)
     73          return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
     76  static void set_exec_queue_registered(struct xe_exec_queue *q)
     78          atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
     81  static void clear_exec_queue_registered(struct xe_exec_queue *q)
     83          atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
     86  static bool exec_queue_enabled(struct xe_exec_queue *q)
     88          return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
    [all …]
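These helpers keep all exec-queue lifecycle flags in one atomic word and test, set, and clear them with atomic_read()/atomic_or()/atomic_and(). A userspace sketch of the same pattern using C11 atomics (flag names are illustrative):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define STATE_REGISTERED (1u << 0)
#define STATE_ENABLED    (1u << 1)

struct exec_queue { atomic_uint state; };

/* One atomic word holds all flags: tested with a load, set with
 * fetch-or, cleared with fetch-and of the complement. */
static bool queue_registered(struct exec_queue *q)
{
	return atomic_load(&q->state) & STATE_REGISTERED;
}

static void set_queue_registered(struct exec_queue *q)
{
	atomic_fetch_or(&q->state, STATE_REGISTERED);
}

static void clear_queue_registered(struct exec_queue *q)
{
	atomic_fetch_and(&q->state, ~STATE_REGISTERED);
}

int main(void)
{
	struct exec_queue q = { .state = 0 };

	set_queue_registered(&q);
	printf("registered: %d\n", queue_registered(&q));
	clear_queue_registered(&q);
	printf("registered: %d\n", queue_registered(&q));
	return 0;
}
```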
|
D | xe_exec_queue.c |
     36  static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
     39  static void __xe_exec_queue_free(struct xe_exec_queue *q)
     41          if (q->vm)
     42                  xe_vm_put(q->vm);
     44          if (q->xef)
     45                  xe_file_put(q->xef);
     47          kfree(q);
     56          struct xe_exec_queue *q;
     63          q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
     64          if (!q)
    [all …]
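__xe_exec_queue_alloc() sizes a struct with a trailing flexible array in a single allocation via struct_size(). A plain-C sketch of that layout (struct_size() also checks for multiplication overflow, which this sketch omits; types are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct lrc { int id; };         /* stand-in for the per-engine context */

struct exec_queue {
	int width;
	struct lrc *lrc[];      /* flexible array member, one per engine */
};

/* kzalloc(struct_size(q, lrc, width)) in one call: the header plus
 * `width` trailing array elements, zeroed together. */
static struct exec_queue *exec_queue_alloc(int width)
{
	struct exec_queue *q;

	q = calloc(1, sizeof(*q) + (size_t)width * sizeof(q->lrc[0]));
	if (!q)
		return NULL;
	q->width = width;
	return q;
}

int main(void)
{
	struct exec_queue *q = exec_queue_alloc(4);

	if (!q)
		return 1;
	printf("queue with %d lrc slots in one allocation\n", q->width);
	free(q);
	return 0;
}
```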
|
/linux-6.14.4/Documentation/networking/ |
D | tls-offload-layers.svg | 1 (match inside SVG path data; vector-drawing coordinates, not shown)
|
/linux-6.14.4/sound/core/seq/oss/ |
D | seq_oss_readq.c |
     35          struct seq_oss_readq *q;
     37          q = kzalloc(sizeof(*q), GFP_KERNEL);
     38          if (!q)
     41          q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL);
     42          if (!q->q) {
     43                  kfree(q);
     47          q->maxlen = maxlen;
     48          q->qlen = 0;
     49          q->head = q->tail = 0;
     50          init_waitqueue_head(&q->midi_sleep);
    [all …]
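snd_seq_oss_readq_new() shows the usual two-step allocation: the control struct first, then the ring storage, with the first allocation unwound if the second fails. A standalone sketch of the same error handling (calloc() standing in for kzalloc()/kcalloc(); the union layout is a placeholder):

```c
#include <stdlib.h>

union evrec { char raw[8]; };   /* stand-in for the OSS event record */

struct readq {
	union evrec *q;             /* ring storage */
	int maxlen, qlen, head, tail;
};

/* Allocate the control struct, then the ring, and free the first
 * allocation if the second one fails. */
static struct readq *readq_new(int maxlen)
{
	struct readq *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	q->q = calloc(maxlen, sizeof(union evrec));
	if (!q->q) {
		free(q);
		return NULL;
	}
	q->maxlen = maxlen;         /* qlen/head/tail already zeroed */
	return q;
}

int main(void)
{
	struct readq *q = readq_new(1024);

	if (!q)
		return 1;
	free(q->q);
	free(q);
	return 0;
}
```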
|
D | seq_oss_event.c |
     22  static int extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
     27  static int old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
     42  snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
     44          switch (q->s.code) {
     46                  return extended_event(dp, q, ev);
     49                  return chn_voice_event(dp, q, ev);
     52                  return chn_common_event(dp, q, ev);
     55                  return timing_event(dp, q, ev);
     58                  return local_event(dp, q, ev);
     61                  return snd_seq_oss_synth_sysex(dp, q->x.dev, q->x.buf, ev);
    [all …]
|
/linux-6.14.4/sound/core/seq/ |
D | seq_queue.c |
     50  static int queue_list_add(struct snd_seq_queue *q)
     57                          queue_list[i] = q;
     58                          q->queue = i;
     68          struct snd_seq_queue *q;
     71          q = queue_list[id];
     72          if (q) {
     73                  guard(spinlock)(&q->owner_lock);
     74                  if (q->owner == client) {
     76                          q->klocked = 1;
     79                  return q;
    [all …]
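The guard(spinlock)(&q->owner_lock) line uses the kernel's scope-based lock guard from <linux/cleanup.h>: the lock is released automatically when the enclosing scope exits, on every return path. A userspace approximation built on the same compiler feature (the GCC/Clang cleanup attribute), with a pthread mutex standing in for the spinlock; the macro name is mine:

```c
#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Lock now, and arrange for the unlock to run when the variable goes
 * out of scope, however the function returns. */
#define guard_mutex(m)							\
	pthread_mutex_t *__guard					\
	__attribute__((cleanup(unlock_cleanup))) =			\
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
static int owner;

static int owner_is(int client)
{
	guard_mutex(&owner_lock);   /* unlocked on every return path */
	return owner == client;     /* no explicit unlock needed */
}

int main(void)
{
	owner = 42;
	printf("owner_is(42) = %d\n", owner_is(42));
	return 0;
}
```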
|
/linux-6.14.4/net/sched/ |
D | sch_choke.c |
     75  static unsigned int choke_len(const struct choke_sched_data *q)
     77          return (q->tail - q->head) & q->tab_mask;
     81  static int use_ecn(const struct choke_sched_data *q)
     83          return q->flags & TC_RED_ECN;
     87  static int use_harddrop(const struct choke_sched_data *q)
     89          return q->flags & TC_RED_HARDDROP;
     93  static void choke_zap_head_holes(struct choke_sched_data *q)
     96                  q->head = (q->head + 1) & q->tab_mask;
     97                  if (q->head == q->tail)
     99          } while (q->tab[q->head] == NULL);
    [all …]
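choke_len() computes ring occupancy as (tail - head) & mask, which stays correct when tail has wrapped below head, provided the table size is a power of two. A small demonstration (field names mirror the excerpt; the rest is illustrative):

```c
#include <assert.h>
#include <stdint.h>

struct ring {
	uint32_t head, tail;
	uint32_t tab_mask;      /* table size - 1, size a power of two */
};

/* Masking the difference yields the occupancy even after tail has
 * wrapped below head, exactly as in choke_len(). */
static uint32_t ring_len(const struct ring *r)
{
	return (r->tail - r->head) & r->tab_mask;
}

int main(void)
{
	/* 256-slot table, 9 entries in flight across the wrap point:
	 * slots 250..255 and 0..2. */
	struct ring r = { .head = 250, .tail = 3, .tab_mask = 255 };

	assert(ring_len(&r) == 9);
	return 0;
}
```

The power-of-two size is what makes the unsigned wraparound and the mask agree; the same constraint shows up wherever a qdisc stores `size - 1` as a mask.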
|
D | sch_netem.c |
    210  static bool loss_4state(struct netem_sched_data *q)
    212          struct clgstate *clg = &q->clg;
    213          u32 rnd = prandom_u32_state(&q->prng.prng_state);
    275  static bool loss_gilb_ell(struct netem_sched_data *q)
    277          struct clgstate *clg = &q->clg;
    278          struct rnd_state *s = &q->prng.prng_state;
    297  static bool loss_event(struct netem_sched_data *q)
    299          switch (q->loss_model) {
    302                  return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
    310                  return loss_4state(q);
    [all …]
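loss_gilb_ell() is netem's two-state Gilbert-Elliott loss model (loss_4state() is the four-state Markov variant). A standalone sketch of the two-state idea, where each state has its own transition and loss probability; all parameter values and the exact update order here are illustrative, not copied from sch_netem.c:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum { GOOD, BAD };

struct gilb_ell {
	int state;
	double p;       /* P(GOOD -> BAD) */
	double r;       /* P(BAD -> GOOD) */
	double h;       /* P(loss | GOOD) */
	double k;       /* P(loss | BAD)  */
};

static double u01(void)
{
	return (double)rand() / RAND_MAX;
}

/* Step the channel once and report whether this packet is lost. */
static bool ge_lose_packet(struct gilb_ell *g)
{
	if (g->state == GOOD) {
		if (u01() < g->p)
			g->state = BAD;
		return u01() < g->h;
	}
	if (u01() < g->r)
		g->state = GOOD;
	return u01() < g->k;
}

int main(void)
{
	struct gilb_ell g = { GOOD, 0.01, 0.30, 0.001, 0.50 };
	int lost = 0;

	for (int i = 0; i < 100000; i++)
		lost += ge_lose_packet(&g);
	printf("lost %d of 100000 packets\n", lost);
	return 0;
}
```

The point of the model is burstiness: losses cluster while the channel sits in BAD, which a single Bernoulli loss probability cannot reproduce.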
|
D | sch_sfq.c |
    143  static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
    146                  return &q->slots[val].dep;
    147          return &q->dep[val - SFQ_MAX_FLOWS];
    150  static unsigned int sfq_hash(const struct sfq_sched_data *q,
    153          return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
    159          struct sfq_sched_data *q = qdisc_priv(sch);
    166              TC_H_MIN(skb->priority) <= q->divisor)
    169          fl = rcu_dereference_bh(q->filter_list);
    171                  return sfq_hash(q, skb) + 1;
    187                  if (TC_H_MIN(res.classid) <= q->divisor)
    [all …]
|
D | sch_sfb.c |
    123  static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
    126          struct sfb_bucket *b = &q->bins[slot].bins[0][0];
    138  static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
    144                  increment_one_qlen(sfbhash, 0, q);
    148                  increment_one_qlen(sfbhash, 1, q);
    152                                 struct sfb_sched_data *q)
    155          struct sfb_bucket *b = &q->bins[slot].bins[0][0];
    167  static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
    173                  decrement_one_qlen(sfbhash, 0, q);
    177                  decrement_one_qlen(sfbhash, 1, q);
    [all …]
|
D | sch_red.c |
     55  static inline int red_use_ecn(struct red_sched_data *q)
     57          return q->flags & TC_RED_ECN;
     60  static inline int red_use_harddrop(struct red_sched_data *q)
     62          return q->flags & TC_RED_HARDDROP;
     65  static int red_use_nodrop(struct red_sched_data *q)
     67          return q->flags & TC_RED_NODROP;
     74          struct red_sched_data *q = qdisc_priv(sch);
     75          struct Qdisc *child = q->qdisc;
     79          q->vars.qavg = red_calc_qavg(&q->parms,
     80                                       &q->vars,
    [all …]
|
D | sch_fq_pie.c |
     75  static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
     78          return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
     84          struct fq_pie_sched_data *q = qdisc_priv(sch);
     91              TC_H_MIN(skb->priority) <= q->flows_cnt)
     94          filter = rcu_dereference_bh(q->filter_list);
     96                  return fq_pie_hash(q, skb) + 1;
    112                  if (TC_H_MIN(res.classid) <= q->flows_cnt)
    134          struct fq_pie_sched_data *q = qdisc_priv(sch);
    152          sel_flow = &q->flows[idx];
    155          memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
    [all …]
|
D | sch_fq_codel.c |
     70  static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
     73          return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
     79          struct fq_codel_sched_data *q = qdisc_priv(sch);
     86              TC_H_MIN(skb->priority) <= q->flows_cnt)
     89          filter = rcu_dereference_bh(q->filter_list);
     91                  return fq_codel_hash(q, skb) + 1;
    107                  if (TC_H_MIN(res.classid) <= q->flows_cnt)
    140          struct fq_codel_sched_data *q = qdisc_priv(sch);
    154          for (i = 0; i < q->flows_cnt; i++) {
    155                  if (q->backlogs[i] > maxbacklog) {
    [all …]
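fq_codel_hash() buckets flows with reciprocal_scale(), which maps a 32-bit hash onto [0, n) with a multiply and a shift instead of a modulo. A standalone version of the same computation (the one-line helper body matches the kernel's; everything around it is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit value uniformly onto [0, n): the top 32 bits of the
 * 64-bit product val * n land in [0, n). */
static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
{
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

int main(void)
{
	uint32_t flows_cnt = 1024;      /* number of flow queues */
	uint32_t hash = 0xdeadbeef;     /* stand-in for skb_get_hash() */

	printf("flow bucket = %u\n", reciprocal_scale(hash, flows_cnt));
	return 0;
}
```

Compared with `hash % flows_cnt`, this avoids a division and stays uniform for any flows_cnt, which is why the same helper appears in fq_pie_hash() above.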
|
D | sch_fq.c |
     79          /* Following field is only used for q->internal,
     80           * because q->internal is not hashed in fq_root[]
     93          struct rb_node rate_node;       /* anchor in q->delayed tree */
    197  static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
    200          struct fq_perband_flows *pband = &q->band_flows[flow->band];
    213  static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
    215          rb_erase(&f->rate_node, &q->delayed);
    216          q->throttled_flows--;
    217          fq_flow_add_tail(q, f, OLD_FLOW);
    220  static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
    [all …]
|
/linux-6.14.4/drivers/media/common/videobuf2/ |
D | videobuf2-core.c |
     47  #define dprintk(q, level, fmt, arg...) \
     50                  pr_info("[%s] %s: " fmt, (q)->name, __func__, \
    103  #define log_qop(q, op) \
    104          dprintk(q, 2, "call_qop(%s)%s\n", #op, \
    105                  (q)->ops->op ? "" : " (nop)")
    107  #define call_qop(q, op, args...) \
    111          log_qop(q, op); \
    112          err = (q)->ops->op ? (q)->ops->op(args) : 0; \
    114          (q)->cnt_ ## op++; \
    118  #define call_void_qop(q, op, args...) \
    [all …]
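call_qop() dispatches through an optional ops callback, treating a missing op as success and keeping a per-op counter for debugging. A compilable sketch of the same macro shape (a GNU statement expression, as in the original; the struct names and the only-count-on-success detail are assumptions based on the visible lines):

```c
#include <stdio.h>

struct queue;

struct queue_ops {
	int (*start)(struct queue *q);  /* optional */
	int (*stop)(struct queue *q);   /* optional */
};

struct queue {
	const struct queue_ops *ops;
	unsigned int cnt_start, cnt_stop;
};

/* Missing callback => successful no-op; bump a per-op counter when
 * the call succeeds. The token paste builds the counter name. */
#define call_qop(q, op, ...) ({						\
	int __err = (q)->ops->op ? (q)->ops->op(__VA_ARGS__) : 0;	\
	if (!__err)							\
		(q)->cnt_ ## op++;					\
	__err;								\
})

static int my_start(struct queue *q)
{
	(void)q;
	puts("start called");
	return 0;
}

static const struct queue_ops ops = { .start = my_start };

int main(void)
{
	struct queue q = { .ops = &ops };

	call_qop(&q, start, &q);   /* dispatches to my_start() */
	call_qop(&q, stop, &q);    /* .stop is NULL: counted as a nop */
	printf("cnt_start=%u cnt_stop=%u\n", q.cnt_start, q.cnt_stop);
	return 0;
}
```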
|
/linux-6.14.4/net/xdp/ |
D | xsk_queue.h |
    120  static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
    122          struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
    123          u32 idx = cached_cons & q->ring_mask;
    128  static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
    130          if (q->cached_cons != q->cached_prod) {
    131                  __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
    192  static inline bool xskq_has_descs(struct xsk_queue *q)
    194          return q->cached_cons != q->cached_prod;
    197  static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
    202                  q->invalid_descs++;
    [all …]
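xskq_has_descs() and friends compare free-running producer/consumer counters and mask only when indexing, the standard single-producer/single-consumer ring idiom. A minimal sketch of that pattern (fields simplified from the real struct xsk_queue):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t cached_prod;
	uint32_t cached_cons;
	uint32_t ring_mask;     /* ring size - 1, size a power of two */
	uint64_t desc[8];
};

/* Entries remain while the counters differ. */
static bool ring_has_entries(const struct ring *r)
{
	return r->cached_cons != r->cached_prod;
}

/* The slot index is the counter masked with (ring size - 1). */
static bool ring_peek(struct ring *r, uint64_t *addr)
{
	if (!ring_has_entries(r))
		return false;
	*addr = r->desc[r->cached_cons & r->ring_mask];
	return true;
}

int main(void)
{
	struct ring r = { .cached_prod = 2, .ring_mask = 7 };
	uint64_t addr;

	r.desc[0] = 0x1000;
	r.desc[1] = 0x2000;
	while (ring_peek(&r, &addr)) {
		printf("addr 0x%llx\n", (unsigned long long)addr);
		r.cached_cons++;        /* release the slot */
	}
	return 0;
}
```

Because the counters only ever increase, u32 wraparound is harmless: equality and masked indexing both remain well defined.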
|
/linux-6.14.4/drivers/net/wireless/mediatek/mt76/ |
D | dma.c |
    189  mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
    191          Q_WRITE(q, desc_base, q->desc_dma);
    192          if (q->flags & MT_QFLAG_WED_RRO_EN)
    193                  Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
    195                  Q_WRITE(q, ring_size, q->ndesc);
    196          q->head = Q_READ(q, dma_idx);
    197          q->tail = q->head;
    200  void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
    203          if (!q || !q->ndesc)
    206          if (!mt76_queue_is_wed_rro_ind(q)) {
    [all …]
|
D | wed.c |
     36          struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
     37          int i, len = SKB_WITH_OVERHEAD(q->buf_size);
     51                  buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
     56                  dir = page_pool_get_dma_dir(q->page_pool);
     97  int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
    102          if (!q || !q->ndesc)
    105          flags = q->flags;
    106          if (!q->wed || !mtk_wed_device_active(q->wed))
    107                  q->flags &= ~MT_QFLAG_WED;
    109          if (!(q->flags & MT_QFLAG_WED))
    [all …]
|
/linux-6.14.4/drivers/spi/ |
D | spi-fsl-qspi.c |
    277  static inline int needs_swap_endian(struct fsl_qspi *q)
    279          return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
    282  static inline int needs_4x_clock(struct fsl_qspi *q)
    284          return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
    287  static inline int needs_fill_txfifo(struct fsl_qspi *q)
    289          return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
    292  static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
    294          return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
    297  static inline int needs_amba_base_offset(struct fsl_qspi *q)
    299          return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
    [all …]
|
/linux-6.14.4/drivers/net/wireless/broadcom/b43/ |
D | pio.c |
     24  static u16 generate_cookie(struct b43_pio_txqueue *q,
     37          cookie = (((u16)q->index + 1) << 12);
     49          struct b43_pio_txqueue *q = NULL;
     54                  q = pio->tx_queue_AC_BK;
     57                  q = pio->tx_queue_AC_BE;
     60                  q = pio->tx_queue_AC_VI;
     63                  q = pio->tx_queue_AC_VO;
     66                  q = pio->tx_queue_mcast;
     69          if (B43_WARN_ON(!q))
     72          if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
    [all …]
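generate_cookie()/parse_cookie() round-trip a queue identity and a packet slot through one 16-bit cookie: the queue selector goes in the top bits, the slot in the rest. A sketch of the packing (the 12-bit split follows the shift visible at line 37; everything else is an assumption):

```c
#include <stdint.h>
#include <stdio.h>

/* Queue selector in the top 4 bits (offset by 1 so 0 stays invalid),
 * packet slot in the low 12 bits. */
static uint16_t make_cookie(unsigned int queue_index, unsigned int pack_index)
{
	return (uint16_t)(((queue_index + 1) << 12) | (pack_index & 0x0fff));
}

static void split_cookie(uint16_t cookie, unsigned int *queue_sel,
			 unsigned int *pack_index)
{
	*queue_sel = cookie >> 12;          /* 1..5 picks one of the TX queues */
	*pack_index = cookie & 0x0fff;      /* slot within q->packets[] */
}

int main(void)
{
	unsigned int qsel, idx;

	split_cookie(make_cookie(2, 5), &qsel, &idx);
	printf("queue selector %u, packet slot %u\n", qsel, idx); /* 3, 5 */
	return 0;
}
```

Encoding both halves in one u16 lets the hardware hand the cookie back on TX completion, and parse_cookie() recovers the queue without any table lookup.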
|
/linux-6.14.4/drivers/s390/cio/ |
D | qdio_main.c |
    105   * @q: queue to manipulate
    114  static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
    117          int tmp_count = count, tmp_start = start, nr = q->nr;
    120          qperf_inc(q, eqbs);
    122          if (!q->is_input_q)
    123                  nr += q->irq_ptr->nr_input_qs;
    125          ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
    135                  qperf_inc(q, eqbs_partial);
    136                  DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
    141                  DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
    [all …]
|
/linux-6.14.4/drivers/infiniband/sw/rxe/ |
D | rxe_queue.c |
     46  inline void rxe_queue_reset(struct rxe_queue *q)
     52          memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
     58          struct rxe_queue *q;
     66          q = kzalloc(sizeof(*q), GFP_KERNEL);
     67          if (!q)
     70          q->rxe = rxe;
     71          q->type = type;
     74          q->elem_size = elem_size;
     81          q->log2_elem_size = order_base_2(elem_size);
     85          q->index_mask = num_slots - 1;
    [all …]
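rxe_queue_init() rounds the slot count up to a power of two so indices can wrap with index_mask = num_slots - 1. A standalone sketch using the classic bit-smearing round-up (the kernel uses roundup_pow_of_two() and order_base_2() instead of open-coding it):

```c
#include <stdint.h>
#include <stdio.h>

/* Round n up to the next power of two by smearing the highest set bit
 * into every lower position, then adding one. Valid for n >= 1. */
static uint32_t roundup_pow_of_two32(uint32_t n)
{
	n--;
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	return n + 1;
}

int main(void)
{
	uint32_t num_elem = 100;        /* requested element count */
	uint32_t num_slots = roundup_pow_of_two32(num_elem);
	uint32_t index_mask = num_slots - 1;

	/* indices wrap with a mask instead of a modulo */
	printf("slots=%u mask=0x%x idx(129)=%u\n",
	       num_slots, index_mask, 129u & index_mask);
	return 0;
}
```

Storing log2 of the element size (order_base_2() in the excerpt) serves the same goal: offsets become shifts and masks rather than multiplies and divides.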
|