Lines matching full:q (references to the struct fq_sched_data pointer q in net/sched/sch_fq.c, the Linux FQ packet scheduler)

79 		/* Following field is only used for q->internal,
80 		 * because q->internal is not hashed in fq_root[]
93 struct rb_node rate_node; /* anchor in q->delayed tree */
197 static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow, in fq_flow_add_tail() argument
200 struct fq_perband_flows *pband = &q->band_flows[flow->band]; in fq_flow_add_tail()
213 static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_unset_throttled() argument
215 rb_erase(&f->rate_node, &q->delayed); in fq_flow_unset_throttled()
216 q->throttled_flows--; in fq_flow_unset_throttled()
217 fq_flow_add_tail(q, f, OLD_FLOW); in fq_flow_unset_throttled()
220 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_set_throttled() argument
222 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; in fq_flow_set_throttled()
235 rb_insert_color(&f->rate_node, &q->delayed); in fq_flow_set_throttled()
236 q->throttled_flows++; in fq_flow_set_throttled()
237 q->stat_throttled++; in fq_flow_set_throttled()
240 if (q->time_next_delayed_flow > f->time_next_packet) in fq_flow_set_throttled()
241 q->time_next_delayed_flow = f->time_next_packet; in fq_flow_set_throttled()
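Taken together, fq_flow_set_throttled() and fq_flow_unset_throttled() park a paced flow in q->delayed, a tree ordered by each flow's time_next_packet, and cache the earliest deadline in q->time_next_delayed_flow so the dequeue path can program a single watchdog. Below is a minimal userspace sketch of that ordering logic; it uses a sorted singly-linked list in place of the kernel rbtree, and all names are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct fq_flow: only the pacing deadline matters. */
struct flow {
	uint64_t time_next_packet;	/* ns timestamp of next allowed send */
	struct flow *next;
};

struct sched {
	struct flow *delayed;			/* sorted by time_next_packet */
	uint64_t time_next_delayed_flow;	/* cached earliest deadline */
	unsigned long throttled_flows;
};

/* Same idea as fq_flow_set_throttled(): insert in deadline order and
 * keep the cached minimum current (the kernel uses an rbtree, so its
 * insert is O(log n) instead of this O(n) walk). */
static void flow_set_throttled(struct sched *q, struct flow *f)
{
	struct flow **p = &q->delayed;

	while (*p && (*p)->time_next_packet <= f->time_next_packet)
		p = &(*p)->next;
	f->next = *p;
	*p = f;
	q->throttled_flows++;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}

int main(void)
{
	struct sched q = { .time_next_delayed_flow = ~0ULL };
	struct flow a = { .time_next_packet = 3000 };
	struct flow b = { .time_next_packet = 1000 };

	flow_set_throttled(&q, &a);
	flow_set_throttled(&q, &b);
	printf("next deadline: %llu ns, throttled flows: %lu\n",
	       (unsigned long long)q.time_next_delayed_flow,
	       q.throttled_flows);	/* 1000 ns, 2 */
	return 0;
}
```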
258 static void fq_gc(struct fq_sched_data *q, in fq_gc() argument
295 q->flows -= fcnt; in fq_gc()
296 q->inactive_flows -= fcnt; in fq_gc()
297 q->stat_gc_flows += fcnt; in fq_gc()
315 const struct fq_sched_data *q = qdisc_priv(sch); in fq_fastpath_check() local
318 if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon) in fq_fastpath_check()
321 if (sch->q.qlen != 0) { in fq_fastpath_check()
327 if (q->flows != q->inactive_flows + q->throttled_flows) in fq_fastpath_check()
333 if (q->internal.qlen >= 8) in fq_fastpath_check()
339 if (q->time_next_delayed_flow <= now + q->offload_horizon) in fq_fastpath_check()
348 if (q->flow_max_rate != ~0UL) in fq_fastpath_check()
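Read as a whole, the fq_fastpath_check() fragments form one predicate: a packet may skip flow lookup only if it is sendable within the offload horizon, every queued flow is accounted inactive or throttled, the internal (fastpath) queue holds fewer than 8 packets, no throttled flow wakes within the horizon, and no global flow_max_rate is configured. A hedged consolidation of just those visible conditions, with an illustrative struct standing in for fq_sched_data:

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative subset of fq_sched_data: only the fields the checks read. */
struct fq_state {
	uint64_t offload_horizon;
	uint64_t time_next_delayed_flow;
	unsigned long flows, inactive_flows, throttled_flows;
	unsigned long flow_max_rate;
	unsigned int qlen;		/* sch->q.qlen in the fragments */
	unsigned int internal_qlen;	/* q->internal.qlen */
};

/* Assumed reconstruction of the visible conditions; the real
 * fq_fastpath_check() also consults per-socket pacing state. */
static bool fastpath_ok(const struct fq_state *q, uint64_t time_to_send,
			uint64_t now)
{
	if (time_to_send > now + q->offload_horizon)
		return false;	/* paced too far into the future */
	if (q->qlen != 0) {
		/* queue not empty: every flow must be idle or throttled,
		 * and the fastpath (internal) queue must stay short */
		if (q->flows != q->inactive_flows + q->throttled_flows)
			return false;
		if (q->internal_qlen >= 8)
			return false;
		/* a throttled flow is about to be released */
		if (q->time_next_delayed_flow <= now + q->offload_horizon)
			return false;
	}
	if (q->flow_max_rate != ~0UL)
		return false;	/* a global rate cap forces the slow path */
	return true;
}

int main(void)
{
	struct fq_state q = { .flow_max_rate = ~0UL };

	return !fastpath_ok(&q, 0, 0);	/* empty, unpaced qdisc: fastpath */
}
```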
357 struct fq_sched_data *q = qdisc_priv(sch); in fq_classify() local
374 unsigned long hash = skb_get_hash(skb) & q->orphan_mask; in fq_classify()
382 unsigned long hash = skb_get_hash(skb) & q->orphan_mask; in fq_classify()
395 q->internal.stat_fastpath_packets++; in fq_classify()
396 if (skb->sk == sk && q->rate_enable && in fq_classify()
400 return &q->internal; in fq_classify()
403 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)]; in fq_classify()
405 fq_gc(q, root, sk); in fq_classify()
421 f->credit = q->initial_quantum; in fq_classify()
423 if (q->rate_enable) in fq_classify()
427 fq_flow_unset_throttled(q, f); in fq_classify()
440 q->stat_allocation_errors++; in fq_classify()
441 return &q->internal; in fq_classify()
449 if (q->rate_enable) in fq_classify()
453 f->credit = q->initial_quantum; in fq_classify()
458 q->flows++; in fq_classify()
459 q->inactive_flows++; in fq_classify()
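fq_classify() buckets a connected socket by hashing its pointer into the fq_root[] array of 2^fq_trees_log tree roots, while unowned packets fall back to skb_get_hash() masked with orphan_mask. A userspace sketch of the pointer-hash bucket selection; hash_ptr_sketch() is a simplified stand-in for the kernel's hash_ptr(), reusing the same golden-ratio multiplier:

```c
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull	/* constant from hash.h */

/* Simplified stand-in for the kernel's hash_ptr(): a multiplicative
 * hash that keeps the top 'bits' bits, so nearby pointers spread out. */
static unsigned int hash_ptr_sketch(const void *ptr, unsigned int bits)
{
	return (unsigned int)(((uint64_t)(uintptr_t)ptr * GOLDEN_RATIO_64)
			      >> (64 - bits));
}

int main(void)
{
	unsigned int fq_trees_log = 10;	/* default: ilog2(1024) buckets */
	int sk_a, sk_b;			/* stand-ins for two sockets */

	printf("bucket for a: %u\n", hash_ptr_sketch(&sk_a, fq_trees_log));
	printf("bucket for b: %u\n", hash_ptr_sketch(&sk_b, fq_trees_log));
	return 0;
}
```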
499 sch->q.qlen--; in fq_dequeue_skb()
535 const struct fq_sched_data *q, u64 now) in fq_packet_beyond_horizon() argument
537 return unlikely((s64)skb->tstamp > (s64)(now + q->horizon)); in fq_packet_beyond_horizon()
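fq_packet_beyond_horizon() compares the packet's delivery time with now + horizon after casting both sides to s64, so an unset or past tstamp can never register as beyond the horizon. A small demonstration of that signed comparison:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the fragment: nonzero if a delivery time lands beyond
 * now + horizon. Comparing as signed 64-bit values means an unset
 * (zero) or past tstamp is never treated as beyond the horizon. */
static int beyond_horizon(uint64_t tstamp, uint64_t now, uint64_t horizon)
{
	return (int64_t)tstamp > (int64_t)(now + horizon);
}

int main(void)
{
	uint64_t now = 1000000000ull;			/* 1 s in ns */
	uint64_t horizon = 10ull * 1000000000ull;	/* default 10 s */

	printf("%d\n", beyond_horizon(0, now, horizon));		  /* 0 */
	printf("%d\n", beyond_horizon(now + horizon + 1, now, horizon)); /* 1 */
	return 0;
}
```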
545 struct fq_sched_data *q = qdisc_priv(sch); in fq_enqueue() local
550 band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX); in fq_enqueue()
551 if (unlikely(q->band_pkt_count[band] >= sch->limit)) { in fq_enqueue()
552 q->stat_band_drops[band]++; in fq_enqueue()
562 if (fq_packet_beyond_horizon(skb, q, now)) { in fq_enqueue()
563 if (q->horizon_drop) { in fq_enqueue()
564 q->stat_horizon_drops++; in fq_enqueue()
568 q->stat_horizon_caps++; in fq_enqueue()
569 skb->tstamp = now + q->horizon; in fq_enqueue()
576 if (f != &q->internal) { in fq_enqueue()
577 if (unlikely(f->qlen >= q->flow_plimit)) { in fq_enqueue()
578 q->stat_flows_plimit++; in fq_enqueue()
584 fq_flow_add_tail(q, f, NEW_FLOW); in fq_enqueue()
585 if (time_after(jiffies, f->age + q->flow_refill_delay)) in fq_enqueue()
586 f->credit = max_t(u32, f->credit, q->quantum); in fq_enqueue()
590 q->band_pkt_count[band]++; in fq_enqueue()
593 q->inactive_flows--; in fq_enqueue()
601 sch->q.qlen++; in fq_enqueue()
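In fq_enqueue(), a flow re-entering service is appended as a NEW_FLOW, and if it has sat idle longer than flow_refill_delay its credit is recomputed with max_t(u32, ...): the unsigned compare tops a small positive credit back up to one quantum while leaving a flow still in deficit (negative credit) untouched. A sketch of that refill rule, with jiffies and time_after() replaced by plain milliseconds:

```c
#include <stdint.h>
#include <stdio.h>

struct flow {
	int32_t credit;		/* bytes the flow may still send this round */
	uint64_t age_ms;	/* when the flow last went idle */
};

/* Refill rule from the fq_enqueue() fragment, with max_t(u32, ...)
 * spelled out: a negative credit compares huge as unsigned and is
 * kept, while a small positive credit is raised to one quantum. */
static void refill_on_wakeup(struct flow *f, uint64_t now_ms,
			     uint64_t refill_delay_ms, uint32_t quantum)
{
	if (now_ms > f->age_ms + refill_delay_ms) {
		uint32_t c = (uint32_t)f->credit;

		f->credit = (int32_t)(c > quantum ? c : quantum);
	}
}

int main(void)
{
	struct flow a = { .credit = 500, .age_ms = 0 };
	struct flow b = { .credit = -100, .age_ms = 0 };

	refill_on_wakeup(&a, 50, 40, 3028);	/* idle 50 ms > 40 ms delay */
	refill_on_wakeup(&b, 50, 40, 3028);
	printf("a: %d (topped up), b: %d (deficit kept)\n",
	       a.credit, b.credit);		/* a: 3028, b: -100 */
	return 0;
}
```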
607 static void fq_check_throttled(struct fq_sched_data *q, u64 now) in fq_check_throttled() argument
612 if (q->time_next_delayed_flow > now + q->offload_horizon) in fq_check_throttled()
618 sample = (unsigned long)(now - q->time_next_delayed_flow); in fq_check_throttled()
620 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3; in fq_check_throttled()
621 q->unthrottle_latency_ns += sample >> 3; in fq_check_throttled()
623 now += q->offload_horizon; in fq_check_throttled()
625 q->time_next_delayed_flow = ~0ULL; in fq_check_throttled()
626 while ((p = rb_first(&q->delayed)) != NULL) { in fq_check_throttled()
630 q->time_next_delayed_flow = f->time_next_packet; in fq_check_throttled()
633 fq_flow_unset_throttled(q, f); in fq_check_throttled()
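The two unthrottle_latency_ns lines in fq_check_throttled() are an exponentially weighted moving average with weight 1/8, computed with shifts only: avg = avg - avg/8 + sample/8. A compact restatement:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* EWMA with weight 1/8, exactly as the two fragment lines compute it:
 * avg -= avg >> 3; avg += sample >> 3; i.e. avg += (sample - avg) / 8
 * up to the rounding of the two shifts. */
static uint64_t ewma_update(uint64_t avg, uint64_t sample)
{
	avg -= avg >> 3;
	avg += sample >> 3;
	return avg;
}

int main(void)
{
	uint64_t avg = 0;

	/* Feed a constant 8000 ns latency: the average converges on it. */
	for (int i = 0; i < 32; i++)
		avg = ewma_update(avg, 8000);
	printf("avg after 32 samples: %" PRIu64 " ns\n", avg);
	return 0;
}
```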
650 struct fq_sched_data *q = qdisc_priv(sch); in fq_dequeue() local
660 if (!sch->q.qlen) in fq_dequeue()
663 skb = fq_peek(&q->internal); in fq_dequeue()
665 q->internal.qlen--; in fq_dequeue()
666 fq_dequeue_skb(sch, &q->internal, skb); in fq_dequeue()
671 fq_check_throttled(q, now); in fq_dequeue()
673 pband = &q->band_flows[q->band_nr]; in fq_dequeue()
678 if (++q->band_nr == FQ_BANDS) in fq_dequeue()
679 q->band_nr = 0; in fq_dequeue()
680 pband = &q->band_flows[q->band_nr]; in fq_dequeue()
687 if (q->time_next_delayed_flow != ~0ULL) in fq_dequeue()
688 qdisc_watchdog_schedule_range_ns(&q->watchdog, in fq_dequeue()
689 q->time_next_delayed_flow, in fq_dequeue()
690 q->timer_slack); in fq_dequeue()
696 f->credit += q->quantum; in fq_dequeue()
698 fq_flow_add_tail(q, f, OLD_FLOW); in fq_dequeue()
707 if (now + q->offload_horizon < time_next_packet) { in fq_dequeue()
710 fq_flow_set_throttled(q, f); in fq_dequeue()
714 if ((s64)(now - time_next_packet - q->ce_threshold) > 0) { in fq_dequeue()
716 q->stat_ce_mark++; in fq_dequeue()
719 q->inactive_flows++; in fq_dequeue()
720 q->band_pkt_count[fq_skb_cb(skb)->band]--; in fq_dequeue()
726 fq_flow_add_tail(q, f, OLD_FLOW); in fq_dequeue()
736 if (!q->rate_enable) in fq_dequeue()
739 rate = q->flow_max_rate; in fq_dequeue()
749 if (rate <= q->low_rate_threshold) { in fq_dequeue()
752 plen = max(plen, q->quantum); in fq_dequeue()
768 q->stat_pkts_too_long++; in fq_dequeue()
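The fq_dequeue() fragments show the deficit-round-robin core: a flow whose credit is exhausted is recharged with q->quantum and moved to the tail of the old-flows list instead of being serviced, so every flow gets roughly one quantum of bytes per round. A minimal sketch of that credit cycle:

```c
#include <stdio.h>

struct flow {
	int credit;	/* byte deficit counter, may go negative */
};

/* Deficit round robin as the fq_dequeue() fragments apply it: a flow
 * may send while credit > 0 and each packet subtracts its length; once
 * the credit is gone the flow is recharged with one quantum and moved
 * to the back of the old-flows list to await its next turn. */
static int try_send(struct flow *f, int plen, int quantum)
{
	if (f->credit <= 0) {
		f->credit += quantum;	/* recharge, requeue as OLD_FLOW */
		return 0;		/* nothing dequeued this turn */
	}
	f->credit -= plen;		/* send; credit may go negative */
	return 1;
}

int main(void)
{
	struct flow f = { .credit = 3028 };	/* one 2*MTU quantum */
	int sent = 0;

	for (int i = 0; i < 4; i++)
		sent += try_send(&f, 1500, 3028);
	printf("sent %d of 4 attempts, credit now %d\n", sent, f.credit);
	return 0;	/* sent 3 of 4, credit 1556 */
}
```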
801 struct fq_sched_data *q = qdisc_priv(sch); in fq_reset() local
807 sch->q.qlen = 0; in fq_reset()
810 fq_flow_purge(&q->internal); in fq_reset()
812 if (!q->fq_root) in fq_reset()
815 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { in fq_reset()
816 root = &q->fq_root[idx]; in fq_reset()
827 q->band_flows[idx].new_flows.first = NULL; in fq_reset()
828 q->band_flows[idx].old_flows.first = NULL; in fq_reset()
830 q->delayed = RB_ROOT; in fq_reset()
831 q->flows = 0; in fq_reset()
832 q->inactive_flows = 0; in fq_reset()
833 q->throttled_flows = 0; in fq_reset()
836 static void fq_rehash(struct fq_sched_data *q, in fq_rehash() argument
876 q->flows -= fcnt; in fq_rehash()
877 q->inactive_flows -= fcnt; in fq_rehash()
878 q->stat_gc_flows += fcnt; in fq_rehash()
888 struct fq_sched_data *q = qdisc_priv(sch); in fq_resize() local
893 if (q->fq_root && log == q->fq_trees_log) in fq_resize()
907 old_fq_root = q->fq_root; in fq_resize()
909 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log); in fq_resize()
911 q->fq_root = array; in fq_resize()
912 WRITE_ONCE(q->fq_trees_log, log); in fq_resize()
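fq_resize() builds a new array of 2^log buckets, lets fq_rehash() move every flow over by rehashing its key at the new size, and then publishes the new table and fq_trees_log (the latter with WRITE_ONCE() so lockless readers never see a torn value). A sketch of the same grow-and-rehash pattern with a chained hash table standing in for the rbtree buckets:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Chained hash table standing in for the fq_root[] array of rbtrees. */
struct node {
	const void *key;	/* the socket pointer in the real code */
	struct node *next;
};

static unsigned int bucket(const void *key, unsigned int log)
{
	return (unsigned int)(((uint64_t)(uintptr_t)key *
			       0x61C8864680B583EBull) >> (64 - log));
}

/* Grow-and-rehash in the fq_resize()/fq_rehash() shape: allocate the
 * new array first, move every entry by rehashing its key at the new
 * size, then publish the new table. The kernel additionally publishes
 * fq_trees_log with WRITE_ONCE() for its lockless readers. */
static struct node **resize(struct node **old, unsigned int old_log,
			    unsigned int new_log)
{
	struct node **tab = calloc(1u << new_log, sizeof(*tab));

	if (!tab)
		return old;	/* keep the old table on allocation failure */
	for (unsigned int i = 0; i < (1u << old_log); i++) {
		while (old[i]) {
			struct node *n = old[i];
			unsigned int b = bucket(n->key, new_log);

			old[i] = n->next;
			n->next = tab[b];
			tab[b] = n;
		}
	}
	free(old);
	return tab;
}

int main(void)
{
	struct node **tab = calloc(8, sizeof(*tab));
	struct node n = { .key = &n };

	tab[bucket(n.key, 3)] = &n;
	tab = resize(tab, 3, 4);	/* grow from 8 to 16 buckets */
	printf("rehashed into bucket %u\n", bucket(n.key, 4));
	free(tab);
	return 0;
}
```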
972 static int fq_load_weights(struct fq_sched_data *q, in fq_load_weights() argument
987 WRITE_ONCE(q->band_flows[i].quantum, weights[i]); in fq_load_weights()
991 static int fq_load_priomap(struct fq_sched_data *q, in fq_load_priomap() argument
1009 fq_prio2band_compress_crumb(map->priomap, q->prio2band); in fq_load_priomap()
1016 struct fq_sched_data *q = qdisc_priv(sch); in fq_change() local
1029 fq_log = q->fq_trees_log; in fq_change()
1044 WRITE_ONCE(q->flow_plimit, in fq_change()
1051 WRITE_ONCE(q->quantum, quantum); in fq_change()
1059 WRITE_ONCE(q->initial_quantum, in fq_change()
1069 WRITE_ONCE(q->flow_max_rate, in fq_change()
1073 WRITE_ONCE(q->low_rate_threshold, in fq_change()
1080 WRITE_ONCE(q->rate_enable, in fq_change()
1089 WRITE_ONCE(q->flow_refill_delay, in fq_change()
1094 err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack); in fq_change()
1097 err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack); in fq_change()
1100 WRITE_ONCE(q->orphan_mask, in fq_change()
1104 WRITE_ONCE(q->ce_threshold, in fq_change()
1109 WRITE_ONCE(q->timer_slack, in fq_change()
1113 WRITE_ONCE(q->horizon, in fq_change()
1118 WRITE_ONCE(q->horizon_drop, in fq_change()
1126 WRITE_ONCE(q->offload_horizon, offload_horizon); in fq_change()
1138 while (sch->q.qlen > sch->limit) { in fq_change()
1155 struct fq_sched_data *q = qdisc_priv(sch); in fq_destroy() local
1158 fq_free(q->fq_root); in fq_destroy()
1159 qdisc_watchdog_cancel(&q->watchdog); in fq_destroy()
1165 struct fq_sched_data *q = qdisc_priv(sch); in fq_init() local
1169 q->flow_plimit = 100; in fq_init()
1170 q->quantum = 2 * psched_mtu(qdisc_dev(sch)); in fq_init()
1171 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); in fq_init()
1172 q->flow_refill_delay = msecs_to_jiffies(40); in fq_init()
1173 q->flow_max_rate = ~0UL; in fq_init()
1174 q->time_next_delayed_flow = ~0ULL; in fq_init()
1175 q->rate_enable = 1; in fq_init()
1177 q->band_flows[i].new_flows.first = NULL; in fq_init()
1178 q->band_flows[i].old_flows.first = NULL; in fq_init()
1180 q->band_flows[0].quantum = 9 << 16; in fq_init()
1181 q->band_flows[1].quantum = 3 << 16; in fq_init()
1182 q->band_flows[2].quantum = 1 << 16; in fq_init()
1183 q->delayed = RB_ROOT; in fq_init()
1184 q->fq_root = NULL; in fq_init()
1185 q->fq_trees_log = ilog2(1024); in fq_init()
1186 q->orphan_mask = 1024 - 1; in fq_init()
1187 q->low_rate_threshold = 550000 / 8; in fq_init()
1189 q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */ in fq_init()
1191 q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */ in fq_init()
1192 q->horizon_drop = 1; /* by default, drop packets beyond horizon */ in fq_init()
1195 q->ce_threshold = (u64)NSEC_PER_USEC * ~0U; in fq_init()
1197 fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band); in fq_init()
1198 qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC); in fq_init()
1203 err = fq_resize(sch, q->fq_trees_log); in fq_init()
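fq_init() seeds the qdisc with its defaults: a quantum of two MTUs, an initial_quantum of ten, a 40 ms refill delay, a 1023 orphan mask over 1024 hash buckets, a 10 s horizon with drop enabled, and 9:3:1 band weights stored pre-shifted by 16 bits. A compact restatement of those values, with a plain mtu argument standing in for psched_mtu(qdisc_dev(sch)):

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ull
#define NSEC_PER_SEC	1000000000ull

struct fq_defaults {
	uint32_t flow_plimit, quantum, initial_quantum;
	uint32_t flow_refill_delay_ms, orphan_mask, low_rate_threshold;
	uint64_t timer_slack, horizon, ce_threshold;
	uint32_t band_weights[3];
	int horizon_drop;
};

/* Defaults exactly as the fq_init() fragments set them; 'mtu' stands
 * in for psched_mtu(qdisc_dev(sch)). */
static struct fq_defaults fq_defaults(uint32_t mtu)
{
	return (struct fq_defaults){
		.flow_plimit		= 100,
		.quantum		= 2 * mtu,
		.initial_quantum	= 10 * mtu,
		.flow_refill_delay_ms	= 40,
		.orphan_mask		= 1024 - 1,
		.low_rate_threshold	= 550000 / 8,	/* 550 kbit/s in B/s */
		.timer_slack		= 10 * NSEC_PER_USEC,
		.horizon		= 10 * NSEC_PER_SEC,
		.ce_threshold		= NSEC_PER_USEC * ~0u,	/* disabled */
		.band_weights		= { 9 << 16, 3 << 16, 1 << 16 },
		.horizon_drop		= 1,	/* drop beyond horizon */
	};
}

int main(void)
{
	struct fq_defaults d = fq_defaults(1514);

	printf("quantum %u, initial %u, weights %u:%u:%u\n",
	       d.quantum, d.initial_quantum,
	       d.band_weights[0] >> 16, d.band_weights[1] >> 16,
	       d.band_weights[2] >> 16);
	return 0;
}
```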
1210 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump() local
1226 ce_threshold = READ_ONCE(q->ce_threshold); in fq_dump()
1229 horizon = READ_ONCE(q->horizon); in fq_dump()
1232 offload_horizon = READ_ONCE(q->offload_horizon); in fq_dump()
1238 READ_ONCE(q->flow_plimit)) || in fq_dump()
1240 READ_ONCE(q->quantum)) || in fq_dump()
1242 READ_ONCE(q->initial_quantum)) || in fq_dump()
1244 READ_ONCE(q->rate_enable)) || in fq_dump()
1247 READ_ONCE(q->flow_max_rate), ~0U)) || in fq_dump()
1249 jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) || in fq_dump()
1251 READ_ONCE(q->orphan_mask)) || in fq_dump()
1253 READ_ONCE(q->low_rate_threshold)) || in fq_dump()
1256 READ_ONCE(q->fq_trees_log)) || in fq_dump()
1258 READ_ONCE(q->timer_slack)) || in fq_dump()
1262 READ_ONCE(q->horizon_drop))) in fq_dump()
1265 fq_prio2band_decompress_crumb(q->prio2band, prio.priomap); in fq_dump()
1269 weights[0] = READ_ONCE(q->band_flows[0].quantum); in fq_dump()
1270 weights[1] = READ_ONCE(q->band_flows[1].quantum); in fq_dump()
1271 weights[2] = READ_ONCE(q->band_flows[2].quantum); in fq_dump()
1283 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump_stats() local
1291 st.gc_flows = q->stat_gc_flows; in fq_dump_stats()
1293 st.fastpath_packets = q->internal.stat_fastpath_packets; in fq_dump_stats()
1295 st.throttled = q->stat_throttled; in fq_dump_stats()
1296 st.flows_plimit = q->stat_flows_plimit; in fq_dump_stats()
1297 st.pkts_too_long = q->stat_pkts_too_long; in fq_dump_stats()
1298 st.allocation_errors = q->stat_allocation_errors; in fq_dump_stats()
1299 st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack - in fq_dump_stats()
1301 st.flows = q->flows; in fq_dump_stats()
1302 st.inactive_flows = q->inactive_flows; in fq_dump_stats()
1303 st.throttled_flows = q->throttled_flows; in fq_dump_stats()
1305 q->unthrottle_latency_ns, ~0U); in fq_dump_stats()
1306 st.ce_mark = q->stat_ce_mark; in fq_dump_stats()
1307 st.horizon_drops = q->stat_horizon_drops; in fq_dump_stats()
1308 st.horizon_caps = q->stat_horizon_caps; in fq_dump_stats()
1310 st.band_drops[i] = q->stat_band_drops[i]; in fq_dump_stats()
1311 st.band_pkt_count[i] = q->band_pkt_count[i]; in fq_dump_stats()