Lines Matching refs:cl — uses of the variable cl in the CBQ packet scheduler (net/sched/sch_cbq.c); each entry gives the source line number, the matching code, and the enclosing function, with trailing "local"/"argument" noting how cl is declared there.
163 #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) argument
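L2T() converts a packet length into link-level transmission time by indexing the class's rate table. The real helper is qdisc_l2t() from include/net/pkt_sched.h; the sketch below (hypothetical name l2t_sketch, layout details assumed) shows the lookup it wraps:

    static inline u32 l2t_sketch(struct qdisc_rate_table *rtab,
                                 unsigned int pktlen)
    {
            int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

            if (slot < 0)
                    slot = 0;
            slot >>= rtab->rate.cell_log;   /* bytes -> table cell */
            if (slot > 255)                 /* oversized packets */
                    return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
            return rtab->data[slot];        /* psched ticks to transmit */
    }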
181 struct cbq_class *cl; in cbq_reclassify() local
183 for (cl = this->tparent; cl; cl = cl->tparent) { in cbq_reclassify()
184 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT]; in cbq_reclassify()
210 struct cbq_class *cl = NULL; in cbq_classify() local
219 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
220 return cl; in cbq_classify()
235 cl = (void *)res.class; in cbq_classify()
236 if (!cl) { in cbq_classify()
238 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
239 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) in cbq_classify()
240 cl = defmap[TC_PRIO_BESTEFFORT]; in cbq_classify()
242 if (cl == NULL) in cbq_classify()
245 if (cl->level >= head->level) in cbq_classify()
257 return cbq_reclassify(skb, cl); in cbq_classify()
260 if (cl->level == 0) in cbq_classify()
261 return cl; in cbq_classify()
268 head = cl; in cbq_classify()
272 cl = head; in cbq_classify()
278 !(cl = head->defaults[prio & TC_PRIO_MAX]) && in cbq_classify()
279 !(cl = head->defaults[TC_PRIO_BESTEFFORT])) in cbq_classify()
282 return cl; in cbq_classify()
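The fragments above form a fall-back chain: a direct classid match, then the attached filters, then the split node's default map. A sketch of the defmap fall-back, assembled from the lines above (TC_H_MAJ() is the kernel macro testing the major handle part):

    cl = (void *)res.class;
    if (!cl) {
            if (TC_H_MAJ(res.classid))
                    cl = cbq_class_lookup(q, res.classid);
            else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                    cl = defmap[TC_PRIO_BESTEFFORT];  /* last resort */
    }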
291 static inline void cbq_activate_class(struct cbq_class *cl) in cbq_activate_class() argument
293 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class()
294 int prio = cl->cpriority; in cbq_activate_class()
298 q->active[prio] = cl; in cbq_activate_class()
301 cl->next_alive = cl_tail->next_alive; in cbq_activate_class()
302 cl_tail->next_alive = cl; in cbq_activate_class()
304 cl->next_alive = cl; in cbq_activate_class()
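Reconstructed from the fragments above, the whole function is a tail insert into a circular "alive" ring; q->active[prio] always points at the ring's tail. Sketch; details the listing does not show, such as the activemask update, are filled in as assumptions:

    static void cbq_activate_class(struct cbq_class *cl)
    {
            struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
            int prio = cl->cpriority;
            struct cbq_class *cl_tail;

            cl_tail = q->active[prio];
            q->active[prio] = cl;                      /* new tail */

            if (cl_tail != NULL) {
                    cl->next_alive = cl_tail->next_alive;  /* old head */
                    cl_tail->next_alive = cl;
            } else {
                    cl->next_alive = cl;               /* ring of one */
                    q->activemask |= (1 << prio);      /* assumed detail */
            }
    }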
319 struct cbq_class *cl; in cbq_deactivate_class() local
323 cl = cl_prev->next_alive; in cbq_deactivate_class()
324 if (cl == this) { in cbq_deactivate_class()
325 cl_prev->next_alive = cl->next_alive; in cbq_deactivate_class()
326 cl->next_alive = NULL; in cbq_deactivate_class()
328 if (cl == q->active[prio]) { in cbq_deactivate_class()
330 if (cl == q->active[prio]) { in cbq_deactivate_class()
338 } while ((cl_prev = cl) != q->active[prio]); in cbq_deactivate_class()
342 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_mark_toplevel() argument
346 if (toplevel > cl->level) { in cbq_mark_toplevel()
350 if (cl->undertime < now) { in cbq_mark_toplevel()
351 q->toplevel = cl->level; in cbq_mark_toplevel()
354 } while ((cl = cl->borrow) != NULL && toplevel > cl->level); in cbq_mark_toplevel()
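The toplevel rule bounds borrowing depth: on packet arrival, the lowest under-limit level along cl's borrow chain becomes the new borrowing ceiling. Sketch assembled from the fragments above:

    static void cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
    {
            int toplevel = q->toplevel;

            if (toplevel > cl->level) {
                    psched_time_t now = psched_get_time();

                    do {
                            if (cl->undertime < now) {
                                    q->toplevel = cl->level;  /* new ceiling */
                                    return;
                            }
                    } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
            }
    }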
364 struct cbq_class *cl = cbq_classify(skb, sch, &ret); in cbq_enqueue() local
367 q->rx_class = cl; in cbq_enqueue()
369 if (cl == NULL) { in cbq_enqueue()
376 ret = qdisc_enqueue(skb, cl->q, to_free); in cbq_enqueue()
379 cbq_mark_toplevel(q, cl); in cbq_enqueue()
380 if (!cl->next_alive) in cbq_enqueue()
381 cbq_activate_class(cl); in cbq_enqueue()
387 cbq_mark_toplevel(q, cl); in cbq_enqueue()
388 cl->qstats.drops++; in cbq_enqueue()
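Put together, enqueue is: classify, hand the skb to the leaf qdisc, and on success refresh the toplevel and make sure the class sits in the alive ring. Simplified sketch with the statistics details trimmed:

    cl = cbq_classify(skb, sch, &ret);
    q->rx_class = cl;
    if (cl == NULL) {
            __qdisc_drop(skb, to_free);        /* no class matched */
            return ret;
    }

    ret = qdisc_enqueue(skb, cl->q, to_free);
    if (ret == NET_XMIT_SUCCESS) {
            sch->q.qlen++;
            cbq_mark_toplevel(q, cl);
            if (!cl->next_alive)               /* join the alive ring */
                    cbq_activate_class(cl);
    } else if (net_xmit_drop_count(ret)) {
            cbq_mark_toplevel(q, cl);          /* still update toplevel */
            cl->qstats.drops++;
    }
    return ret;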
394 static void cbq_overlimit(struct cbq_class *cl) in cbq_overlimit() argument
396 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_overlimit()
397 psched_tdiff_t delay = cl->undertime - q->now; in cbq_overlimit()
399 if (!cl->delayed) { in cbq_overlimit()
400 delay += cl->offtime; in cbq_overlimit()
409 if (cl->avgidle < 0) in cbq_overlimit()
410 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); in cbq_overlimit()
411 if (cl->avgidle < cl->minidle) in cbq_overlimit()
412 cl->avgidle = cl->minidle; in cbq_overlimit()
415 cl->undertime = q->now + delay; in cbq_overlimit()
417 cl->xstats.overactions++; in cbq_overlimit()
418 cl->delayed = 1; in cbq_overlimit()
431 for (b = cl->borrow; b; b = b->borrow) { in cbq_overlimit()
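An overlimit class is parked until cl->undertime. On the first offense, offtime is added as an extra penalty, and the avgidle share already folded into undertime is backed out so the class is not punished twice. Sketch of the first-offense branch:

    struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
    psched_tdiff_t delay = cl->undertime - q->now;

    if (!cl->delayed) {
            delay += cl->offtime;              /* extra penalty, once */
            /* back out the avgidle component already in undertime */
            if (cl->avgidle < 0)
                    delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
            if (cl->avgidle < cl->minidle)
                    cl->avgidle = cl->minidle; /* clamp the debt */
            if (delay <= 0)
                    delay = 1;
            cl->undertime = q->now + delay;
            cl->xstats.overactions++;
            cl->delayed = 1;
    }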
447 struct cbq_class *cl; in cbq_undelay_prio() local
455 cl = cl_prev->next_alive; in cbq_undelay_prio()
456 if (now - cl->penalized > 0) { in cbq_undelay_prio()
457 cl_prev->next_alive = cl->next_alive; in cbq_undelay_prio()
458 cl->next_alive = NULL; in cbq_undelay_prio()
459 cl->cpriority = cl->priority; in cbq_undelay_prio()
460 cl->delayed = 0; in cbq_undelay_prio()
461 cbq_activate_class(cl); in cbq_undelay_prio()
463 if (cl == q->active[prio]) { in cbq_undelay_prio()
465 if (cl == q->active[prio]) { in cbq_undelay_prio()
471 cl = cl_prev->next_alive; in cbq_undelay_prio()
472 } else if (sched - cl->penalized > 0) in cbq_undelay_prio()
473 sched = cl->penalized; in cbq_undelay_prio()
474 } while ((cl_prev = cl) != q->active[prio]); in cbq_undelay_prio()
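The watchdog walk: classes parked at a penalty priority return to their real priority once cl->penalized has passed; otherwise the earliest pending expiry becomes the next timer target. Core of the per-class step (sketch):

    if (now - cl->penalized > 0) {             /* penalty expired */
            cl_prev->next_alive = cl->next_alive;  /* unlink from ring */
            cl->next_alive = NULL;
            cl->cpriority = cl->priority;      /* restore real priority */
            cl->delayed = 0;
            cbq_activate_class(cl);            /* back into its real ring */
    } else if (sched - cl->penalized > 0) {
            sched = cl->penalized;             /* keep earliest wake-up */
    }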
529 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, in cbq_update_toplevel() argument
532 if (cl && q->toplevel >= borrowed->level) { in cbq_update_toplevel()
533 if (cl->q->q.qlen > 1) { in cbq_update_toplevel()
554 struct cbq_class *cl = this; in cbq_update() local
564 for ( ; cl; cl = cl->share) { in cbq_update()
565 long avgidle = cl->avgidle; in cbq_update()
568 _bstats_update(&cl->bstats, len, 1); in cbq_update()
577 idle = now - cl->last; in cbq_update()
579 avgidle = cl->maxidle; in cbq_update()
581 idle -= L2T(cl, len); in cbq_update()
588 avgidle += idle - (avgidle>>cl->ewma_log); in cbq_update()
594 if (avgidle < cl->minidle) in cbq_update()
595 avgidle = cl->minidle; in cbq_update()
597 cl->avgidle = avgidle; in cbq_update()
607 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); in cbq_update()
619 idle += L2T(cl, len); in cbq_update()
621 cl->undertime = now + idle; in cbq_update()
625 cl->undertime = PSCHED_PASTPERFECT; in cbq_update()
626 if (avgidle > cl->maxidle) in cbq_update()
627 cl->avgidle = cl->maxidle; in cbq_update()
629 cl->avgidle = avgidle; in cbq_update()
631 if ((s64)(now - cl->last) > 0) in cbq_update()
632 cl->last = now; in cbq_update()
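This is the classic CBQ avgidle estimator: idle is the observed inter-packet gap minus the gap a class running at exactly its allocated rate would leave, and avgidle is its EWMA, avgidle += (idle - avgidle) / 2^ewma_log. Negative avgidle means overlimit. Simplified sketch (the additional q->link correction on undertime is omitted):

    long avgidle = cl->avgidle;
    long idle;

    idle = now - cl->last;                     /* observed gap */
    if ((unsigned long)idle > 128 * 1024 * 1024) {
            avgidle = cl->maxidle;             /* idle "forever": reset */
    } else {
            idle -= L2T(cl, len);              /* minus nominal tx time */
            avgidle += idle - (avgidle >> cl->ewma_log);
    }

    if (avgidle <= 0) {                        /* overlimit */
            if (avgidle < cl->minidle)
                    avgidle = cl->minidle;
            cl->avgidle = avgidle;
            /* when may the class send again at its allocated rate? */
            idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
            idle += L2T(cl, len);
            cl->undertime = now + idle;
    } else {                                   /* under limit */
            cl->undertime = PSCHED_PASTPERFECT;
            cl->avgidle = avgidle > cl->maxidle ? cl->maxidle : avgidle;
    }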
639 cbq_under_limit(struct cbq_class *cl) in cbq_under_limit() argument
641 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_under_limit()
642 struct cbq_class *this_cl = cl; in cbq_under_limit()
644 if (cl->tparent == NULL) in cbq_under_limit()
645 return cl; in cbq_under_limit()
647 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { in cbq_under_limit()
648 cl->delayed = 0; in cbq_under_limit()
649 return cl; in cbq_under_limit()
663 cl = cl->borrow; in cbq_under_limit()
664 if (!cl) { in cbq_under_limit()
669 if (cl->level > q->toplevel) in cbq_under_limit()
671 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); in cbq_under_limit()
673 cl->delayed = 0; in cbq_under_limit()
674 return cl; in cbq_under_limit()
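Borrowing in one place: a root class always sends, an under-limit class sends in its own right, and otherwise the borrow chain is walked upward, bounded by q->toplevel, with the class punished when no ancestor qualifies. Sketch assembled from the fragments above:

    if (cl->tparent == NULL)
            return cl;                         /* root: always allowed */

    if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
            cl->delayed = 0;
            return cl;                         /* under its own limit */
    }

    do {
            cl = cl->borrow;
            if (!cl) {                         /* nobody will lend */
                    this_cl->qstats.overlimits++;
                    cbq_overlimit(this_cl);
                    return NULL;
            }
            if (cl->level > q->toplevel)       /* toplevel rule */
                    return NULL;
    } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

    cl->delayed = 0;
    return cl;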
681 struct cbq_class *cl_tail, *cl_prev, *cl; in cbq_dequeue_prio() local
686 cl = cl_prev->next_alive; in cbq_dequeue_prio()
693 struct cbq_class *borrow = cl; in cbq_dequeue_prio()
695 if (cl->q->q.qlen && in cbq_dequeue_prio()
696 (borrow = cbq_under_limit(cl)) == NULL) in cbq_dequeue_prio()
699 if (cl->deficit <= 0) { in cbq_dequeue_prio()
704 cl->deficit += cl->quantum; in cbq_dequeue_prio()
708 skb = cl->q->dequeue(cl->q); in cbq_dequeue_prio()
717 cl->deficit -= qdisc_pkt_len(skb); in cbq_dequeue_prio()
718 q->tx_class = cl; in cbq_dequeue_prio()
720 if (borrow != cl) { in cbq_dequeue_prio()
723 cl->xstats.borrows++; in cbq_dequeue_prio()
726 cl->xstats.borrows += qdisc_pkt_len(skb); in cbq_dequeue_prio()
731 if (cl->deficit <= 0) { in cbq_dequeue_prio()
732 q->active[prio] = cl; in cbq_dequeue_prio()
733 cl = cl->next_alive; in cbq_dequeue_prio()
734 cl->deficit += cl->quantum; in cbq_dequeue_prio()
739 if (cl->q->q.qlen == 0 || prio != cl->cpriority) { in cbq_dequeue_prio()
743 cl_prev->next_alive = cl->next_alive; in cbq_dequeue_prio()
744 cl->next_alive = NULL; in cbq_dequeue_prio()
747 if (cl == cl_tail) { in cbq_dequeue_prio()
752 if (cl == cl_tail) { in cbq_dequeue_prio()
756 if (cl->q->q.qlen) in cbq_dequeue_prio()
757 cbq_activate_class(cl); in cbq_dequeue_prio()
763 if (cl->q->q.qlen) in cbq_dequeue_prio()
764 cbq_activate_class(cl); in cbq_dequeue_prio()
766 cl = cl_prev; in cbq_dequeue_prio()
770 cl_prev = cl; in cbq_dequeue_prio()
771 cl = cl->next_alive; in cbq_dequeue_prio()
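Within one priority, the alive ring is served by weighted round robin: each dequeued packet costs its length in deficit, and an exhausted class tops up by cl->quantum and yields. Heavily simplified sketch of the accounting (the real loop has two separate deficit checks and a packet-count borrows variant):

    if (cl->deficit <= 0) {                    /* credit spent */
            cl->deficit += cl->quantum;        /* top up for next round */
            goto next_class;                   /* let siblings send first */
    }

    skb = cl->q->dequeue(cl->q);
    if (skb) {
            cl->deficit -= qdisc_pkt_len(skb); /* pay per byte */
            q->tx_class = cl;
            if (borrow != cl)                  /* sent on borrowed credit */
                    cl->xstats.borrows += qdisc_pkt_len(skb);
            return skb;
    }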
870 struct cbq_class *cl; in cbq_adjust_levels() local
872 cl = this->children; in cbq_adjust_levels()
873 if (cl) { in cbq_adjust_levels()
875 if (cl->level > level) in cbq_adjust_levels()
876 level = cl->level; in cbq_adjust_levels()
877 } while ((cl = cl->sibling) != this->children); in cbq_adjust_levels()
885 struct cbq_class *cl; in cbq_normalize_quanta() local
892 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_normalize_quanta()
896 if (cl->priority == prio) { in cbq_normalize_quanta()
897 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ in cbq_normalize_quanta()
900 if (cl->quantum <= 0 || in cbq_normalize_quanta()
901 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) { in cbq_normalize_quanta()
903 cl->common.classid, cl->quantum); in cbq_normalize_quanta()
904 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; in cbq_normalize_quanta()
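The per-round quantum scales allot by the class's share of the weight at its priority: quantum = weight * allot * nclasses / sum_of_weights, sanity-clamped against 32 * MTU. A worked example with assumed numbers:

    /* Assumed: three classes at prio 5, allot = 1514 bytes,
     * weights 1, 2 and 1  =>  q->nclasses[5] = 3, q->quanta[5] = 4.
     *
     *   weight-2 class: quantum = 2 * 1514 * 3 / 4 = 2271 bytes/round
     *   weight-1 class: quantum = 1 * 1514 * 3 / 4 = 1135 bytes/round
     *
     * so the weight-2 class may send twice as many bytes per round.
     * A result <= 0 or > 32 * MTU is rejected and reset to MTU/2 + 1. */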
910 static void cbq_sync_defmap(struct cbq_class *cl) in cbq_sync_defmap() argument
912 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_sync_defmap()
913 struct cbq_class *split = cl->split; in cbq_sync_defmap()
921 if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) in cbq_sync_defmap()
946 static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask) in cbq_change_defmap() argument
951 split = cl->split; in cbq_change_defmap()
958 for (split = cl->tparent; split; split = split->tparent) in cbq_change_defmap()
966 if (cl->split != split) { in cbq_change_defmap()
967 cl->defmap = 0; in cbq_change_defmap()
968 cbq_sync_defmap(cl); in cbq_change_defmap()
969 cl->split = split; in cbq_change_defmap()
970 cl->defmap = def & mask; in cbq_change_defmap()
972 cl->defmap = (cl->defmap & ~mask) | (def & mask); in cbq_change_defmap()
974 cbq_sync_defmap(cl); in cbq_change_defmap()
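defmap bit i makes cl the default destination for priority-i traffic that reaches its split node without a filter match; cbq_sync_defmap() republishes the bits into split->defaults[]. Sketch of the stale-entry half of that sync, matching the line-921 fragment above:

    for (i = 0; i <= TC_PRIO_MAX; i++) {
            /* cl was the default here but the defmap bit is gone */
            if (split->defaults[i] == cl && !(cl->defmap & (1 << i)))
                    split->defaults[i] = NULL;
    }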
979 struct cbq_class *cl, **clp; in cbq_unlink_class() local
986 cl = *clp; in cbq_unlink_class()
988 if (cl == this) { in cbq_unlink_class()
989 *clp = cl->sibling; in cbq_unlink_class()
992 clp = &cl->sibling; in cbq_unlink_class()
993 } while ((cl = *clp) != this->sibling); in cbq_unlink_class()
1028 struct cbq_class *cl; in cbq_reset() local
1045 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_reset()
1046 qdisc_reset(cl->q); in cbq_reset()
1048 cl->next_alive = NULL; in cbq_reset()
1049 cl->undertime = PSCHED_PASTPERFECT; in cbq_reset()
1050 cl->avgidle = cl->maxidle; in cbq_reset()
1051 cl->deficit = cl->quantum; in cbq_reset()
1052 cl->cpriority = cl->priority; in cbq_reset()
1059 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) in cbq_set_lss() argument
1062 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; in cbq_set_lss()
1063 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; in cbq_set_lss()
1066 cl->ewma_log = lss->ewma_log; in cbq_set_lss()
1068 cl->avpkt = lss->avpkt; in cbq_set_lss()
1070 cl->minidle = -(long)lss->minidle; in cbq_set_lss()
1072 cl->maxidle = lss->maxidle; in cbq_set_lss()
1073 cl->avgidle = lss->maxidle; in cbq_set_lss()
1076 cl->offtime = lss->offtime; in cbq_set_lss()
1080 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_rmprio() argument
1082 q->nclasses[cl->priority]--; in cbq_rmprio()
1083 q->quanta[cl->priority] -= cl->weight; in cbq_rmprio()
1084 cbq_normalize_quanta(q, cl->priority); in cbq_rmprio()
1087 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_addprio() argument
1089 q->nclasses[cl->priority]++; in cbq_addprio()
1090 q->quanta[cl->priority] += cl->weight; in cbq_addprio()
1091 cbq_normalize_quanta(q, cl->priority); in cbq_addprio()
1094 static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr) in cbq_set_wrr() argument
1096 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_set_wrr()
1099 cl->allot = wrr->allot; in cbq_set_wrr()
1101 cl->weight = wrr->weight; in cbq_set_wrr()
1103 cl->priority = wrr->priority - 1; in cbq_set_wrr()
1104 cl->cpriority = cl->priority; in cbq_set_wrr()
1105 if (cl->priority >= cl->priority2) in cbq_set_wrr()
1106 cl->priority2 = TC_CBQ_MAXPRIO - 1; in cbq_set_wrr()
1109 cbq_addprio(q, cl); in cbq_set_wrr()
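Note the off-by-one convention: userspace WRR priorities are 1-based while the scheduler stores them 0-based, hence the -1 here and the +1 in cbq_dump_wrr() further down. Side by side:

    /* in cbq_set_wrr(): userspace is 1-based, the qdisc is 0-based */
    cl->priority = wrr->priority - 1;
    cl->cpriority = cl->priority;

    /* in cbq_dump_wrr(): convert back for userspace */
    opt.priority = cl->priority + 1;
    opt.cpriority = cl->cpriority + 1;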
1113 static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt) in cbq_set_fopt() argument
1115 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange); in cbq_set_fopt()
1231 static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_rate() argument
1235 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate)) in cbq_dump_rate()
1244 static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_lss() argument
1250 if (cl->borrow == NULL) in cbq_dump_lss()
1252 if (cl->share == NULL) in cbq_dump_lss()
1254 opt.ewma_log = cl->ewma_log; in cbq_dump_lss()
1255 opt.level = cl->level; in cbq_dump_lss()
1256 opt.avpkt = cl->avpkt; in cbq_dump_lss()
1257 opt.maxidle = cl->maxidle; in cbq_dump_lss()
1258 opt.minidle = (u32)(-cl->minidle); in cbq_dump_lss()
1259 opt.offtime = cl->offtime; in cbq_dump_lss()
1270 static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_wrr() argument
1277 opt.allot = cl->allot; in cbq_dump_wrr()
1278 opt.priority = cl->priority + 1; in cbq_dump_wrr()
1279 opt.cpriority = cl->cpriority + 1; in cbq_dump_wrr()
1280 opt.weight = cl->weight; in cbq_dump_wrr()
1290 static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_fopt() argument
1295 if (cl->split || cl->defmap) { in cbq_dump_fopt()
1296 opt.split = cl->split ? cl->split->common.classid : 0; in cbq_dump_fopt()
1297 opt.defmap = cl->defmap; in cbq_dump_fopt()
1309 static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_attr() argument
1311 if (cbq_dump_lss(skb, cl) < 0 || in cbq_dump_attr()
1312 cbq_dump_rate(skb, cl) < 0 || in cbq_dump_attr()
1313 cbq_dump_wrr(skb, cl) < 0 || in cbq_dump_attr()
1314 cbq_dump_fopt(skb, cl) < 0) in cbq_dump_attr()
1349 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class() local
1352 if (cl->tparent) in cbq_dump_class()
1353 tcm->tcm_parent = cl->tparent->common.classid; in cbq_dump_class()
1356 tcm->tcm_handle = cl->common.classid; in cbq_dump_class()
1357 tcm->tcm_info = cl->q->handle; in cbq_dump_class()
1362 if (cbq_dump_attr(skb, cl) < 0) in cbq_dump_class()
1376 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class_stats() local
1379 cl->xstats.avgidle = cl->avgidle; in cbq_dump_class_stats()
1380 cl->xstats.undertime = 0; in cbq_dump_class_stats()
1381 qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog); in cbq_dump_class_stats()
1383 if (cl->undertime != PSCHED_PASTPERFECT) in cbq_dump_class_stats()
1384 cl->xstats.undertime = cl->undertime - q->now; in cbq_dump_class_stats()
1386 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || in cbq_dump_class_stats()
1387 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in cbq_dump_class_stats()
1388 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) in cbq_dump_class_stats()
1391 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in cbq_dump_class_stats()
1397 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_graft() local
1401 cl->common.classid, extack); in cbq_graft()
1406 *old = qdisc_replace(sch, new, &cl->q); in cbq_graft()
1412 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_leaf() local
1414 return cl->q; in cbq_leaf()
1419 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_qlen_notify() local
1421 cbq_deactivate_class(cl); in cbq_qlen_notify()
1431 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) in cbq_destroy_class() argument
1435 WARN_ON(cl->filters); in cbq_destroy_class()
1437 tcf_block_put(cl->block); in cbq_destroy_class()
1438 qdisc_put(cl->q); in cbq_destroy_class()
1439 qdisc_put_rtab(cl->R_tab); in cbq_destroy_class()
1440 gen_kill_estimator(&cl->rate_est); in cbq_destroy_class()
1441 if (cl != &q->link) in cbq_destroy_class()
1442 kfree(cl); in cbq_destroy_class()
1449 struct cbq_class *cl; in cbq_destroy() local
1461 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_destroy()
1462 tcf_block_put(cl->block); in cbq_destroy()
1463 cl->block = NULL; in cbq_destroy()
1467 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], in cbq_destroy()
1469 cbq_destroy_class(sch, cl); in cbq_destroy()
1480 struct cbq_class *cl = (struct cbq_class *)*arg; in cbq_change_class() local
1495 if (cl) { in cbq_change_class()
1498 if (cl->tparent && in cbq_change_class()
1499 cl->tparent->common.classid != parentid) { in cbq_change_class()
1503 if (!cl->tparent && parentid != TC_H_ROOT) { in cbq_change_class()
1517 err = gen_replace_estimator(&cl->bstats, NULL, in cbq_change_class()
1518 &cl->rate_est, in cbq_change_class()
1532 if (cl->next_alive != NULL) in cbq_change_class()
1533 cbq_deactivate_class(cl); in cbq_change_class()
1536 qdisc_put_rtab(cl->R_tab); in cbq_change_class()
1537 cl->R_tab = rtab; in cbq_change_class()
1541 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1544 cbq_rmprio(q, cl); in cbq_change_class()
1545 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1549 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1551 if (cl->q->q.qlen) in cbq_change_class()
1552 cbq_activate_class(cl); in cbq_change_class()
1608 cl = kzalloc(sizeof(*cl), GFP_KERNEL); in cbq_change_class()
1609 if (cl == NULL) in cbq_change_class()
1612 gnet_stats_basic_sync_init(&cl->bstats); in cbq_change_class()
1613 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in cbq_change_class()
1615 kfree(cl); in cbq_change_class()
1620 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, in cbq_change_class()
1624 tcf_block_put(cl->block); in cbq_change_class()
1625 kfree(cl); in cbq_change_class()
1630 cl->R_tab = rtab; in cbq_change_class()
1632 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid, in cbq_change_class()
1634 if (!cl->q) in cbq_change_class()
1635 cl->q = &noop_qdisc; in cbq_change_class()
1637 qdisc_hash_add(cl->q, true); in cbq_change_class()
1639 cl->common.classid = classid; in cbq_change_class()
1640 cl->tparent = parent; in cbq_change_class()
1641 cl->qdisc = sch; in cbq_change_class()
1642 cl->allot = parent->allot; in cbq_change_class()
1643 cl->quantum = cl->allot; in cbq_change_class()
1644 cl->weight = cl->R_tab->rate.rate; in cbq_change_class()
1647 cbq_link_class(cl); in cbq_change_class()
1648 cl->borrow = cl->tparent; in cbq_change_class()
1649 if (cl->tparent != &q->link) in cbq_change_class()
1650 cl->share = cl->tparent; in cbq_change_class()
1652 cl->minidle = -0x7FFFFFFF; in cbq_change_class()
1653 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1654 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1655 if (cl->ewma_log == 0) in cbq_change_class()
1656 cl->ewma_log = q->link.ewma_log; in cbq_change_class()
1657 if (cl->maxidle == 0) in cbq_change_class()
1658 cl->maxidle = q->link.maxidle; in cbq_change_class()
1659 if (cl->avpkt == 0) in cbq_change_class()
1660 cl->avpkt = q->link.avpkt; in cbq_change_class()
1662 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1667 *arg = (unsigned long)cl; in cbq_change_class()
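The new-class path above follows a fixed order; a compact summary reconstructed from the fragments, with error unwinding omitted:

    /* 1. kzalloc + gnet_stats_basic_sync_init    - zeroed class
     * 2. tcf_block_get                           - per-class filter block
     * 3. gen_new_estimator                       - optional rate estimator
     * 4. qdisc_create_dflt(pfifo) or noop_qdisc  - default child queue
     * 5. cbq_link_class                          - hash + sibling links
     * 6. defaults: allot/quantum from the parent, weight from the rate,
     *    borrow = tparent, minidle = -0x7FFFFFFF
     * 7. cbq_set_lss/cbq_set_wrr/cbq_set_fopt    - with q->link fallbacks
     *    for ewma_log, maxidle and avpkt when left at zero              */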
1679 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_delete() local
1681 if (cl->filters || cl->children || cl == &q->link) in cbq_delete()
1686 qdisc_purge_queue(cl->q); in cbq_delete()
1688 if (cl->next_alive) in cbq_delete()
1689 cbq_deactivate_class(cl); in cbq_delete()
1691 if (q->tx_borrowed == cl) in cbq_delete()
1693 if (q->tx_class == cl) { in cbq_delete()
1698 if (q->rx_class == cl) in cbq_delete()
1702 cbq_unlink_class(cl); in cbq_delete()
1703 cbq_adjust_levels(cl->tparent); in cbq_delete()
1704 cl->defmap = 0; in cbq_delete()
1705 cbq_sync_defmap(cl); in cbq_delete()
1707 cbq_rmprio(q, cl); in cbq_delete()
1710 cbq_destroy_class(sch, cl); in cbq_delete()
1718 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_tcf_block() local
1720 if (cl == NULL) in cbq_tcf_block()
1721 cl = &q->link; in cbq_tcf_block()
1723 return cl->block; in cbq_tcf_block()
1731 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_bind_filter() local
1733 if (cl) { in cbq_bind_filter()
1734 if (p && p->level <= cl->level) in cbq_bind_filter()
1736 cl->filters++; in cbq_bind_filter()
1737 return (unsigned long)cl; in cbq_bind_filter()
1744 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_unbind_filter() local
1746 cl->filters--; in cbq_unbind_filter()
1752 struct cbq_class *cl; in cbq_walk() local
1759 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_walk()
1764 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { in cbq_walk()