| /linux/include/net/ |
| A D | fq_impl.h | 36 idx = flow - fq->flows; in __fq_adjust_removal() 152 flow = &fq->flows[idx]; in fq_flow_classify() 160 tin->flows++; in fq_flow_classify() 173 struct fq_flow *cur = &fq->flows[i]; in fq_find_fattest_flow() 357 fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); in fq_init() 358 if (!fq->flows) in fq_init() 364 kvfree(fq->flows); in fq_init() 365 fq->flows = NULL; in fq_init() 370 fq_flow_init(&fq->flows[i]); in fq_init() 383 kvfree(fq->flows); in fq_reset() [all …]
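The fq_init()/fq_reset() matches above follow a common kernel pattern: size the flow table with kvcalloc() (which can fall back to vmalloc for large tables), initialize each slot, and release it with kvfree(). A minimal sketch of that pattern, with demo_* names standing in for the real fq types:

    #include <linux/slab.h> /* kvcalloc(), kvfree(); linux/mm.h on older kernels */

    struct demo_flow { u32 backlog; };

    struct demo_fq {
            struct demo_flow *flows;
            u32 flows_cnt;
    };

    static int demo_fq_init(struct demo_fq *fq, u32 cnt)
    {
            u32 i;

            fq->flows_cnt = cnt;
            /* kvcalloc() zeroes the array and can fall back to vmalloc */
            fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
            if (!fq->flows)
                    return -ENOMEM;

            for (i = 0; i < fq->flows_cnt; i++)
                    fq->flows[i].backlog = 0;       /* stands in for fq_flow_init() */
            return 0;
    }

    static void demo_fq_reset(struct demo_fq *fq)
    {
            kvfree(fq->flows);
            fq->flows = NULL;       /* guard against double free, as fq_init() does */
    }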
|
| A D | fq.h | 53 u32 flows; member 65 struct fq_flow *flows; member
|
| /linux/samples/bpf/ |
| A D | do_hbm_test.sh | 78 flows=1 150 -f=*|--flows=*) 151 flows="${i#*=}" 278 while [ $flow_cnt -le $flows ] ; do 320 while [ $flow_cnt -le $flows ] ; do 346 iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id 366 while [ $flow_cnt -le $flows ] ; do 386 while [ $flow_cnt -le $flows ] ; do
|
| /linux/net/sched/ |
| A D | sch_fq_codel.c | 164 flow = &q->flows[idx]; in fq_codel_drop() 204 flow = &q->flows[idx]; in fq_codel_enqueue() 343 struct fq_codel_flow *flow = q->flows + i; in fq_codel_reset() 385 if (q->flows) in fq_codel_change() 461 kvfree(q->flows); in fq_codel_destroy() 493 if (!q->flows) { in fq_codel_init() 494 q->flows = kvcalloc(q->flows_cnt, in fq_codel_init() 497 if (!q->flows) { in fq_codel_init() 520 kvfree(q->flows); in fq_codel_init() 521 q->flows = NULL; in fq_codel_init() [all …]
|
| A D | sch_fq_pie.c | 57 struct fq_pie_flow *flows; member 149 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue() 301 if (q->flows) { in fq_pie_change() 383 pie_calculate_probability(&q->p_params, &q->flows[idx].vars, in fq_pie_timer() 384 q->flows[idx].backlog); in fq_pie_timer() 424 q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow), in fq_pie_init() 426 if (!q->flows) { in fq_pie_init() 431 struct fq_pie_flow *flow = q->flows + idx; in fq_pie_init() 515 struct fq_pie_flow *flow = q->flows + idx; in fq_pie_reset() 536 kvfree(q->flows); in fq_pie_destroy()
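fq_pie_timer() above shows the other side of a flow table: a periodic timer that walks every flow to refresh its drop probability. A reduced sketch of that walk (demo_* names and the period are illustrative, and the real timer also takes the qdisc lock):

    #include <linux/timer.h>

    struct demo_flow { u32 backlog; };
    static void demo_calculate_probability(struct demo_flow *f);   /* stub */

    struct demo_fq_pie {
            struct timer_list adapt_timer;
            struct demo_flow *flows;
            u32 flows_cnt;
            unsigned long period;   /* in jiffies */
    };

    static void demo_fq_pie_timer(struct timer_list *t)
    {
            struct demo_fq_pie *q = from_timer(q, t, adapt_timer);
            u32 idx;

            /* refresh per-flow state for every flow on each tick */
            for (idx = 0; idx < q->flows_cnt; idx++)
                    demo_calculate_probability(&q->flows[idx]);

            mod_timer(&q->adapt_timer, jiffies + q->period);
    }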
|
| A D | sch_atm.c | 66 struct list_head flows; /* NB: "link" is also on this member 78 list_for_each_entry(flow, &p->flows, list) { in lookup_flow() 356 list_for_each_entry(flow, &p->flows, list) { in atm_tc_walk() 394 list_for_each_entry(flow, &p->flows, list) { in atm_tc_enqueue() 478 list_for_each_entry(flow, &p->flows, list) { in sch_atm_dequeue() 549 INIT_LIST_HEAD(&p->flows); in atm_tc_init() 552 list_add(&p->link.list, &p->flows); in atm_tc_init() 578 list_for_each_entry(flow, &p->flows, list) in atm_tc_reset() 589 list_for_each_entry(flow, &p->flows, list) { in atm_tc_destroy() 594 list_for_each_entry_safe(flow, tmp, &p->flows, list) { in atm_tc_destroy()
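Unlike the array-based qdiscs, sch_atm keeps its flows on a linked list. The matches above are the standard list_head idioms: INIT_LIST_HEAD() at init, list_for_each_entry() for lookup, and the _safe variant for teardown, since nodes are freed while walking. A self-contained sketch:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_flow {
            struct list_head list;
            int id;
    };

    struct demo_sched {
            struct list_head flows;
    };

    static void demo_init(struct demo_sched *p)
    {
            INIT_LIST_HEAD(&p->flows);
    }

    static struct demo_flow *demo_lookup(struct demo_sched *p, int id)
    {
            struct demo_flow *flow;

            list_for_each_entry(flow, &p->flows, list)
                    if (flow->id == id)
                            return flow;
            return NULL;
    }

    static void demo_destroy(struct demo_sched *p)
    {
            struct demo_flow *flow, *tmp;

            /* _safe variant: each node is freed while walking the list */
            list_for_each_entry_safe(flow, tmp, &p->flows, list) {
                    list_del(&flow->list);
                    kfree(flow);
            }
    }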
|
| A D | sch_fq.c | 120 u32 flows; member 254 q->flows -= fcnt; in fq_gc() 304 if (q->flows >= (2U << q->fq_trees_log) && in fq_classify() 305 q->inactive_flows > q->flows/2) in fq_classify() 359 q->flows++; in fq_classify() 692 q->flows = 0; in fq_reset() 737 q->flows -= fcnt; in fq_rehash() 1023 st.flows = q->flows; in fq_dump_stats()
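sch_fq maintains both a total flow count (q->flows) and an inactive count, and the fq_classify() match above gates garbage collection on both: the table must have outgrown the hash trees and be mostly idle. The condition, restated as a helper (a paraphrase, not the kernel's code):

    /* run fq_gc() only when the flow count has outgrown the hash trees
     * AND more than half of the flows are inactive, so GC is amortized
     */
    static bool demo_should_gc(u32 flows, u32 inactive_flows, u32 trees_log)
    {
            return flows >= (2U << trees_log) && inactive_flows > flows / 2;
    }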
|
| A D | sch_cake.c | 150 struct cake_flow flows[CAKE_QUEUES]; member 743 q->flows[reduced_hash].set)) { in cake_hash() 761 if (!q->flows[outer_hash + k].set) { in cake_hash() 776 if (!q->flows[outer_hash + k].set) { in cake_hash() 819 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash() 1525 flow = &b->flows[idx]; in cake_drop() 1715 flow = &b->flows[idx]; in cake_enqueue() 2052 q->cur_flow = flow - b->flows; in cake_dequeue() 2752 struct cake_flow *flow = b->flows + j; in cake_init() 2995 flow = &b->flows[idx % CAKE_QUEUES]; in cake_dump_class_stats() [all …]
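cake_hash() resolves collisions set-associatively: when the hashed slot (reduced_hash) is taken, it probes neighboring slots (outer_hash + k) within the fixed flows[CAKE_QUEUES] array for a free one. A simplified probe, with the set size and names chosen for illustration:

    #define DEMO_QUEUES     1024
    #define DEMO_SET_WAYS   8       /* slots probed per set */

    struct demo_flow { bool set; u32 tag; };

    static u32 demo_hash(struct demo_flow *flows, u32 reduced_hash, u32 tag)
    {
            u32 outer_hash = reduced_hash - (reduced_hash % DEMO_SET_WAYS);
            u32 k;

            /* fast path: direct hit on the hashed slot */
            if (flows[reduced_hash].set && flows[reduced_hash].tag == tag)
                    return reduced_hash;

            /* otherwise probe the rest of the set for a free slot */
            for (k = 0; k < DEMO_SET_WAYS; k++) {
                    u32 idx = (outer_hash + k) % DEMO_QUEUES;

                    if (!flows[idx].set) {
                            flows[idx].set = true;
                            flows[idx].tag = tag;
                            return idx;
                    }
            }
            return reduced_hash;    /* set full: accept the collision */
    }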
|
| /linux/drivers/crypto/allwinner/sun8i-ss/ |
| A D | sun8i-ss-core.c | 71 ss->flows[flow].stat_req++; in sun8i_ss_run_task() 128 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_run_task() 129 ss->flows[flow].status = 0; in sun8i_ss_run_task() 136 if (ss->flows[flow].status == 0) { in sun8i_ss_run_task() 155 ss->flows[flow].status = 1; in ss_irq_handler() 156 complete(&ss->flows[flow].complete); in ss_irq_handler() 455 crypto_engine_exit(ss->flows[i].engine); in sun8i_ss_free_flows() 469 if (!ss->flows) in allocate_flows() 473 init_completion(&ss->flows[i].complete); in allocate_flows() 476 if (!ss->flows[i].engine) { in allocate_flows() [all …]
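All three sun8i-ss files share one handshake: the submitter re-arms a per-flow completion and clears a status flag before kicking the hardware; the IRQ handler sets the flag and completes; a still-zero flag after the timed wait means failure. A condensed sketch (demo_hw_kick(), the timeout, and the error code are assumptions, not the driver's exact values):

    #include <linux/completion.h>
    #include <linux/interrupt.h>    /* irqreturn_t */
    #include <linux/jiffies.h>      /* msecs_to_jiffies() */

    struct demo_flow_slot {
            struct completion complete;
            int status;
    };

    static void demo_hw_kick(struct demo_flow_slot *f);     /* hypothetical doorbell */

    /* submit side: arm the completion before kicking the hardware */
    static int demo_run_task(struct demo_flow_slot *f)
    {
            reinit_completion(&f->complete);
            f->status = 0;

            demo_hw_kick(f);

            wait_for_completion_interruptible_timeout(&f->complete,
                                                      msecs_to_jiffies(2000));
            if (f->status == 0)     /* IRQ never fired: timeout or signal */
                    return -ETIMEDOUT;
            return 0;
    }

    /* IRQ side: mark done, then wake the waiter */
    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
            struct demo_flow_slot *f = data;

            f->status = 1;
            complete(&f->complete);
            return IRQ_HANDLED;
    }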
|
| A D | sun8i-ss-prng.c | 129 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_prng_generate() 130 ss->flows[flow].status = 0; in sun8i_ss_prng_generate() 136 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_prng_generate() 138 if (ss->flows[flow].status == 0) { in sun8i_ss_prng_generate()
|
| A D | sun8i-ss-hash.c | 208 ss->flows[flow].stat_req++; in sun8i_ss_run_hash_task() 241 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_run_hash_task() 242 ss->flows[flow].status = 0; in sun8i_ss_run_hash_task() 247 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_run_hash_task() 249 if (ss->flows[flow].status == 0) { in sun8i_ss_run_hash_task() 310 engine = ss->flows[e].engine; in sun8i_ss_hash_digest()
|
| /linux/drivers/dma/ti/ |
| A D | k3-udma-glue.c | 83 struct k3_udma_glue_rx_flow *flows; member 956 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv() 957 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv() 958 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv() 968 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv() 1037 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_remote_rx_chn() 1038 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn() 1039 if (!rx_chn->flows) { in k3_udma_glue_request_remote_rx_chn() 1067 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_remote_rx_chn() 1141 flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_get_fdq_id() [all …]
|
| /linux/drivers/infiniband/hw/hfi1/ |
| A D | tid_rdma.c | 1612 kfree(req->flows); in hfi1_kern_exp_rcv_free_flows() 1613 req->flows = NULL; in hfi1_kern_exp_rcv_free_flows() 1637 if (likely(req->flows)) in hfi1_kern_exp_rcv_alloc_flows() 1639 flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp, in hfi1_kern_exp_rcv_alloc_flows() 1641 if (!flows) in hfi1_kern_exp_rcv_alloc_flows() 1645 flows[i].req = req; in hfi1_kern_exp_rcv_alloc_flows() 1646 flows[i].npagesets = 0; in hfi1_kern_exp_rcv_alloc_flows() 1650 req->flows = flows; in hfi1_kern_exp_rcv_alloc_flows() 1691 flow = &req->flows[tail]; in find_flow_ib() 3070 flow = &req->flows[fidx]; in hfi1_tid_rdma_restart_req() [all …]
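hfi1 allocates its flow array lazily and NUMA-locally: the alloc path returns early if req->flows is already set, and otherwise uses kmalloc_node() so the array lands on the device's memory node. A sketch (DEMO_MAX_FLOWS and the struct fields are placeholders):

    #include <linux/slab.h>

    #define DEMO_MAX_FLOWS  32

    struct demo_req;
    struct demo_flow {
            struct demo_req *req;
            u32 npagesets;
    };
    struct demo_req { struct demo_flow *flows; };

    static int demo_alloc_flows(struct demo_req *req, gfp_t gfp, int node)
    {
            struct demo_flow *flows;
            int i;

            if (likely(req->flows))         /* already allocated: nothing to do */
                    return 0;

            /* kmalloc_node(): keep the array on the device's NUMA node */
            flows = kmalloc_node(DEMO_MAX_FLOWS * sizeof(*flows), gfp, node);
            if (!flows)
                    return -ENOMEM;

            for (i = 0; i < DEMO_MAX_FLOWS; i++) {
                    flows[i].req = req;
                    flows[i].npagesets = 0;
            }
            req->flows = flows;
            return 0;
    }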
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| A D | eswitch_offloads.c | 1030 if (!num_vfs || !flows) in mlx5_eswitch_del_send_to_vport_meta_rules() 1036 kvfree(flows); in mlx5_eswitch_del_send_to_vport_meta_rules() 1053 flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL); in mlx5_eswitch_add_send_to_vport_meta_rules() 1054 if (!flows) in mlx5_eswitch_add_send_to_vport_meta_rules() 1100 kvfree(flows); in mlx5_eswitch_add_send_to_vport_meta_rules() 1232 flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); in esw_add_fdb_peer_miss_rules() 1233 if (!flows) { in esw_add_fdb_peer_miss_rules() 1253 flows[vport->index] = flow; in esw_add_fdb_peer_miss_rules() 1289 if (!flows[vport->index]) in esw_add_fdb_peer_miss_rules() 1304 kvfree(flows); in esw_add_fdb_peer_miss_rules() [all …]
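The eswitch code allocates an array of rule handles up front, installs one rule per port, and on failure unwinds only the rules it actually created before freeing the array. That rollback shape, reduced to essentials (demo_* helpers and fields are hypothetical):

    #include <linux/err.h>          /* IS_ERR(), PTR_ERR() */
    #include <linux/slab.h>

    struct demo_rule;
    struct demo_esw { struct demo_rule **peer_flows; };

    static struct demo_rule *demo_install_rule(struct demo_esw *esw, int port);
    static void demo_del_rule(struct demo_rule *rule);

    static int demo_add_rules(struct demo_esw *esw, int nports)
    {
            struct demo_rule **flows;
            int i, err;

            flows = kvcalloc(nports, sizeof(*flows), GFP_KERNEL);
            if (!flows)
                    return -ENOMEM;

            for (i = 0; i < nports; i++) {
                    flows[i] = demo_install_rule(esw, i);
                    if (IS_ERR(flows[i])) {
                            err = PTR_ERR(flows[i]);
                            goto rollback;
                    }
            }
            esw->peer_flows = flows;
            return 0;

    rollback:
            /* undo only the rules that were actually installed */
            while (--i >= 0)
                    demo_del_rule(flows[i]);
            kvfree(flows);
            return err;
    }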
|
| A D | en_rep.h | 177 struct list_head flows; member 195 struct list_head flows; member
|
| /linux/Documentation/networking/ |
| A D | nf_flowtable.rst | 33 specifies what flows are placed into the flowtable. Hence, packets follow the 34 classic IP forwarding path unless the user explicitly instructs flows to use this 111 You can identify offloaded flows through the [OFFLOAD] tag when listing your 130 instead, the real device is sufficient for the flowtable to track your flows. 198 There is a workqueue that adds the flows to the hardware. Note that a few 202 You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when
|
| A D | scaling.rst | 31 of logical flows. Packets for each flow are steered to a separate receive 188 to the same CPU is CPU load imbalance if flows vary in packet rate. 194 Flow Limit is an optional RPS feature that prioritizes small flows 195 during CPU contention by dropping packets from large flows slightly 196 ahead of those from small flows. It is active only when an RPS or RFS 202 new packet is dropped. Packets from other flows are still only 206 even large flows maintain connectivity. 224 identification of large flows and fewer false positives. The default 261 flows to the CPUs where those flows are being processed. The flow hash 266 same CPU. Indeed, with many flows and few CPUs, it is very likely that [all …]
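Flow Limit, as described here, approximates per-flow packet counts in a small table of hashed buckets and only drops from flows that dominate the backlog once the input queue is past half full. A toy model of that heuristic (bucket count and thresholds are invented for illustration; the kernel's implementation differs in detail):

    #define DEMO_BUCKETS    4096

    /* toy flow-limit: count packets per hashed bucket and, once the
     * backlog passes half of its maximum, drop new packets from flows
     * whose bucket count exceeds a per-flow share
     */
    static bool demo_flow_limit_drop(u8 buckets[DEMO_BUCKETS], u32 flow_hash,
                                     u32 backlog_len, u32 backlog_max,
                                     u8 per_flow_share)
    {
            u8 *count = &buckets[flow_hash % DEMO_BUCKETS];

            if (*count < 255)               /* saturating counter */
                    (*count)++;

            if (backlog_len < backlog_max / 2)
                    return false;           /* no contention: never drop */

            return *count > per_flow_share; /* large flows lose new packets */
    }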
|
| A D | openvswitch.rst | 16 table" that userspace populates with "flows" that map from keys based 104 A wildcarded flow can represent a group of exact match flows. Each '1' bit 108 by reducing the number of new flows that need to be processed by the user space program. 120 two possible approaches: reactively install flows as they miss the kernel 130 The behavior when using overlapping wildcarded flows is undefined. It is the 133 performs best-effort detection of overlapping wildcarded flows and may reject 146 future operations. The kernel is not required to index flows by the original
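A wildcarded flow matches when the packet key equals the flow key on every bit set in the mask. In C terms (simplified here to flat byte-array keys):

    #include <linux/types.h>

    /* a '1' bit in mask means that bit of the key must match exactly;
     * '0' bits are wildcarded
     */
    static bool demo_flow_matches(const u8 *pkt_key, const u8 *flow_key,
                                  const u8 *mask, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    if ((pkt_key[i] & mask[i]) != (flow_key[i] & mask[i]))
                            return false;
            return true;
    }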
|
| A D | pktgen.rst | 97 flows: 0 flowlen: 0 112 flows: 0 285 pgset "flows 1" 379 flows
|
| /linux/Documentation/userspace-api/media/mediactl/ |
| A D | media-controller-model.rst | 26 by an entity flows from the entity's output to one or more entity 31 pads, either on the same entity or on different entities. Data flows
|
| /linux/net/core/ |
| A D | pktgen.c | 414 struct flow_state *flows; member 2318 pkt_dev->flows[flow].count = 0; in f_pick() 2319 pkt_dev->flows[flow].flags = 0; in f_pick() 2329 pkt_dev->flows[flow].count = 0; in f_pick() 2330 pkt_dev->flows[flow].flags = 0; in f_pick() 2364 pkt_dev->flows[flow].x = x; in get_ipsec_sa() 2601 pkt_dev->flows[flow].count++; in mod_cur_headers() 2685 pkt_dev->flows[i].x = NULL; in free_SAs() 3753 if (pkt_dev->flows == NULL) { in pktgen_add_device() 3813 vfree(pkt_dev->flows); in pktgen_add_device() [all …]
|
| /linux/Documentation/admin-guide/pm/ |
| A D | system-wide.rst | 11 suspend-flows
|
| A D | suspend-flows.rst | 25 The kernel code flows associated with the suspend and resume transitions for 27 significant differences between the :ref:`suspend-to-idle <s2idle>` code flows 28 and the code flows related to the :ref:`suspend-to-RAM <s2ram>` and 35 available. Apart from that, the suspend and resume code flows for these sleep
|
| /linux/Documentation/admin-guide/blockdev/drbd/ |
| A D | figures.rst | 5 Data flows that relate some functions, and write packets
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| A D | tc_tun_encap.c | 270 list_for_each_entry(efi, &e->flows, list) { in mlx5e_take_all_encap_flows() 391 list_for_each_entry_safe(efi, tmp, &e->flows, list) { in mlx5e_tc_update_neigh_used_value() 436 WARN_ON(!list_empty(&e->flows)); in mlx5e_encap_dealloc() 453 WARN_ON(!list_empty(&d->flows)); in mlx5e_decap_dealloc() 817 INIT_LIST_HEAD(&e->flows); in mlx5e_attach_encap() 854 list_add(&flow->encaps[out_index].list, &e->flows); in mlx5e_attach_encap() 925 INIT_LIST_HEAD(&d->flows); in mlx5e_attach_decap() 948 list_add(&flow->l3_to_l2_reformat, &d->flows); in mlx5e_attach_decap()
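The encap/decap entries above each own a list of attached flows: INIT_LIST_HEAD() when the entry is created, list_add() on attach, and a WARN_ON(!list_empty()) at dealloc to assert that every flow detached first. The invariant in miniature (demo_* names illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/bug.h>          /* WARN_ON() */

    struct demo_encap {
            struct list_head flows; /* flows currently using this entry */
    };

    static void demo_encap_attach(struct demo_encap *e,
                                  struct list_head *flow_node)
    {
            list_add(flow_node, &e->flows);
    }

    static void demo_encap_dealloc(struct demo_encap *e)
    {
            /* every flow must have list_del()'d itself before this point */
            WARN_ON(!list_empty(&e->flows));
            kfree(e);
    }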
|