Lines Matching full:echo

11 echo "Script for testing HBM (Host Bandwidth Manager) framework."
12 echo "It creates a cgroup to use for testing and loads a BPF program to limit"
13 echo "egress or ingress bandwidth. It then uses iperf3 or netperf to create"
14 echo "loads. The output is the goodput in Mbps (unless -D was used)."
15 echo ""
16 echo "USAGE: $name [out] [-b=<prog>|--bpf=<prog>] [-c=<cc>|--cc=<cc>]"
17 echo "             [-D] [-d=<delay>|--delay=<delay>] [--debug] [-E] [--edt]"
18 echo "             [-f=<#flows>|--flows=<#flows>] [-h] [-i=<id>|--id=<id>]"
19 echo "             [-l] [-N] [--no_cn] [-p=<port>|--port=<port>] [-P]"
20 echo "             [-q=<qdisc>] [-R] [-s=<server>|--server=<server>]"
21 echo "             [-S|--stats] [-t=<time>|--time=<time>] [-w] [cubic|dctcp]"
22 echo "  Where:"
23 echo "    out               egress (default)"
24 echo "    -b or --bpf       BPF program filename to load and attach."
25 echo "                      Default is hbm_out_kern.o for egress,"
26 echo "    -c or --cc        TCP congestion control (cubic or dctcp)"
27 echo "    --debug           print BPF trace buffer"
28 echo "    -d or --delay     add a delay in ms using netem"
29 echo "    -D                In addition to the goodput in Mbps, it also outputs"
30 echo "                      other detailed information. This information is"
31 echo "                      test dependent (i.e. iperf3 or netperf)."
32 echo "    -E                enable ECN (not required for dctcp)"
33 echo "    --edt             use fq's Earliest Departure Time (requires fq)"
34 echo "    -f or --flows     number of concurrent flows (default=1)"
35 echo "    -i or --id        cgroup id (an integer, default is 1)"
36 echo "    -N                use netperf instead of iperf3"
37 echo "    --no_cn           Do not return CN notifications"
38 echo "    -l                do not limit flows using loopback"
39 echo "    -h                Help"
40 echo "    -p or --port      iperf3 port (default is 5201)"
41 echo "    -P                use an iperf3 instance for each flow"
42 echo "    -q                use the specified qdisc"
43 echo "    -r or --rate      rate in Mbps (default is 1Gbps)"
44 echo "    -R                Use TCP_RR for netperf. 1st flow has req"
45 echo "                      size of 10KB, rest of 1MB. Reply in all"
46 echo "                      cases is 1 byte."
47 echo "                      More detailed output for each flow can be found"
48 echo "                      in the files netperf.<cg>.<flow>, where <cg> is the"
49 echo "                      cgroup id as specified with the -i flag, and <flow>"
50 echo "                      is the flow id starting at 1 and increasing by 1 for"
51 echo "                      each flow (as specified by -f)."
52 echo "    -s or --server    hostname of netperf server. Used to create netperf"
53 echo "                      test traffic between two hosts (default is within host)."
54 echo "                      netserver must be running on the host."
55 echo "    -S or --stats     whether to update hbm stats (default is yes)."
56 echo "    -t or --time      duration of iperf3 in seconds (default=5)"
57 echo "    -w                Work conserving flag. cgroup can increase its"
58 echo "                      bandwidth beyond the rate limit specified"
59 echo "                      while there is available bandwidth. Current"
60 echo "                      implementation assumes there is only one NIC"
61 echo "                      (eth0), but can be extended to support multiple"
62 echo "                      NICs."
63 echo "    cubic or dctcp    specify which TCP CC to use"
64 echo " "
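As an illustrative example (the script name and all flag values below are assumptions, not taken from the listing), a run that loads the egress program, limits the cgroup to 1 Gbps, and drives it with 4 netperf flows for 30 seconds could look like:

# Hypothetical invocation built from the options documented above;
# adjust the rate, flow count, and duration to the scenario under test.
./do_hbm_test.sh out -b=hbm_out_kern.o -r=1000 -f=4 -t=30 -N -D dctcp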
97 echo "bpffs already mounted"
99 echo "bpffs not mounted. Mounting..."
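The two messages above imply a check that the BPF filesystem is available before the program is loaded. A minimal sketch of such a check, assuming the conventional /sys/fs/bpf mount point:

# Sketch: mount bpffs if it is not already mounted.
if mount | grep -q /sys/fs/bpf ; then
    echo "bpffs already mounted"
else
    echo "bpffs not mounted. Mounting..."
    mount -t bpf bpf /sys/fs/bpf/
fi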
106 echo "./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog" > hbm.out
107 echo " " >> hbm.out
109 echo $!
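Logging the command line to hbm.out and then echoing $! suggests the hbm user-space program is launched in the background and its PID is reported to the caller. A hedged sketch of that pattern (redirecting hbm's own output into hbm.out is an assumption):

# Sketch: start hbm in the background, using the command line already logged to hbm.out.
./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog >> hbm.out 2>&1 &
echo $!    # PID of the background hbm process, read by the caller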
197 echo "Unknown arg:$i"
218 echo $$ >> $cg_dir/cgroup.procs
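Appending $$ to cgroup.procs moves the current shell, and therefore every test process it subsequently spawns, into the test cgroup. A minimal sketch using the cgroup v2 interface, where $cg_dir is assumed to be a directory under the cgroup mount (e.g. /sys/fs/cgroup/<name>):

# Sketch: create the test cgroup and move this shell into it.
mkdir -p $cg_dir
echo $$ >> $cg_dir/cgroup.procs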
237 echo "WARNING: Ignoring the -q option because the -d option is used"
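The warning reflects that -d installs a netem delay, and netem occupies the root qdisc, so a qdisc requested with -q cannot be installed there as well. A sketch of the kind of netem setup implied, assuming the interface is eth0 and $delay holds the delay in ms:

# Sketch: add an artificial delay as the root qdisc on eth0.
tc qdisc add dev eth0 root netem delay ${delay}ms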
312 echo ""
313 echo "Details for HBM in cgroup $id"
326 echo "rate for flow $flow_cnt: $r"
329 echo "-----"
330 echo "Details for cgroup $id, flow $flow_cnt"
336 echo ""
338 echo "PING AVG DELAY:$delay"
339 echo "AGGREGATE_GOODPUT:$rate"
341 echo $rate
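The AGGREGATE_GOODPUT value printed above is presumably the sum of the per-flow rates reported before it. A hedged sketch of such an aggregation, where $flow_rates is a hypothetical list of per-flow Mbps values:

# Sketch: sum per-flow rates (in Mbps) into an aggregate goodput.
rate=0
for r in $flow_rates ; do
    rate=`echo "$rate + $r" | bc`
done
echo "AGGREGATE_GOODPUT:$rate"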
348 rate=`echo $rates | grep -o "[0-9]*$"`
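Here grep -o "[0-9]*$" prints only the trailing run of digits, so the assignment keeps just the last number in $rates. An illustrative example with made-up contents:

# Example: only the digits at the end of the string survive.
rates="per-flow goodputs: 312 498 421"
rate=`echo $rates | grep -o "[0-9]*$"`    # rate is now 421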
351 echo ""
352 echo "Details for HBM in cgroup $id"
359 echo "PING AVG DELAY:$delay"
360 echo "AGGREGATE_GOODPUT:$rate"
362 echo $rate
377 echo ""
378 echo "Details for HBM in cgroup $id"
388 # echo "rate for flow $flow_cnt: $r"
390 echo "Rate for cgroup $id, flow $flow_cnt LOCAL_SEND_THROUGHPUT=$r"
397 echo "PING AVG DELAY:$delay"
398 echo "AGGREGATE_GOODPUT:$rate"
400 echo $rate
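The PING AVG DELAY lines suggest the average round-trip time is extracted from ping's summary output. One common way to do that, shown as a sketch with a hypothetical $host, is to take the avg field of the final "rtt min/avg/max/mdev" line:

# Sketch: average RTT from ping's summary line ($host is a placeholder).
delay=`ping -c 3 $host | tail -1 | awk -F '/' '{print $5}'`
echo "PING AVG DELAY:$delay"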