diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6a9e3b2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +*.pyc +video_files +rate-* diff --git a/TODO.txt b/TODO.txt new file mode 100644 index 0000000..ac361d3 --- /dev/null +++ b/TODO.txt @@ -0,0 +1,12 @@ +Make a streaming client + Socket Communication + Logic for when to request files + Get available bandwidth estimates + Video playback rate as a function of available bandwidth + see figure 9, should be easy given that + Also see text under Figure 20, suggests ten samples +Make a streaming server + Socket Communication + Generate files +Make topology + Bandwidth limited to 5Mbps, buffer size is 120kbit, 4-20 ms RTT diff --git a/client.py b/client.py new file mode 100644 index 0000000..15e116d --- /dev/null +++ b/client.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +import socket +import sys +import os +import threading +import time +import subprocess +from mysocket import send_message, receive_message +from playback_buffer import PlaybackBuffer + +from extras import exec_and_monitor, start_tcpprobe, MovingRecord + +from argparse import ArgumentParser +MOVING_MEDIAN_FILTER_SAMPLE_SIZE =10 +MOVING_AVG_FILTER_SAMPLE_SIZE = 10 + +# rate (bytes per second) = (rate in kilobits * 1000 bits/1 kilo) / 8 bits/byte; +RATES = [235, 375, 560, 750, 1050, 1400, 1750] +# bytes per sec +# [29375, 46875, 70000, 93750, 131250, 175000, 218750] +# bytes for 4 seconds +# [117500, 187500, 280000, 375000, 525000, 700000, 875000] +RATES[:] = [x* (1000 / 8) for x in RATES] + +class Client: + + def __init__(self, server_ip, dir, buffer_size, debug=False): + self.server_ip = server_ip + self.video_rate_filename = '%s/video_rate.txt' % dir + self.tcp_rate_filename = '%s/tcp_rate.txt' % dir + self.request_interval_filename = '%s/request_interval.txt' % dir + self.debug = debug + self.download_record = MovingRecord(10) + self.playback_buffer = PlaybackBuffer(buffer_size, debug=False) + + exec_and_monitor(host=None, cmd=[ + 
'python', 'monitor_throughput.py', '--interface', 'h1-eth0', + '--fname', self.tcp_rate_filename, '--field-name', + 'instantaneous-throughput', '--gran']) + + def test_rates(self): + nsamples = 3 + results = [0] * len(RATES) + for i in range(nsamples): + for idx, r in enumerate(RATES): + self.real_print('checking rate %d for the %dth time' % (r * 8 / 1000, i)) + bytes_needed = r * 4 + download_segment = self.request_segment(bytes_needed) + bw = download_segment[0] * 8 / download_segment[1] + results[idx] += bw + time.sleep(4) + results = [x / nsamples for x in results] + result_dict = {} + for i in range(len(RATES)): + result_dict[RATES[i] * 8 / 1000] = results[i] + self.real_print('-' * 20) + self.real_print('Results {rate : bandwidth}') + self.real_print('-' * 20) + self.real_print(result_dict) + self.real_print('Hit Ctrl-C to exit (Sorry)') + + def run_forever(self): + self.set_up_connection() + self.playback_buffer.start() + if args.dl_rates: + self.test_rates() + return + self.start_time = time.time() + last_request_time = self.start_time + while True: + # Pick rate based on current bandwidth, which is a function of last download size and download_time + rate = self.pick_rate(args.force is not None and time.time() > self.start_time + args.force) + bytes_needed = rate * 4 + if self.playback_buffer.available_bytes() == 0: + self.debug_print('buffer too small') + time.sleep(1) + else: + next_request_time = time.time() + request_interval = next_request_time - last_request_time + last_request_time = next_request_time + self.debug_print('buffer big enough') + download_segment = self.request_segment(bytes_needed) + self.download_record.add_sample(download_segment[0] * 8 / download_segment[1]) + request_interval_file = open(self.request_interval_filename, 'a') + request_interval_file.write("%f,request-interval,%f,0,0,0,0,0,0\n" % (last_request_time, request_interval)) + request_interval_file.close() + self.debug_print('rate: %d\n' % (rate * 8 / 1000)) + + def 
set_up_connection(self): + self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.socket.connect((self.server_ip, 8000,)) + + # Not sure if the callback is necessary. + def request_segment(self, bytes_four_second): + # Request 4-second segments of video + send_message(self.socket, "%s" % bytes_four_second) + (message, download_time) = receive_message(self.socket, timed=True) + self.debug_print('download time %f message length (bytes) %d bw %d' % (download_time, len(message), (len(message)*8)/download_time)) + self.playback_buffer.add_four_second_segment(bytes_four_second) + return [len(message), download_time] + + # Return tuple of total download size and total download time + def current_bandwidth_estimate(self): + return self.download_record.average() + + def pick_rate(self, force): + # According to Figure 9 + throughput = self.current_bandwidth_estimate() + + # print 'size (bits) %d time %f bw %f instantaneous bw %f' %(bw[0], bw[1], (bw[0]/bw[1]), ((download_segment[0] * 8)/download_segment[1])) + + # bw is in kbps + if throughput > (2500 * 1000): + rate = RATES[6] # 1750 + elif throughput > (2150 * 1000): + rate = RATES[5] # 1400 + elif throughput > (1300 * 1000): + rate = RATES[4] # 1050 + elif throughput > (1100 * 1000): + rate = RATES[3] # 750 + elif throughput > (740 * 1000): + rate = RATES[2] # 560 + elif throughput > (500 * 1000): + rate = RATES[1] # 375 + else: + rate = RATES[0] + + if force: + rate = RATES[-1] + + # unix_timestamp;iface_name;bytes_out;bytes_in;bytes_total;packets_out;packets_in;packets_total;errors_out;errors_in + self.rate_file = open(self.video_rate_filename, 'a') + # video flow throughput is the running average bw + current_time = time.time() + self.rate_file.write("%f,video-flow-throughput,0,%d,0,0,0,0,0\n" % (current_time, throughput)) + # Translate rate to bits per second + self.rate_file.write("%f,video-playback-rate,0,%d,0,0,0,0,0\n" % (current_time, (rate * 8))) + self.rate_file.close() + + return rate + + def 
class MovingRecord:
    """Fixed-capacity sliding window of numeric samples.

    Keeps at most num_samples values; adding to a full window evicts the
    oldest (FIFO).  Used for moving-average bandwidth estimates.
    """

    def __init__(self, num_samples):
        # samples is ordered oldest-first; num_samples is the window cap.
        self.samples = []
        self.num_samples = num_samples

    def add_sample(self, sample):
        """Append *sample*, evicting the oldest value once full."""
        if len(self.samples) == self.num_samples:
            self.samples.pop(0)
        self.samples.append(sample)

    def average(self):
        """Arithmetic mean of the window, or 0 when empty.

        Note: callers store float bandwidths, so Py2 integer truncation
        is not a concern in practice.
        """
        if not self.samples:
            return 0
        return sum(self.samples) / len(self.samples)

    def median(self):
        """Median of the window, or 0 when empty."""
        n = len(self.samples)
        if n == 0:
            return 0
        s = sorted(self.samples)
        # Bug fix: use floor division for the index -- `n/2` produces a
        # float under Python 3 and raises TypeError when subscripting.
        mid = n // 2
        if n % 2 == 0:
            return (s[mid] + s[mid - 1]) / 2
        return s[mid]
def verify_latency(h1, h2, expected_rtt):
    """Verify the client-server RTT in both directions.

    Pings h1 -> h2 and h2 -> h1 with a 5% tolerance;
    verify_rtt_between_endpoints() exits the process on failure.
    """
    print('Verifying client-server latency...')
    verify_rtt_between_endpoints(h1, h2, expected_rtt, 0.05)
    # Bug fix: the second check previously was (h2, h2), which pinged h2
    # from itself and measured loopback latency instead of the reverse
    # path.  Check h2 -> h1 as intended.
    verify_rtt_between_endpoints(h2, h1, expected_rtt, 0.05)
def get_rxbytes(iface, lines=None):
    """Return the receive-byte counter for *iface* from /proc/net/dev.

    iface: interface name (substring-matched against each line).
    lines: optional pre-read list of /proc/net/dev lines; when None the
           file is read directly.  The parameter is new and defaulted, so
           existing callers are unaffected, and it makes the parser
           testable without a live interface.

    Raises Exception when no line mentions the interface.

    /proc/net/dev layout -- the field right after "iface:" is RX bytes
    (the original comment said TX, but column 1 is the Receive counter):
    Inter-|   Receive                  | Transmit
     face |bytes    packets errs ...   |bytes    packets errs ...
        lo: 6175728   53444 0 ...
    """
    if lines is None:
        with open('/proc/net/dev', 'r') as f:
            lines = f.readlines()
    for line in lines:
        if iface in line:
            # First field after the interface label is the RX byte count.
            return float(line.split()[1])
    # Bug fix: the original only checked `if not line` after the loop, so
    # when the iface was missing it silently parsed the file's LAST line
    # instead of raising.  Now we raise whenever no line matched.
    raise Exception("could not find iface %s in /proc/net/dev:%s" %
                    (iface, lines))
def send_message(socket, msg):
    """Send *msg* plus a trailing "\n" terminator on a connected socket.

    Returns True when everything was delivered, False if the peer closed
    the connection (send() returned 0).  The receiver treats "\n" as the
    end-of-message marker (see receive_message).

    Note: the parameter name shadows the stdlib `socket` module; kept
    unchanged for interface compatibility.
    """
    total_bytes_written = 0
    msg_len = len(msg)
    while total_bytes_written < msg_len:
        # Bug fix: resume from the current offset.  send() may accept
        # only part of the buffer; the original re-sent `msg` from the
        # start on every iteration, duplicating already-delivered bytes
        # whenever a short write occurred.
        bytes_written = socket.send(msg[total_bytes_written:])
        if bytes_written == 0:
            return False
        total_bytes_written += bytes_written
    return socket.send("\n") == 1
start(self): + t = threading.Thread(target=PlaybackBuffer.run_forever, args=(self,)) + t.setDaemon(True) + t.start() + + def _run_forever(self): + interval = 0.5 + while True: + time.sleep(interval) + self.lock.acquire() + if self.buffer > 0: + self.buffer = self.buffer - 0.5 + self.debug_print("Drain buffer %f" % self.buffer) + self.lock.release() + + def run_forever(self): + try: + self._run_forever() + except Exception as e: + print '----- error in buffer -----' + print e + import traceback + traceback.print_exc() + sys.stdout.flush() + + # Assume available_bytes is checked before adding + def add_four_second_segment(self, nbytes_four_second): + self.lock.acquire() + self.buffer = self.buffer + 4 # Each buffer element is a 4 second chunk + self.debug_print("Add buffer %f" % self.buffer) + self.lock.release() + + def available_bytes(self): + available = 0 + self.lock.acquire() + if (self.buffer <= self.buffer_size): # Buffer contains 240 seconds of video play + available = 1 + self.lock.release() + return available + + def debug_print(self, msg): + if self.debug: + print msg + sys.stdout.flush() diff --git a/rate.py b/rate.py new file mode 100644 index 0000000..3ebf052 --- /dev/null +++ b/rate.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +"CS244 Assignment 2: Buffer Sizing" + +from mininet.topo import Topo +from mininet.node import CPULimitedHost +from mininet.link import TCLink +from mininet.net import Mininet +from mininet.log import lg +from mininet.util import dumpNodeConnections +from mininet.cli import CLI + +import threading +import subprocess +from subprocess import Popen, PIPE +import time +from multiprocessing import Process +from argparse import ArgumentParser + +from extras import exec_and_monitor, cprint + +import sys +import os +from util.monitor import monitor_qlen + +# Time to wait between samples, in seconds, as a float. +SAMPLE_PERIOD_SEC = 1.0 + +# Time to wait for first sample, in seconds, as a float. 
# Topology to be instantiated in Mininet
class StarTopo(Topo):
    # Star topology: client hosts h1..h(n-1) and one server host h<n>
    # joined by a single switch; the switch->server link is the
    # rate-limited bottleneck.
    #
    # bw_host = bandwidth of long-lived TCP video and competing streams
    #           = 100 Mb/s
    # bw_net  = bottleneck bandwidth = 5 Mb/s
    # delay   = 1 ms per link; with two links between a client and the
    #           server, each crossed twice, this presumably yields the
    #           experiment's 4 ms RTT target -- TODO confirm
    # maxq    = default 10 pkts (but we will vary this parameter)
    #
    # NOTE(review): the constructor's bw_host, bw_net, delay and maxq
    # parameters are accepted but ignored -- hard-coded values (and the
    # global args.maxq) are used instead, so main()'s delay='4ms' has no
    # effect.  Confirm before relying on the constructor arguments.
    def __init__(self, n=3, cpu=None, bw_host=None, bw_net=None,
                 delay=None, maxq=None):
        super(StarTopo, self ).__init__()
        self.n = n
        self.cpu = cpu
        self.bw_host = 100
        self.bw_net = 5
        self.delay = '1ms'
        # Queue size in packets, taken from the command line.
        self.maxq = args.maxq
        self.create_topology()

    def create_topology(self):
        """Build the star: fast client links plus the bottleneck link."""
        server = self.addHost('h%d' % self.n)
        switch = self.addSwitch('s1')
        for i in range(self.n-1):
            host = self.addHost('h%d' % (i+1))
            self.addLink(host, switch, bw=self.bw_host,
                         delay=self.delay, use_htb=True)
        # Bottleneck: rate-limited and queue-limited switch->server link.
        self.addLink(switch, server, bw=self.bw_net, delay=self.delay,
                     max_queue_size=self.maxq, use_htb=True)
def normal_test(net, force_time):
    """Run the main streaming experiment.

    Starts the video server on h3 and the streaming client on h1,
    monitors the bottleneck queue, and starts/stops a competing iperf
    flow between h2 and h3 partway through the run (timing comes from
    the global args).  force_time, when not None, is forwarded to the
    client as the time after which the playback rate is forced to max.
    Blocks until args.time seconds have elapsed, then kills everything
    and stops the network.
    """
    h1, h2, h3 = net.getNodeByName('h1', 'h2', 'h3')
    print 'starting server'
    server_proc = exec_and_monitor(h3, 'python server.py')
    # Give the server a moment to bind its listening socket before the
    # client connects.
    time.sleep(0.5)

    start_tcpprobe()

    print 'starting client'
    if force_time is not None:
        client_proc = exec_and_monitor(
            h1, 'python client.py --server-ip %s --dir %s --buffer-size %d --force %d' %
            (h3.IP(), args.dir, args.playback_buffer_size, force_time))
    else:
        client_proc = exec_and_monitor(
            h1, 'python client.py --server-ip %s --dir %s --buffer-size %d' %
            (h3.IP(), args.dir, args.playback_buffer_size))

    # Queue monitoring start
    # NOTE(review): the interface is hard-coded here and in run.sh;
    # keep the two in sync if the topology changes.
    iface='s1-eth3'
    outfile = '%s/qlen_%s.txt' % (args.dir, iface)
    monitor = Process(target=monitor_qlen, args=(iface, 0.01, outfile))
    monitor.start()

    # Main timing loop: poll every 5 s, starting/stopping the competing
    # flow exactly once each when its window opens/closes.
    start_time = time.time()
    competing_flow_started = False
    competing_flow_stopped = False
    competing_flow_start_time = start_time + args.competing_flow_wait_time
    competing_flow_stop_time = competing_flow_start_time + args.competing_flow_run_time
    while (True):
        current_time = time.time()
        if current_time > start_time + args.time:
            break
        if (current_time > competing_flow_start_time and not competing_flow_started):
            start_competing_flow(net)
            competing_flow_started = True
        if (current_time > competing_flow_stop_time and not competing_flow_stopped):
            stop_competing_flow(net)
            competing_flow_stopped = True
        cprint("Time remaining: %f" % (start_time + args.time - current_time), "blue")
        time.sleep(5)

    server_proc.kill()
    client_proc.kill()

    # Queue monitoring stop
    monitor.terminate()

    net.stop()
def main():
    """Create network and run Buffer Sizing experiment.

    Builds the star topology, sanity-checks connectivity, dispatches to
    the test selected by --test ('none', 'dl' or 'force'), and cleans up
    stray processes afterwards.
    """
    start = time.time()
    # Reset to known state
    # NOTE(review): StarTopo currently ignores the bw/delay/maxq keyword
    # values passed here except via the global args -- see StarTopo.
    topo = StarTopo(n=3, bw_host=100, delay='4ms', bw_net=5, maxq=args.maxq)
    net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
    net.start()
    dumpNodeConnections(net.hosts)
    # Warm up ARP/forwarding state before measurements begin.
    net.pingAll()


    cprint("Starting experiment", "green")
    print "Directory logs: %s" % args.dir

    if args.test == 'none':
        normal_test(net, None)
    elif args.test == 'dl':
        download_rates_test(net)
    elif args.test == 'force':
        normal_test(net, args.force)

    # Shut down iperf processes
    os.system('killall -9 iperf')

    Popen("killall -9 top bwm-ng tcpdump cat mnexec iperf python", shell=True).wait()
    stop_tcpprobe()
    end = time.time()
    cprint("Sweep took %.3f seconds" % (end - start), "yellow")
+iface=s1-eth3 + +if [ $# -eq 1 ]; then + test=$1 +else + test="none" +fi + +for run in 1; do + dir=$rootdir + + ./rate.py --maxq 10 --dir $dir -t 1300 -c 300 -p 240 -r 900 --force 450 --test $test # 120 * 10^3 bit / (1500 bytes * 8 bits/byte) = 10 packets + + if [ $test = "none" ]; then + echo "Figure 20 in rate.png" + python $plotpath/plot_rates.py -f $dir/video_rate.txt $dir/competing_throughput.txt -o $dir/rate.png --rx --miny 0 --maxy 5000 + echo "Figure 6a in tcp_rate.png" + python $plotpath/plot_rates.py -f $dir/tcp_rate.txt -o $dir/tcp_rate.png --rx --miny 0 --maxy 5000 --start 160 --length 50 + echo "Figure 8a in cwnd.png" + python $plotpath/plot_tcpprobe.py -f $dir/tcp_probe.txt -o $dir/cwnd.png -p 8000 --sport -s 600 -l 3 + echo "Figure 6b in request-interval.png" + python $plotpath/plot_request_interval.py -f $dir/request_interval.txt -o $dir/request-interval.png --miny 0 --maxy 10 + elif [ $test = "dl" ]; then + echo "Hit Ctrl-C to exit" + elif [ $test = "force" ]; then + echo "Figure 5 in rate.png" + python $plotpath/plot_rates.py -f $dir/video_rate.txt $dir/competing_throughput.txt -o $dir/rate.png --rx --miny 0 --maxy 5000 + fi +done diff --git a/server.py b/server.py new file mode 100644 index 0000000..b75c4a3 --- /dev/null +++ b/server.py @@ -0,0 +1,45 @@ +#!/usr/bin/python +import os +import sys +import socket +import threading +import time +from mysocket import send_message, receive_message + +from argparse import ArgumentParser + +class Server: + def __init__(self, debug=False): + self.debug = debug + + def run_forever(self): + self.debug_print('server hello') + serversocket = socket.socket() + serversocket.bind(('0.0.0.0', 8000)) + serversocket.listen(5) + (self.clientsocket, addr) = serversocket.accept() + + while True: + response_length = int(receive_message(self.clientsocket)) + self.debug_print('server received request') + if response_length == 0: + print "something is wrong?" 
def ewma(alpha, values):
    """Exponentially weighted moving average of *values*.

    alpha is the weight given to history (the previous smoothed value),
    so alpha == 0 means no smoothing and the input list is returned
    unchanged (same object).  The running value starts at 0, which
    biases the first few outputs toward zero.
    """
    if alpha == 0:
        return values
    smoothed = []
    running = 0
    for sample in values:
        running = alpha * running + (1 - alpha) * sample
        smoothed.append(running)
    return smoothed
def cdf(values):
    """Return (x, y) points of the empirical CDF of *values*.

    x is the sorted values; y[i] is the cumulative probability after
    value x[i] (the last y is 1.0).  Returns ([], []) for empty input.

    Fix: operates on a sorted copy instead of calling values.sort(),
    which mutated the caller's list as a side effect.
    """
    ordered = sorted(values)
    n = len(ordered)
    prob = 0
    x, y = [], []

    for v in ordered:
        prob += 1.0 / n
        x.append(v)
        y.append(prob)

    return (x, y)
def monitor_qlen(iface, interval_sec = 0.01, fname='%s/qlen.txt' % default_dir):
    """Poll `tc -s qdisc` for *iface* and log timestamped queue lengths.

    Appends "<unix_time>,<backlog_pkts>" lines to *fname* every
    *interval_sec* seconds.  Loops forever; callers run it inside a
    multiprocessing.Process and terminate() it when done.
    """
    # Matches the packet count in tc's "backlog 123b 45p" output.
    pat_queued = re.compile(r'backlog\s[^\s]+\s([\d]+)p')
    cmd = "tc -s qdisc show dev %s" % (iface)
    ret = []
    # Truncate any previous output file.
    open(fname, 'w').write('')
    while 1:
        p = Popen(cmd, shell=True, stdout=PIPE)
        output = p.stdout.read()
        # Not quite right, but will do for now
        matches = pat_queued.findall(output)
        # NOTE(review): records the SECOND backlog match (matches[1]) --
        # presumably the htb/netem child qdisc rather than the root;
        # samples are silently skipped when fewer than two appear.
        # Confirm against the actual `tc` output for this topology.
        if matches and len(matches) > 1:
            ret.append(matches[1])
            t = "%f" % time()
            open(fname, 'a').write(t + ',' + matches[1] + '\n')
        sleep(interval_sec)
    #open('qlen.txt', 'w').write('\n'.join(ret))
    return
def monitor_devs(dev_pattern='^s', fname="%s/bytes_sent.txt" %
        default_dir, interval_sec=0.01):

    """Aggregates (sums) all txed bytes and rate (in Mbps) from
    devices whose name matches @dev_pattern and writes to @fname"""
    pat = re.compile(dev_pattern)
    spaces = re.compile('\s+')
    # Truncate any previous output file.
    open(fname, 'w').write('')
    # iface -> TX byte counter at the previous sample.
    prev_tx = {}
    while 1:
        lines = open('/proc/net/dev').read().split('\n')
        t = str(time())
        total = 0
        for line in lines:
            line = spaces.split(line.strip())
            iface = line[0]
            if pat.match(iface) and len(line) > 9:
                # Field 9 after the iface label is the TX byte counter.
                tx_bytes = int(line[9])
                # First sighting of an iface contributes 0 (delta against
                # itself) rather than its absolute counter.
                total += tx_bytes - prev_tx.get(iface, tx_bytes)
                prev_tx[iface] = tx_bytes
        # CSV row: time, rate over the interval in Mbps, delta bytes.
        open(fname, 'a').write(','.join([t,
            str(total * 8 / interval_sec / 1e6), str(total)]) + "\n")
        sleep(interval_sec)
    return
def convertToStep(x, y):
    """Convert (x, y) to a "stepped" series for step-function plotting.

    Every interior point is duplicated: after each (x[i], y[i]) the pair
    (x[i+1], y[i]) is inserted, so the plotted line holds its y value
    until the next x.  Returns the two new lists as a tuple.
    """
    newx, newy = [], []
    n = len(x)
    for i in range(n):
        newx.append(x[i])
        newy.append(y[i])
        # Hold y[i] until the next x -- skip after the final point.
        if i + 1 < n:
            newx.append(x[i + 1])
            newy.append(y[i])
    return newx, newy
+ """ + fig = plt.figure() + ax = plt.subplot(111) + cgen = colorGenerator() + colors = {} + + fig.canvas.set_window_title( title ) + + for d in data: + x = d['x'] + y = d['y'] + if step: + x, y = convertToStep(x, y) + plt.plot( x, y, label=d['label'], color=cgen.next()) + + plt.xlabel( xlabel, fontsize=20 ) + plt.ylabel( ylabel, fontsize=20 ) + plt.grid( True ) + for tick in ax.xaxis.get_major_ticks(): + tick.label1.set_fontsize(18) + for tick in ax.yaxis.get_major_ticks(): + tick.label1.set_fontsize(18) + + return fig + +def plotCDF(data, title, xlabel, ylabel, step): + data_mod = [] + for index, line in enumerate(data): + vals = sorted(line['y']) + x = sorted(vals) + y = [(float(i) / len(x)) for i in range(len(x))] + x, y = convertToStepUpCDF(x, y) + entry = {} + entry['x'] = x + entry['y'] = y + entry['label'] = line['label'] + data_mod.append(entry) + + return plotTimeSeries(data_mod, title, xlabel, ylabel, step = False) + +if __name__ == '__main__': + print convertToStep([1, 2, 3], [4, 5, 6]) diff --git a/util/plot_cpu.py b/util/plot_cpu.py new file mode 100644 index 0000000..66b45da --- /dev/null +++ b/util/plot_cpu.py @@ -0,0 +1,87 @@ +''' +Plot CPU utilization of each virtual host. +''' + +from helper import * + +parser = argparse.ArgumentParser("Plot stacked bar chart of CPU usage") +parser.add_argument('--files', '-f', + help="File to read CPU usage from.", + required=True, + nargs="+", + dest="files") + +parser.add_argument('--out', '-o', + help="Output png for plot", + default=None, + dest="out") + +parser.add_argument('-s', '--summarise', + help="Summarise the time series plot (boxplot). 
First 10 and last 10 values are ignored.", + default=False, + dest="summarise", + action="store_true") + +parser.add_argument('--labels', '-l', + help="Labels for x-axis if summarising; defaults to file names", + required=False, + default=None, + nargs="+", + dest="labels") + +args = parser.parse_args() +if args.labels is None: + args.labels = args.files + +def aggregate(data): + """Aggregates to give a total cpu usage""" + data = map(list, data) + return map(sum, zip(*data)) + +def plot_series(): + data = parse_cpu_usage(args.files[0]) + N = len(data) + data = transpose(data) + ind = range(N) + width=1 + colours = ['y','g','r','b','purple','brown','cyan'] + legend = "user,system,nice,iowait,hirq,sirq,steal".split(',') + nfields = 7 + legend = legend[0:nfields] + p = [0]*nfields + bottom = [0]*N + + plt.ylabel("CPU %") + plt.xlabel("Seconds") + for i in xrange(nfields): + p[i] = plt.bar(ind[0:N], data[i], width, bottom=bottom, color=colours[i]) + for j in xrange(N): + bottom[j] += data[i][j] + plt.legend(map(lambda e: e[0], p), legend) + +def plot_summary(): + plt.ylabel("CPU %") + to_plot=[] + for f in args.files: + data = parse_cpu_usage(f) + N = len(data) + data = transpose(data) + ind = range(N) + data = aggregate(data) + to_plot.append(data[10:-10]) + plots = plt.boxplot(to_plot) + plt.yticks(range(0,110,10)) + plt.title("CPU utilisation") + plt.grid() + plt.xticks(range(1, 1+len(args.files)), args.labels) + +if args.summarise: + plot_summary() +else: + plot_series() + +if args.out is None: + plt.show() +else: + plt.savefig(args.out) + diff --git a/util/plot_defaults.py b/util/plot_defaults.py new file mode 100644 index 0000000..c7ebe60 --- /dev/null +++ b/util/plot_defaults.py @@ -0,0 +1,50 @@ +''' +Matplotlib parameters to create pretty plots +''' + +from matplotlib import rc, rcParams + + +DEF_AXIS_LEFT = 0.15 +DEF_AXIS_RIGHT = 0.95 +DEF_AXIS_BOTTOM = 0.1 +DEF_AXIS_TOP = 0.95 +DEF_AXIS_WIDTH = DEF_AXIS_RIGHT - DEF_AXIS_LEFT +DEF_AXIS_HEIGHT = DEF_AXIS_TOP - 
DEF_AXIS_BOTTOM +# add_axes takes [left, bottom, width, height] +DEF_AXES = [DEF_AXIS_LEFT, DEF_AXIS_BOTTOM, DEF_AXIS_WIDTH, DEF_AXIS_HEIGHT] + +AXIS_2Y_RIGHT = 0.8 +AXIS_2Y_WIDTH = AXIS_2Y_RIGHT - DEF_AXIS_LEFT +AXES_2Y = [DEF_AXIS_LEFT, DEF_AXIS_BOTTOM, AXIS_2Y_WIDTH, DEF_AXIS_HEIGHT] + +AXES_LABELSIZE = 24 +TICK_LABELSIZE = 24 +TEXT_LABELSIZE = 24 + +COLOR_LIGHTGRAY = '#cccccc' + +#COLOR_HLINES = '#606060' +COLOR_HLINES = 'black' +HLINE_LABELSIZE = 24 +HLINE_LINEWIDTH = 2 + +rc('axes', **{'labelsize' : 'large', + 'titlesize' : 'large', + 'grid' : True}) +rc('legend', **{'fontsize': 'xx-large'}) +rcParams['axes.labelsize'] = AXES_LABELSIZE +rcParams['xtick.labelsize'] = TICK_LABELSIZE +rcParams['ytick.labelsize'] = TICK_LABELSIZE +rcParams['xtick.major.pad'] = 4 +rcParams['ytick.major.pad'] = 6 +rcParams['figure.subplot.bottom'] = DEF_AXIS_LEFT +rcParams['figure.subplot.left'] = DEF_AXIS_LEFT +rcParams['figure.subplot.right'] = DEF_AXIS_RIGHT +rcParams['lines.linewidth'] = 2 +rcParams['grid.color'] = COLOR_LIGHTGRAY +rcParams['grid.linewidth'] = 0.6 +rcParams['ps.useafm'] = True +rcParams['pdf.use14corefonts'] = True +#rcParams['text.usetex'] = True + diff --git a/util/plot_queue.py b/util/plot_queue.py new file mode 100644 index 0000000..89cb602 --- /dev/null +++ b/util/plot_queue.py @@ -0,0 +1,130 @@ +''' +Plot queue occupancy over time +''' + +from helper import * +import plot_defaults + +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', + help="Queue timeseries output to one plot", + required=True, + action="store", + nargs='+', + dest="files") + +parser.add_argument('--maxy', + help="Max mbps on y-axis..", + type=int, + default=1000, + action="store", + dest="maxy") + +parser.add_argument('--miny', + help="Min mbps on y-axis..", + type=int, + default=0, + action="store", + dest="miny") + +parser.add_argument('--legend', '-l', + help="Legend to use if there are multiple plots. 
def get_style(i):
    """Line style for the i-th plotted file.

    The first series is drawn solid red; every later series is
    dash-dot black so overlapping queue traces stay distinguishable.
    """
    primary = {'color': 'red'}
    secondary = {'color': 'black', 'ls': '-.'}
    return primary if i == 0 else secondary
arrowprops=dict(arrowstyle="->")) +elif args.cdf: + for i,data in enumerate(to_plot): + xs, ys = cdf(map(int, data)) + plt.plot(xs, ys, label=args.legend[i], lw=2, **get_style(i)) + plt.ylabel("Fraction") + plt.xlabel("Packets") + plt.ylim((0, 1.0)) + plt.legend(args.legend, loc="upper left") + plt.title("") +else: + plt.xlabel("Seconds") + if args.legend: + plt.legend(args.legend, loc="upper left") + else: + plt.legend(args.files) + +if args.out: + plt.savefig(args.out) +else: + plt.show() diff --git a/util/plot_rate.py b/util/plot_rate.py new file mode 100644 index 0000000..bd5e99f --- /dev/null +++ b/util/plot_rate.py @@ -0,0 +1,164 @@ +from helper import * + +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', + help="Rate timeseries output to one plot", + required=True, + action="store", + nargs='+', + dest="files") + +parser.add_argument('--legend', '-l', + help="Legend to use if there are multiple plots. File names used as default.", + action="store", + nargs="+", + default=None, + dest="legend") + +parser.add_argument('--out', '-o', + help="Output png file for the plot.", + default=None, # Will show the plot + dest="out") + +parser.add_argument('-s', '--summarise', + help="Summarise the time series plot (boxplot). 
First 10 and last 10 values are ignored.", + default=False, + dest="summarise", + action="store_true") + +parser.add_argument('--labels', + help="Labels for x-axis if summarising; defaults to file names", + required=False, + default=[], + nargs="+", + dest="labels") + +parser.add_argument('--xlabel', + help="Custom label for x-axis", + required=False, + default=None, + dest="xlabel") + +parser.add_argument('--ylabel', + help="Custom label for y-axis", + required=False, + default=None, + dest="ylabel") + +parser.add_argument('-i', + help="Interfaces to plot (regex)", + default=".*", + dest="pat_iface") + +parser.add_argument('--rx', + help="Plot receive rates on the interfaces.", + default=False, + action="store_true", + dest="rx") + +parser.add_argument('--maxy', + help="Max mbps on y-axis..", + default=100, + action="store", + dest="maxy") + +parser.add_argument('--miny', + help="Min mbps on y-axis..", + default=0, + action="store", + dest="miny") + +parser.add_argument('--normalize', + help="normalise y-axis", + default=False, + action="store_true", + dest="normalise") + +args = parser.parse_args() +if args.labels is None: + args.labels = args.files + +pat_iface = re.compile(args.pat_iface) + +to_plot=[] +"""Output of bwm-ng csv has the following columns: +unix_timestamp;iface_name;bytes_out;bytes_in;bytes_total;packets_out;packets_in;packets_total;errors_out;errors_in +""" + +if args.normalise and args.labels == []: + raise "Labels required if summarising/normalising." 
+ sys.exit(-1) + +bw = map(lambda e: int(e.replace('M','')), args.labels) +idx = 0 + +for f in args.files: + data = read_list(f) + #xaxis = map(float, col(0, data)) + #start_time = xaxis[0] + #xaxis = map(lambda x: x - start_time, xaxis) + #rate = map(float, col(2, data)) + rate = {} + column = 2 + if args.rx: + column = 3 + for row in data: + try: + ifname = row[1] + except: + break + if ifname not in ['eth0', 'lo']: + if not rate.has_key(ifname): + rate[ifname] = [] + try: + rate[ifname].append(float(row[column]) * 8.0 / (1 << 20)) + except: + break + + if args.summarise: + for k in rate.keys(): + if pat_iface.match(k): + print k + vals = filter(lambda e: e < 1500, rate[k][10:-10]) + if args.normalise: + vals = map(lambda e: e / bw[idx], vals) + idx += 1 + to_plot.append(vals) + else: + for k in sorted(rate.keys()): + if pat_iface.match(k): + print k + plt.plot(rate[k], label=k) + +plt.title("TX rates") +if args.rx: + plt.title("RX rates") + +if args.ylabel: + plt.ylabel(args.ylabel) +elif args.normalise: + plt.ylabel("Normalized BW") +else: + plt.ylabel("Mbps") + +plt.grid() +plt.legend() +plt.ylim((int(args.miny), int(args.maxy))) + +if args.summarise: + plt.boxplot(to_plot) + plt.xticks(range(1, 1+len(args.files)), args.labels) + +if not args.summarise: + if args.xlabel: + plt.xlabel(args.xlabel) + else: + plt.xlabel("Time") + if args.legend: + plt.legend(args.legend) + +if args.out: + plt.savefig(args.out) +else: + plt.show() + diff --git a/util/plot_rates.py b/util/plot_rates.py new file mode 100644 index 0000000..bc308d7 --- /dev/null +++ b/util/plot_rates.py @@ -0,0 +1,198 @@ +import os +from helper import * +from matplotlib.font_manager import FontProperties + +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', + help="Rate timeseries output to one plot", + required=True, + action="store", + nargs='+', + dest="files") + +parser.add_argument('--legend', '-l', + help="Legend to use if there are multiple plots. 
File names used as default.", + action="store", + nargs="+", + default=None, + dest="legend") + +parser.add_argument('--out', '-o', + help="Output png file for the plot.", + default=None, # Will show the plot + dest="out") + +parser.add_argument('-s', '--summarise', + help="Summarise the time series plot (boxplot). First 10 and last 10 values are ignored.", + default=False, + dest="summarise", + action="store_true") + +parser.add_argument('--labels', + help="Labels for x-axis if summarising; defaults to file names", + required=False, + default=[], + nargs="+", + dest="labels") + +parser.add_argument('--xlabel', + help="Custom label for x-axis", + required=False, + default=None, + dest="xlabel") + +parser.add_argument('--ylabel', + help="Custom label for y-axis", + required=False, + default=None, + dest="ylabel") + +parser.add_argument('-i', + help="Interfaces to plot (regex)", + default=".*", + dest="pat_iface") + +parser.add_argument('--rx', + help="Plot receive rates on the interfaces.", + default=False, + action="store_true", + dest="rx") + +parser.add_argument('--maxy', + help="Max mbps on y-axis..", + default=100, + action="store", + dest="maxy") + +parser.add_argument('--miny', + help="Min mbps on y-axis..", + default=0, + action="store", + dest="miny") + +parser.add_argument('--normalize', + help="normalise y-axis", + default=False, + action="store_true", + dest="normalise") + +parser.add_argument('--start', dest="start", default=None, type=int) +parser.add_argument('--length', dest="length", default=None, type=int) + +args = parser.parse_args() +if args.labels is None: + args.labels = args.files + +pat_iface = re.compile(args.pat_iface) + +to_plot=[] +"""Output of bwm-ng csv has the following columns: +unix_timestamp;iface_name;bytes_out;throughput;bytes_total;packets_out;packets_in;packets_total;errors_out;errors_in +""" + +if args.normalise and args.labels == []: + raise "Labels required if summarising/normalising." 
+ sys.exit(-1) + +bw = map(lambda e: int(e.replace('M','')), args.labels) +idx = 0 + +rate = {} +for f in args.files: + if not os.path.exists(f): + continue + data = read_list(f) + #xaxis = map(float, col(0, data)) + #start_time = xaxis[0] + #xaxis = map(lambda x: x - start_time, xaxis) + #rate = map(float, col(2, data)) + column = 2 + if args.rx: + column = 3 + for row in data: + try: + ifname = row[1] + except: + break + if ifname not in ['eth0', 'lo']: + x = float(row[0]) + y = float(row[column]) / (1000) + if not rate.has_key(ifname): + rate[ifname] = ([], [],) + try: + rate[ifname][0].append(x) + rate[ifname][1].append(y) + except Exception as e: + print e + break + +earliest_time = -1 +for k in sorted(rate.keys()): + times = rate[k][0] + if earliest_time == -1 or times[0] < earliest_time: + earliest_time = times[0] +''' +UGH why doesn't this work +if args.start and args.length: + for k in sorted(rate.keys()): + data = rate[k] + new_data = [] + for i in range(len(data[0])): + if data[0][i]-earliest_time > args.start and data[0][i]-earliest_time) < (args.start + args.length): + new_data.append([data[0][i], data[1][i]]) + rate[k] = new_data + print new_data +''' + +for k in sorted(rate.keys()): + data = rate[k] + times = rate[k][0] + time_offsets = [t - earliest_time for t in times] + values = rate[k][1] + + if pat_iface.match(k): + print k + if k == "video-flow-throughput": + plt.plot(time_offsets, values, label=k, marker='x', color='b') + elif k == "video-playback-rate": + plt.plot(time_offsets, values, label=k, marker='+', color='r') + elif k == "competing-throughput": + plt.plot(time_offsets, values, 'k--', label=k, color='g') + else: + plt.plot(time_offsets, values, label=k, color='y') + +plt.title("TX rates") +if args.rx: + plt.title("RX rates") + +if args.ylabel: + plt.ylabel(args.ylabel) +elif args.normalise: + plt.ylabel("Normalized BW") +else: + plt.ylabel("kbps") + +plt.grid() +fontP = FontProperties() +fontP.set_size('small') +plt.legend(prop = 
fontP, bbox_to_anchor=(0., 1.05, 1., .102), loc=3, + ncol=3, mode="expand", borderaxespad=0.) +plt.ylim((int(args.miny), int(args.maxy))) +if args.summarise: + plt.boxplot(to_plot) + plt.xticks(range(1, 1+len(args.files)), args.labels) + +if not args.summarise: + if args.xlabel: + plt.xlabel(args.xlabel) + else: + plt.xlabel("Time") + if args.legend: + plt.legend(args.legend) + +if args.out: + plt.savefig(args.out) +else: + plt.show() + + diff --git a/util/plot_request_interval.py b/util/plot_request_interval.py new file mode 100644 index 0000000..988d41b --- /dev/null +++ b/util/plot_request_interval.py @@ -0,0 +1,171 @@ +from helper import * + +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', + help="Rate timeseries output to one plot", + required=True, + action="store", + nargs='+', + dest="files") + +parser.add_argument('--legend', '-l', + help="Legend to use if there are multiple plots. File names used as default.", + action="store", + nargs="+", + default=None, + dest="legend") + +parser.add_argument('--out', '-o', + help="Output png file for the plot.", + default=None, # Will show the plot + dest="out") + +parser.add_argument('-s', '--summarise', + help="Summarise the time series plot (boxplot). 
First 10 and last 10 values are ignored.", + default=False, + dest="summarise", + action="store_true") + +parser.add_argument('--labels', + help="Labels for x-axis if summarising; defaults to file names", + required=False, + default=[], + nargs="+", + dest="labels") + +parser.add_argument('--xlabel', + help="Custom label for x-axis", + required=False, + default=None, + dest="xlabel") + +parser.add_argument('--ylabel', + help="Custom label for y-axis", + required=False, + default=None, + dest="ylabel") + +parser.add_argument('-i', + help="Interfaces to plot (regex)", + default=".*", + dest="pat_iface") + +parser.add_argument('--rx', + help="Plot receive rates on the interfaces.", + default=False, + action="store_true", + dest="rx") + +parser.add_argument('--maxy', + help="Max mbps on y-axis..", + default=100, + action="store", + dest="maxy") + +parser.add_argument('--miny', + help="Min mbps on y-axis..", + default=0, + action="store", + dest="miny") + +parser.add_argument('--normalize', + help="normalise y-axis", + default=False, + action="store_true", + dest="normalise") + +args = parser.parse_args() +if args.labels is None: + args.labels = args.files + +pat_iface = re.compile(args.pat_iface) + +to_plot=[] +"""Output of bwm-ng csv has the following columns: +unix_timestamp;iface_name;bytes_out;bytes_in;bytes_total;packets_out;packets_in;packets_total;errors_out;errors_in +""" + +if args.normalise and args.labels == []: + raise "Labels required if summarising/normalising." 
+ sys.exit(-1) + +bw = map(lambda e: int(e.replace('M','')), args.labels) +idx = 0 + +earliest_time = -1 +for f in args.files: + data = read_list(f) + #xaxis = map(float, col(0, data)) + #start_time = xaxis[0] + #xaxis = map(lambda x: x - start_time, xaxis) + #rate = map(float, col(2, data)) + rate = {} + column = 2 + if args.rx: + column = 3 + for row in data: + try: + ifname = row[1] + except: + break + if ifname not in ['eth0', 'lo']: + if not rate.has_key(ifname): + rate[ifname] = ([],[]) + try: + if (earliest_time == -1): + earliest_time = float(row[0]) + if float(row[0]) - earliest_time > 300: + continue + rate[ifname][0].append(float(row[0]) - earliest_time) + rate[ifname][1].append(float(row[column])) + except: + break + + + if args.summarise: + for k in rate.keys(): + if pat_iface.match(k): + print k + vals = filter(lambda e: e < 1500, rate[k][10:-10]) + if args.normalise: + vals = map(lambda e: e / bw[idx], vals) + idx += 1 + to_plot.append(vals) + else: + for k in sorted(rate.keys()): + if pat_iface.match(k): + print k + plt.plot(rate[k][0], rate[k][1], label=k) + +plt.title("Request Interval") +if args.rx: + plt.title("RX rates") + +if args.ylabel: + plt.ylabel(args.ylabel) +elif args.normalise: + plt.ylabel("Normalized BW") +else: + plt.ylabel("Request Interval (s)") + +plt.grid() +plt.legend() +plt.ylim((int(args.miny), int(args.maxy))) + +if args.summarise: + plt.boxplot(to_plot) + plt.xticks(range(1, 1+len(args.files)), args.labels) + +if not args.summarise: + if args.xlabel: + plt.xlabel(args.xlabel) + else: + plt.xlabel("Time") + if args.legend: + plt.legend(args.legend) + +if args.out: + plt.savefig(args.out) +else: + plt.show() + diff --git a/util/plot_tcpprobe.py b/util/plot_tcpprobe.py new file mode 100644 index 0000000..21de9c5 --- /dev/null +++ b/util/plot_tcpprobe.py @@ -0,0 +1,136 @@ +from helper import * +from collections import defaultdict +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('--sport', help="Enable 
def parse_file(f):
    """Parse a tcp_probe trace into per-flow time and cwnd series.

    @f: path to a tcp_probe capture (space-separated fields; see the
        sample-line comment above).
    Returns (times, cwnd): dicts keyed by source port, where times[p]
    is the list of timestamps and cwnd[p] the congestion window in KB
    (snd_cwnd segments * 1480 bytes / 1024). Rows are filtered by the
    module-level args: port/sport selection and the optional
    start/length time window.

    Fixes over the original:
    - The kernel version test was `ver_1 == 3 and ver_2 >= 12`, which
      wrongly selected the 10-field (pre-3.12) format on 4.x+ kernels;
      a tuple comparison handles all newer kernels.
    - file.xreadlines() was removed in Python 3; iterate the file
      object directly, and close it via `with` (the original leaked
      the handle).
    - The srtt list was accumulated but never used or returned (and on
      post-3.12 kernels fields[-1] is rcv_wnd, not srtt); dropped.
    """
    # tcp_probe grew an rcv_wnd column in Linux 3.12; every newer
    # kernel also emits 11 fields.
    linux_ver = os.uname()[2].split('.')[:2]  # e.g. '3.13.0-24-generic'
    ver = tuple(int(ver_i) for ver_i in linux_ver)
    num_fields = 11 if ver >= (3, 12) else 10

    times = defaultdict(list)
    cwnd = defaultdict(list)
    with open(f) as trace:
        for l in trace:
            fields = l.strip().split(' ')
            if len(fields) != num_fields:
                break  # truncated/garbled tail; stop as the original did
            if args.start and args.length:
                pt_time = float(fields[0])
                if pt_time < args.start or pt_time > args.start + args.length:
                    continue
            if not args.sport:
                # Default: keep rows whose *destination* port matches.
                if fields[2].split(':')[1] != args.port:
                    continue
            else:
                # --sport: filter on the source port instead.
                if fields[1].split(':')[1] != args.port:
                    continue
            sport = int(fields[1].split(':')[1])
            times[sport].append(float(fields[0]))
            c = int(fields[6])
            cwnd[sport].append(c * 1480 / 1024.0)
    return times, cwnd