[dts] [PATCH V1] Add sw eventdev automation test suite

Yuwei Zhang yuwei1.zhang at intel.com
Thu Nov 2 16:02:09 CET 2017


Add software eventdev automation test suite for the eventdev_pipeline_sw_pmd
sample: throughput, worker load balance and packet ordering cases.

Signed-off-by: Yuwei Zhang <yuwei1.zhang at intel.com>
---
 tests/TestSuite_sw_eventdev_pipeline_sample.py | 250 +++++++++++++++++++++++++
 1 file changed, 250 insertions(+)
 create mode 100644 tests/TestSuite_sw_eventdev_pipeline_sample.py

diff --git a/tests/TestSuite_sw_eventdev_pipeline_sample.py b/tests/TestSuite_sw_eventdev_pipeline_sample.py
new file mode 100644
index 0000000..fefc470
--- /dev/null
+++ b/tests/TestSuite_sw_eventdev_pipeline_sample.py
@@ -0,0 +1,250 @@
+"""
+DPDK Test Suite.
+Tests for sw eventdev pipeline sample.
+"""
+
+import utils
+import time
+import re
+from scapy.all import rdpcap
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+
+from test_case import TestCase
+from settings import HEADER_SIZE
+from etgen import IxiaPacketGenerator
+from ssh_connection import SSHConnection
+
+
+class TestSWEventdevPipeline(TestCase, IxiaPacketGenerator):
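+    """
+    Test the examples/eventdev_pipeline_sw_pmd sample application with Ixia
+    traffic: measure throughput and verify worker load balance and packet
+    ordering for atomic, ordered and parallel pipeline types.
+    """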
+
+    frame_sizes = [64, 128]
+
+    perf_test_cycles = [{'pipeline type': 'atomic', 'flow num': 1, 'stage num': 1, 'Mpps': {}},
+                        {'pipeline type': 'atomic', 'flow num': 4000, 'stage num': 1, 'Mpps': {}},
+                        {'pipeline type': 'atomic', 'flow num': 1, 'stage num': 2, 'Mpps': {}},
+                        {'pipeline type': 'atomic', 'flow num': 4000, 'stage num': 2, 'Mpps': {}},
+                        {'pipeline type': 'atomic', 'flow num': 1, 'stage num': 4, 'Mpps': {}},
+                        {'pipeline type': 'atomic', 'flow num': 4000, 'stage num': 4, 'Mpps': {}},
+                        {'pipeline type': 'ordered', 'flow num': 1, 'stage num': 1, 'Mpps': {}},
+                        {'pipeline type': 'ordered', 'flow num': 4000, 'stage num': 1, 'Mpps': {}},
+                        {'pipeline type': 'ordered', 'flow num': 1, 'stage num': 2, 'Mpps': {}},
+                        {'pipeline type': 'ordered', 'flow num': 4000, 'stage num': 2, 'Mpps': {}},
+                        {'pipeline type': 'ordered', 'flow num': 1, 'stage num': 4, 'Mpps': {}},
+                        {'pipeline type': 'ordered', 'flow num': 4000, 'stage num': 4, 'Mpps': {}},
+                        {'pipeline type': 'parallel', 'flow num': 1, 'stage num': 1, 'Mpps': {}},
+                        {'pipeline type': 'parallel', 'flow num': 4000, 'stage num': 1, 'Mpps': {}},
+                        {'pipeline type': 'parallel', 'flow num': 1, 'stage num': 2, 'Mpps': {}},
+                        {'pipeline type': 'parallel', 'flow num': 4000, 'stage num': 2, 'Mpps': {}},
+                        {'pipeline type': 'parallel', 'flow num': 1, 'stage num': 4, 'Mpps': {}},
+                        {'pipeline type': 'parallel', 'flow num': 4000, 'stage num': 4, 'Mpps': {}}]
+
+    load_balance_test_cycles = [{'pipeline type': 'atomic', 'flow num': 16, 'stage num': 2, 'worker num': 4, 'threshold': 20},
+                                {'pipeline type': 'atomic', 'flow num': 1024, 'stage num': 2, 'worker num': 4, 'threshold': 5},
+                                {'pipeline type': 'atomic', 'flow num': 4000, 'stage num': 2, 'worker num': 4, 'threshold': 3},
+                                {'pipeline type': 'atomic', 'flow num': 10000, 'stage num': 2, 'worker num': 4, 'threshold': 20},
+                                {'pipeline type': 'atomic', 'flow num': 16, 'stage num': 4, 'worker num': 8, 'threshold': 25},
+                                {'pipeline type': 'atomic', 'flow num': 1024, 'stage num': 4, 'worker num': 8, 'threshold': 3},
+                                {'pipeline type': 'atomic', 'flow num': 4000, 'stage num': 4, 'worker num': 8, 'threshold': 3},
+                                {'pipeline type': 'atomic', 'flow num': 10000, 'stage num': 4, 'worker num': 8, 'threshold': 3},
+                                {'pipeline type': 'ordered', 'flow num': 1, 'stage num': 4, 'worker num': 8, 'threshold': 1},
+                                {'pipeline type': 'ordered', 'flow num': 10000, 'stage num': 4, 'worker num': 8, 'threshold': 1},
+                                {'pipeline type': 'parallel', 'flow num': 1, 'stage num': 4, 'worker num': 8, 'threshold': 1},
+                                {'pipeline type': 'parallel', 'flow num': 10000, 'stage num': 4, 'worker num': 8, 'threshold': 1}]
+
+    test_keep_packet_order_cycles = [{'pipeline type': 'parallel', 'stage num': 1, 'flow num': 16},
+                                     {'pipeline type': 'atomic', 'stage num': 1, 'flow num': 1}]
+    flow_num = 1
+    worker_num = 4
+    worker_index = []
+    pipeline_type = 'atomic'
+    stage_num = 1
+    load_balance_max_gap = 20
+
+    perf_table_header = ['Pipeline Type', 'Stage Num', 'Flow Num', 'MPPS']
+    load_balance_table_header = ['Pipeline Type', 'Stage Num', 'Worker Num', 'Flow Num', 'Max Percent', 'Min Percent', 'Gap', 'Result']
+
+    header_size = HEADER_SIZE['eth'] + HEADER_SIZE['ip'] + HEADER_SIZE['tcp']
+
+    def set_up_all(self):
+        self.tester.extend_external_packet_generator(TestSWEventdevPipeline, self)
+        self.dut_ports = self.dut.get_ports()
+        self.verify(len(self.dut_ports) >= 1, "Insufficient number of ports!")
+
+        output = self.dut.build_dpdk_apps("./examples/eventdev_pipeline_sw_pmd")
+        self.verify("Error" not in output and "No such" not in output, "Compilation Error!")
+        self.core_config = '1S/16C/1T'
+        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
+
+    def set_up(self):
+        pass
+
+    def create_worker_coremask(self, worker_num):
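+        # the test cases pin the rx, tx and scheduler threads to cores 1-3
+        # (masks 0x2, 0x4, 0x8), so worker lcores start from core 4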
+        self.worker_num = worker_num
+        self.worker_index = []
+        for i in range(0, worker_num):
+            self.worker_index.append(i + 4)
+        return utils.create_mask(self.worker_index)
+
+    def run_eventdev_sample(self, rx_core_mask, tx_core_mask, scheduler_core_mask,
+                            worker_core_mask, stage_num, pipeline_type='atomic'):
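+        # sample app options: -r/-t/-e set the rx, tx and scheduler core
+        # masks, -w the worker core mask, -s the number of pipeline stages,
+        # -n 0 removes the packet count limit and -W1000 emulates extra
+        # work cycles in each worker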
+        core_mask = utils.create_mask(self.dut.get_core_list(self.core_config, socket=self.ports_socket))
+        if pipeline_type == 'atomic':
+            pipeline_symbol = ''
+        elif pipeline_type == 'ordered':
+            pipeline_symbol = '-o'
+        elif pipeline_type == 'parallel':
+            pipeline_symbol = '-p'
+        else:
+            raise ValueError('unsupported pipeline type: %s' % pipeline_type)
+
+        cmdline = './examples/eventdev_pipeline_sw_pmd/build/app/eventdev_pipeline_sw_pmd --vdev event_sw0 -n {0} -c {1} -- -r{2} -t{3} -e{4} -w {5} -s {6} -n 0 {7} -W1000'.format(
+                self.dut.get_memory_channels(), core_mask, rx_core_mask, tx_core_mask, scheduler_core_mask,
+                worker_core_mask, str(stage_num), pipeline_symbol)
+
+        self.dut.send_expect(cmdline, "using eventdev port", 60)
+
+    def ixia_send_traffic(self, frame_size=64, flow_num=1, type='perf'):
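+        # type 'perf': measure throughput with the Ixia generator and return
+        # pps; type 'keep_packet_order': send traffic and verify that packets
+        # arrive in order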
+        self.flow_num = flow_num
+        tgen_input = []
+        tgen_input.append((self.tester.get_local_port(self.dut_ports[0]),
+                           self.tester.get_local_port(self.dut_ports[0]),
+                           "test.pcap"))
+        payload_size = frame_size - self.header_size
+        if type == 'perf':
+            self.logger.info("Running with frame size %d" % frame_size)
+            self.tester.scapy_append(
+                'wrpcap("test.pcap", Ether()/IP()/TCP()/("X"*%d))' % payload_size
+            )
+            self.tester.scapy_execute()
+            _, pps = self.tester.traffic_generator_throughput(tgen_input, rate_percent=100, delay=10)
+            return pps
+
+        elif type == 'keep_packet_order':
+            self.tester.scapy_append(
+                'wrpcap("test.pcap", Ether()/IP()/TCP()/("X"*%d))' % payload_size
+            )
+            self.tester.scapy_execute()
+            self.verify(self.tester.verify_packet_order(tgen_input) == 0, "Packets are out of order!")
+            self.dut.send_expect("^C", '#')
+        else:
+            self.logger.info("Unsupported Ixia traffic type")
+            return 0
+
+    def increase_ip_address(self, ip_start, size):
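+        # helper: build a list of size + 1 consecutive IPv4 addresses
+        # following ip_start, carrying over any octet that passes 255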
+        ip = map(int, ip_start.split('.'))
+        ip_increased = []
+        for _ in range(size + 1):
+            num = 3
+            ip[num] += 1
+            while ip[num] == 256 and num > 0:
+                ip[num] = 1
+                num -= 1
+                ip[num] += 1
+            ip_increased.append('.'.join(str(item) for item in ip))
+        return ip_increased
+
+    def ip(self, port, frag, src, proto, tos, dst, chksum, len, options, version, flags, ihl, ttl, id):
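+        # override IxiaPacketGenerator.ip so the destination address
+        # increments across flow_num values, generating multiple flows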
+        self.add_tcl_cmd("protocol config -name ip")
+        self.add_tcl_cmd('ip config -sourceIpAddr "%s"' % src)
+        self.add_tcl_cmd("ip config -sourceIpAddrMode ipIdle")
+        self.add_tcl_cmd('ip config -destIpAddr "%s"' % dst)
+        self.add_tcl_cmd("ip config -destIpAddrMode ipIncrHost")
+        self.add_tcl_cmd('ip config -destIpAddrRepeatCount "%d"' % int(self.flow_num))
+        self.add_tcl_cmd("ip config -ttl %d" % ttl)
+        self.add_tcl_cmd("ip config -totalLength %d" % len)
+        self.add_tcl_cmd("ip config -fragment %d" % frag)
+        self.add_tcl_cmd("ip config -ipProtocol ipV4ProtocolReserved255")
+        self.add_tcl_cmd("ip config -identifier %d" % id)
+        self.add_tcl_cmd("stream config -framesize %d" % (len + 18))
+        self.add_tcl_cmd("ip set %d %d %d" % (self.chasId, port['card'], port['port']))
+
+    def get_load_balance_result(self, output, threshold):
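+        # parse the per-worker load percentages printed by the sample app
+        # when it exits and compare the max-min spread to the threshold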
+        workers_workload = []
+        for value in re.findall(r':\s*([\d.]+)\s*%', output):
+            workers_workload.append(float(value))
+        self.logger.info('---------------------------')
+        for i, value in enumerate(workers_workload):
+            self.logger.info('worker %d : %f %%' % (i, value))
+        self.logger.info('---------------------------')
+        gap = max(workers_workload) - min(workers_workload)
+        result = 'Failed' if gap > threshold else 'Pass'
+        self.logger.info('result: %s' % result)
+        return max(workers_workload), min(workers_workload), gap, result
+
+    def test_perf_sample_load_balance(self):
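+        # start the sample for each configuration, send traffic, then stop
+        # it and check the spread of the per-worker load percentages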
+        frame_size = 64
+        self.result_table_create(self.load_balance_table_header)
+        for test_cycle in self.load_balance_test_cycles:
+            worker_coremask = self.create_worker_coremask(test_cycle['worker num'])
+            self.run_eventdev_sample('0x2', '0x4', '0x8', worker_coremask, test_cycle['stage num'], test_cycle['pipeline type'])
+            self.ixia_send_traffic(frame_size, test_cycle['flow num'], type='perf')
+
+            time.sleep(10)
+
+            output = self.dut.send_expect('^C', '#')
+            self.logger.info('pipeline type: %s stage num: %d flow num: %d' % (test_cycle['pipeline type'], test_cycle['stage num'], test_cycle['flow num']))
+            max_percent, min_percent, gap, result = self.get_load_balance_result(output, test_cycle['threshold'])
+            table_row = [test_cycle['pipeline type'], test_cycle['stage num'],
+                         test_cycle['worker num'], test_cycle['flow num'],
+                         max_percent, min_percent, gap, result]
+            self.result_table_add(table_row)
+        self.result_table_print()
+
+    def test_perf_sample_keep_packet_order(self):
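+        # verify that packet order is preserved for each configured
+        # pipeline type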
+        frame_size = 64
+        default_worker_num = 8
+        worker_coremask = self.create_worker_coremask(default_worker_num)
+        for test_cycle in self.test_keep_packet_order_cycles:
+            self.run_eventdev_sample('0x2', '0x4', '0x8', worker_coremask, test_cycle['stage num'], test_cycle['pipeline type'])
+            self.ixia_send_traffic(frame_size, flow_num=test_cycle['flow num'], type='keep_packet_order')
+
+    def test_perf_pipeline_sample(self):
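+        # measure throughput for every pipeline type / stage / flow
+        # combination at each frame size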
+        default_worker_num = 8
+        worker_coremask = self.create_worker_coremask(default_worker_num)
+        for test_cycle in self.perf_test_cycles:
+            self.run_eventdev_sample('0x2', '0x4', '0x8', worker_coremask, test_cycle['stage num'], test_cycle['pipeline type'])
+            for frame_size in self.frame_sizes:
+                pps = self.ixia_send_traffic(frame_size, test_cycle['flow num'], 'perf')
+                Mpps = pps / 1000000.0
+                test_cycle['Mpps'][frame_size] = float('%.3f' % Mpps)
+            self.dut.send_expect('^C', '#')
+
+        self.result_table_create(self.perf_table_header)
+        for test_cycle in self.perf_test_cycles:
+            table_row = [test_cycle['pipeline type'], test_cycle['stage num'],
+                         test_cycle['flow num'], test_cycle['Mpps']]
+            self.result_table_add(table_row)
+        self.result_table_print()
+
+    def tear_down(self):
+        pass
+
+    def tear_down_all(self):
+        pass
-- 
2.14.1.windows.1


