[dts] [PATCH V1 2/4] ipfix_flow_classify: upload automation script
yufengx.mo at intel.com
yufengx.mo at intel.com
Wed Jun 6 07:34:44 CEST 2018
From: yufengmx <yufengx.mo at intel.com>
This automation script is for the flow classify feature.
DPDK provides a Flow Classification library that provides the ability
to classify an input packet by matching it against a set of Flow rules.
The implementation supports counting of IPv4 5-tuple packets which match a
particular Flow rule only.
flow_classify is the tool that calls the flow_classify lib on a group of packets,
just after receiving them or before transmitting them.
Signed-off-by: yufengmx <yufengx.mo at intel.com>
---
tests/TestSuite_ipfix_flow_classify.py | 714 +++++++++++++++++++++++++++++++++
1 file changed, 714 insertions(+)
create mode 100644 tests/TestSuite_ipfix_flow_classify.py
diff --git a/tests/TestSuite_ipfix_flow_classify.py b/tests/TestSuite_ipfix_flow_classify.py
new file mode 100644
index 0000000..f8205d1
--- /dev/null
+++ b/tests/TestSuite_ipfix_flow_classify.py
@@ -0,0 +1,714 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import time
+import re
+import random
+import inspect, traceback
+
+from datetime import datetime
+from socket import htons, htonl
+
+from packet import Packet, NVGRE, IPPROTO_NVGRE
+from scapy.sendrecv import sendp
+from scapy.utils import wrpcap, rdpcap, hexstr
+
+import utils
+from test_case import TestCase
+from exception import TimeoutException, VerifyFailure
+from settings import TIMEOUT
+from pmd_output import PmdOutput
+from settings import HEADER_SIZE
+from serializer import Serializer
+
class ExecBinProcess(object):
    """Wrapper around a DPDK example binary running on the DUT.

    Builds the example under <target_src>/examples/<name>, locates the
    resulting executable and manages its lifecycle (start/close) through
    the DUT console session.
    """

    def __init__(self, **kwargs):
        # initialize process parameter
        self.dut = kwargs.get('dut')
        self.interactive = kwargs.get('interactive') or False
        self.name = kwargs.get('name')
        self.output = kwargs.get('output')
        self.target_code = kwargs.get('target_src')
        self.target_name = kwargs.get('target_name')
        self.logger = kwargs.get('logger')
        # session command
        # interactive tools show a ">" prompt; a plain shell shows "# "
        self.default_prompt = ">" if self.interactive else '# '
        self.console = self.execute_dut_cmds
        # initialize process state: nothing is running yet
        self.status = 'close'
        self.process_pid = None
        self.output_log = None
        # build the example right away so start() only has to launch it
        self.bin = self.compile(self.target_code, self.name)
        self.bin_name = os.path.basename(self.bin)
+
    def execute_dut_cmds(self, cmds):
        """Run a sequence of commands on the DUT session.

        Each item of *cmds* is ``[command, expected(, timeout)]`` where
        *expected* is either a prompt string or a list/tuple
        ``[prompt, expected_output(, check_type)]``.  With the list form
        the command output is verified to contain (check_type True, the
        default) or not contain (check_type False) *expected_output*;
        a mismatch raises VerifyFailure.

        Returns one output string for a single command, or a list of
        output strings when several commands were given.
        """
        if len(cmds) == 0:
            return
        if len(cmds) > 1:
            outputs = []
        else:
            outputs = ''
        for item in cmds:
            expected_items = item[1]
            if expected_items and isinstance(expected_items, (list, tuple)):
                check_output = True
                expected_str = expected_items[0] or self.default_prompt
            else:
                check_output = False
                expected_str = expected_items or self.default_prompt
            #----------------
            if len(item) == 3:
                timeout = int(item[2])
                output = self.dut.send_expect(item[0], expected_str, timeout)
                # fall back to the raw session buffer when expect matched
                # nothing (e.g. the command produced no prompt-terminated
                # output)
                output = self.dut.get_session_output(timeout) if not output \
                    else output
            else:
                output = self.dut.send_expect(item[0], expected_str)
                output = self.dut.get_session_output() if not output else output
            #--------------------
            if len(cmds) > 1:
                outputs.append(output)
            else:
                outputs = output
            if check_output and len(expected_items) >= 2:
                self.logger.info(output)
                expected_output = expected_items[1]
                # third element selects contains (True) vs not-contains
                # (False) checking; default is contains
                if len(expected_items) == 2:
                    check_type = True
                else:
                    check_type = expected_items[2]

                if check_type and expected_output in output:
                    msg = "expected '{0}' is in output".format(expected_output)
                    self.logger.info(msg)
                elif not check_type and expected_output not in output:
                    msg = "unexpected '{0}' is not in output".format(
                        expected_output)
                    self.logger.info(msg)
                else:
                    status = "isn't in" if check_type else "is in"
                    msg = "[{0}] {1} output".format(expected_output, status)
                    self.logger.error(msg)
                    raise VerifyFailure(msg)

        # small settle delay between command batches
        time.sleep(2)
        return outputs
+
+ def compile(self, target_code, name):
+ key_words = ['build', self.target_name]
+ tool_path = os.sep.join([target_code, 'examples', name])
+ cmds = []
+ cmds.append(['make -C {0}'.format(tool_path), '', 15])
+ self.console(cmds)
+ # check executable binary file
+ exec_bin = self.get_exec_bin_file(tool_path, key_words)
+
+ if not exec_bin or not os.path.exists(exec_bin):
+ msg = 'expected tool <{0}> does not exist'.format(name)
+ self.logger.error(msg)
+ raise VerifyFailure(msg)
+
+ return exec_bin
+
    def get_exec_bin_file(self, tool_path, key_words):
        """Locate the built executable under *tool_path*.

        For each keyword, `find` collects candidate directories; the first
        directory containing an executable entry (`ls -F` marks those with
        a trailing '*') wins.  Returns the executable path, or None when
        no candidate directory was found at all.
        """
        bin_dir = []
        for key_word in key_words:
            cmds = []
            cmds.append(['find {0} -name {1}'.format(tool_path, key_word),
                         '', 5])
            output = self.console(cmds)
            if output == '':
                continue
            bin_dir.extend(output.splitlines())
        for dir in bin_dir:
            cmds = []
            # ls -F appends '*' to executables; grep keeps only those
            cmds.append(["ls -F {0} | grep '*'".format(dir), '', 5])
            exec_file = self.console(cmds)
            # strip the trailing '*' marker from the ls -F output
            exec_bin = os.sep.join([dir, exec_file[:-1]])
            msg = "binary file is <{0}>".format(exec_bin)
            self.logger.info(msg)
            # return on the first directory that yields an executable
            return exec_bin
        else:
            # for/else: loop body never returned, i.e. bin_dir was empty
            return None
+
+ def check_process(self, process_name, check_status):
+ kill_session = self.dut.new_session()
+ # check subprocess in task space
+ cmd = ("ps aux | grep -i '%s' | "
+ "grep -v grep | awk {'print $2'}")% (process_name)
+ out = kill_session.send_expect(cmd, '# ', 5)
+ if out != "":
+ self.process_pid = out.splitlines()[0]
+ self.logger.info("{0}'s pid is {1}".format(self.bin_name,
+ self.process_pid))
+ status = True
+ else:
+ status = False
+
+ kill_session.close()
+
+ if check_status == 'start' and not status:
+ raise_flg = True
+ elif check_status == 'close' and status:
+ raise_flg = True
+ else:
+ raise_flg = False
+
+ if raise_flg:
+ raise VerifyFailure("{0} {1} failed".format(process_name,
+ check_status))
+ else:
+ self.logger.info("{0} {1} success".format(process_name,
+ check_status))
+
+ return status
+
+ def start(self, eal_option=''):
+ if self.status == 'running':
+ return
+ if self.interactive:
+ pass
+ else:
+ cmds =[['{0} {1} & 2>&1'.format(self.bin, eal_option),
+ 'table_entry_delete succeeded', 15],]
+ self.console(cmds)
+ time.sleep(10)
+ ############################
+ # check if process has bootep up
+ self.check_process(self.bin, "start")
+ self.status = 'running'
+
    def close(self, log="output.log"):
        """Dump the session output to *log*, then terminate the process.

        The log is written even when the process is already closed (in
        that case None is returned and no kill is attempted).  Otherwise
        SIGTERM is sent to the cached pid and the shutdown is verified
        via ps.  Returns the kill command's console output.
        """
        output = self.dut.get_session_output()
        # NOTE(review): binary mode while writing a str -- Python 2 idiom;
        # confirm before running under Python 3
        with open(log, 'wb') as fp:
            fp.write(output)
        if self.status == 'close':
            return None
        cmds =[['kill -TERM {0}'.format(self.process_pid), ''],]
        output = self.console(cmds)
        time.sleep(10)
        self.check_process(self.bin, 'close')
        self.status = 'close'

        return output
+#############
+
+#############
class TestIpfixFlowClassify(TestCase):
    """DTS suite driving the DPDK flow_classify example with burst traffic
    and checking its per-rule IPv4 5-tuple counters."""

    def send_packets_by_ixia(self, **kwargs):
        """Transmit the prepared streams through the ixia tester port.

        Expects in *kwargs*: 'tx_intf' (tester port), 'stream' (scapy
        packets, written to a pcap for ixia), 'stream configs' (burst
        generator options, required), optional 'traffic_time' (seconds to
        let traffic run before stopping) and 'rate_percent'.
        Returns the statistics dict produced by stop_ixia().
        """
        tester_port = kwargs.get('tx_intf')
        count = kwargs.get('count', 1)
        traffic_type = kwargs.get('traffic_type', 'normal')
        traffic_time = kwargs.get('traffic_time', 0)
        rate_percent = kwargs.get('rate_percent', float(100))
        #---------------------------------------------------------------
        send_pkts = []
        self.tgen_input = []
        tgen_input = self.tgen_input
        send_pkts = kwargs.get('stream')
        # ixia transmits from a pcap file, not from live scapy objects
        pcap = self.target_source + os.sep + 'ixia.pcap'
        wrpcap(pcap, send_pkts)
        #-----------------------------------------------------------
        # set packet for send
        # pause frame basic configuration
        pause_time = 65535
        pause_rate = 0.50
        # run ixia testing
        frame_size = 64
        # calculate number of packets
        expect_pps = self.wirespeed(self.nic, frame_size, 1) * 1000000.0
        # get line rate (frame + 20B of preamble/IFG overhead, in bits)
        linerate = expect_pps * (frame_size + 20) * 8
        # calculate default sleep time for one pause frame
        sleep = (1 / linerate) * pause_time * 512
        # calculate packets dropped in sleep time
        self.n_pkts = int((sleep / (1 / expect_pps)) * (1 / pause_rate))
        #----------------------------------------------------------------
        # same tester port is used for tx and rx
        tgen_input.append((tester_port,
                           tester_port,
                           pcap))
        # run latency stat statistics
        self.rate_percent = rate_percent
        self.pktgen_status = 'running'
        #if traffic_type == 'burst':
        stream_configs = kwargs.get('stream configs', None)
        if not stream_configs:
            raise VerifyFailure("no stream configs set")
        self.tester.burst_traffic_generator_throughput(
            tgen_input,
            rate_percent,
            **stream_configs)
        # move stop method in packet thread
        # traffic_time == 0 means stop right after one traffic round
        if traffic_time:
            time.sleep(traffic_time)
        result = self.stop_ixia()

        return result
+
+ def stop_ixia(self, data_types='packets'):
+ # get ixia statistics
+ if self.pktgen_status != 'running':
+ return
+ try:
+ line_rate = self.tester.get_port_line_rate()
+ stop_traffic = self.tester.stop_traffic_generator_throughput_loop
+ rx_bps, rx_pps = stop_traffic(self.tgen_input)
+ output = self.tester.traffic_get_port_stats(self.tgen_input)
+ cur_data = {}
+ cur_data['ixia statistics'] = []
+ append = cur_data['ixia statistics'].append
+ append('send packets: {0}'.format(output[0]))
+ append('line_rate: {0}'.format(line_rate[0]))
+ append('rate_percent: {0}%'.format(self.rate_percent))
+ except Exception as e:
+ msg = traceback.format_exc()
+ self.logger.error(msg)
+ finally:
+ self.pktgen_status = 'stop'
+ return cur_data
+
+ def get_pktgen(self, name):
+ pkt_gens = {'ixia': self.send_packets_by_ixia}
+ pkt_generator = pkt_gens.get(name)
+
+ return pkt_generator
+ #
+ # Test cases.
+ #
    def set_up_all(self):
        """
        Run before each test suite
        """
        #------------------------------------------------------------------
        # initialize ports topology
        self.dut_ports = self.dut.get_ports()
        self.port_mask = utils.create_mask(self.dut_ports)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        self.target_source = self.dut.base_dir
        # get output path: resolve relative log paths against the DTS root
        if self.logger.log_path.startswith(os.sep):
            output_path = self.logger.log_path
        else:
            cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
            output_path = cur_path + os.sep + self.logger.log_path
        # build and wrap the flow_classify example binary
        fcPsInfo = {
            'dut': self.dut,
            'name': 'flow_classify',
            'target_src': self.target_source,
            'target_name': self.dut.target,
            'output': output_path,
            'logger': self.logger,}
        self.flow_classify = ExecBinProcess(**fcPsInfo)
        self.output_log = None
        #------------------------------------------------------------------
        # initialize ixia session
        self.pktgen_status = 'stop'
        #------------------------------------------------------------------
        # initialize packet generator: performance runs use ixia, functional
        # runs fall back to scapy
        if self._enable_perf:
            self.pktgen_name = 'ixia'
        else:
            self.pktgen_name = 'scapy_mix'
+
+ def set_up(self):
+ """
+ Run before each test case.
+ """
+ pass
+
+ def tear_down(self):
+ """
+ Run after each test case.
+ """
+ pass
+
+ def tear_down_all(self):
+ """
+ Run after each test suite.
+ """
+ pass
+
+ def get_pkt_len(self, pkt_type):
+ # packet size
+ frame_size = FRAME_SIZE_256
+ headers_size = sum(map(lambda x: HEADER_SIZE[x],
+ ['eth', 'ip', pkt_type]))
+ pktlen = frame_size - headers_size
+ return pktlen
+
    def set_stream(self, stm_names=None):
        '''
        Build scapy packets for the requested stream names.

        Each known name maps to a packet recipe (layer values matching the
        flow_classify example's ipv4 rules file); 'invalid_*' recipes use
        5-tuples that match no rule.  Unknown names are skipped.  Every
        built packet is also dumped to pkt_<name>.pcap under the DUT base
        dir.  Returns the list of built packets.
        '''
        #----------------------------------------------------------------------
        # set streams for traffic
        pkt_configs = {
            # UDP_1:
            # Frame Data/Protocols: Ethernet 2 0800, IPv4,UDP/IP, Fixed 64.
            # IPv4 Header Page: Dest Address: 2.2.2.7 Src Address: 2.2.2.3
            # UDP Header: Src Port: 32 Dest Port: 33
            #
            # Stream Control: Stop after this Stream, Packet Count 32.
            #
            'UDP_1': {
                'type': 'UDP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '2.2.2.3', 'dst': '2.2.2.7'},
                    'udp': {'src': 32, 'dst': 33},
                    'raw': {'payload': ['58'] * self.get_pkt_len('udp')}}},
            # UDP_2:
            # Frame Data/Protocols: Ethernet 2 0800, IPv4,UDP/IP, Fixed 64.
            # IPv4 Header Page: Dest Address: 9.9.9.7 Src Address: 9.9.9.3
            # UDP Header: Src Port: 32 Dest Port: 33
            #
            # Stream Control: Stop after this Stream, Packet Count 32.
            #
            'UDP_2':{
                'type': 'UDP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '9.9.9.3', 'dst': '9.9.9.7'},
                    'udp': {'src': 32, 'dst': 33},
                    'raw': {'payload': ['58'] * self.get_pkt_len('udp')}}},
            # invalid_UDP: a 5-tuple that matches no classify rule
            'invalid_UDP':{
                'type': 'UDP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '9.8.7.6', 'dst': '192.168.0.36'},
                    'udp': {'src': 10, 'dst': 11},
                    'raw': {'payload': ['58'] * self.get_pkt_len('udp')}}},
            # TCP_1:
            # Frame Data/Protocols: Ethernet 2 0800, IPv4,TCP/IP, Fixed 64.
            # IPv4 Header Page: Dest Address: 9.9.9.7 Src Address: 9.9.9.3
            # TCP Header: Src Port: 32 Dest Port: 33
            #
            # Stream Control: Stop after this Stream, Packet Count 32.
            #
            'TCP_1':{
                'type': 'TCP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '9.9.9.3', 'dst': '9.9.9.7'},
                    'tcp': {'src': 32, 'dst': 33},
                    'raw': {'payload': ['58'] * self.get_pkt_len('tcp')}}},
            # TCP_2:
            # Frame Data/Protocols: Ethernet 2 0800, IPv4,TCP/IP, Fixed 64.
            # IPv4 Header Page: Dest Address: 9.9.8.7 Src Address: 9.9.8.3
            # TCP Header: Src Port: 32 Dest Port: 33
            #
            # Stream Control: Stop after this Stream, Packet Count 32.
            #
            'TCP_2':{
                'type': 'TCP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '9.9.8.3', 'dst': '9.9.8.7'},
                    'tcp': {'src': 32, 'dst': 33},
                    'raw': {'payload': ['58'] * self.get_pkt_len('tcp')}}},
            # invalid_TCP: a 5-tuple that matches no classify rule
            'invalid_TCP':{
                'type': 'TCP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '9.8.7.6', 'dst': '192.168.0.36'},
                    'tcp': {'src': 10, 'dst': 11},
                    'raw': {'payload': ['58'] * self.get_pkt_len('tcp')}}},
            # SCTP_1:
            # Frame Data/Protocols: Ethernet 2 0800, IPv4, None, Fixed 256.
            # IPv4 Header Page: Dest Address: 2.3.4.5 Src Address: 6.7.8.9
            # Protocol: 132-SCTP
            # Stream Control: Stop after this Stream, Packet Count 32.
            #
            'SCTP_1':{
                'type': 'SCTP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '6.7.8.9', 'dst': '2.3.4.5'},
                    'sctp': {'src': 32, 'dst': 33},
                    'raw': {'payload': ['58'] * self.get_pkt_len('sctp')}}},
            # invalid_SCTP: a 5-tuple that matches no classify rule
            'invalid_SCTP':{
                'type': 'SCTP',
                'pkt_layers': {
                    #'ether': {'src': srcmac, 'dst': nutmac},
                    'ipv4': {'src': '9.8.7.6', 'dst': '192.168.0.36'},
                    'sctp': {'src': 10, 'dst': 11},
                    'raw': {'payload': ['58'] * self.get_pkt_len('sctp')}}},
            }

        # create packet for send
        streams = []
        for stm_name in stm_names:
            if stm_name not in pkt_configs.keys():
                continue
            values = pkt_configs[stm_name]
            savePath = os.sep.join([self.target_source,
                                    "pkt_{0}.pcap".format(stm_name)])
            pkt_type = values.get('type')
            pkt_layers = values.get('pkt_layers')
            pkt = Packet(pkt_type=pkt_type)
            for layer in pkt_layers.keys():
                pkt.config_layer(layer, pkt_layers[layer])
            # keep a pcap copy of every stream for debugging/replay
            pkt.pktgen.write_pcap(savePath)
            streams.append(pkt.pktgen.pkt)

        return streams
+
+ def get_stream_rule_priority(self, stream_type):
+ stream_types = {
+ 'UDP_1': 0,
+ 'UDP_2': 1,
+ 'TCP_1': 2,
+ 'TCP_2': 3,
+ 'SCTP_1': 4}
+ return stream_types.get(stream_type, None)
+
+ def traffic(self, ports_topo):
+ """
+ stream transmission on specified link topology
+ """
+ time.sleep(2)
+ result = self.send_packets_by_ixia(**ports_topo)
+ # end traffic
+ self.logger.info("complete transmission")
+
+ return result
+
+ def check_filter_pkts(self, log, rule_priority):
+ if rule_priority != None:
+ pat = "rule\[{0}\] count=(\d+)".format(rule_priority)
+ else:
+ pat = "rule\[\d+\] count=(\d+)"
+ with open(log, 'rb') as fp:
+ content = fp.read()
+ if content:
+ grp = re.findall(pat, content, re.M)
+ if grp and len(grp):
+ total = reduce(lambda x,y: x+y, [int(i) for i in grp])
+ else:
+ total = 0
+ return total
+
    def run_test_pre(self):
        """Boot flow_classify with the example's ipv4 rules file and a
        timestamped log path for the later counter check."""
        # boot up flow_classify
        rule_config = os.sep.join([self.target_source,
                                   'examples',
                                   'flow_classify',
                                   'ipv4_rules_file.txt'])
        # NOTE(review): this checks the path on the machine running DTS --
        # assumes the DUT source tree is locally visible; confirm
        if not os.path.exists(rule_config):
            raise VerifyFailure("rules file doesn't existed")
        option = r" -c 4 -n 4 --file-prefix=test -- --rule_ipv4={0}".format(rule_config)
        # timestamp the log name so repeated runs do not overwrite each other
        dt = datetime.now()
        timestamp = dt.strftime('%Y-%m-%d_%H%M%S')
        self.output_log = '{0}/{1}_{2}.log'.format(self.flow_classify.output,
                                                   self.flow_classify.name,
                                                   timestamp)
        self.flow_classify.start(option)
        time.sleep(10)

        return True
+
+ def get_ixia_peer_port(self):
+ for cnt in self.dut_ports:
+ if self.tester.get_local_port_type(cnt) != 'ixia':
+ continue
+ tester_port = self.tester.get_local_port(cnt)
+ return tester_port
+
+ def run_test_post(self, **kwargs):
+ # close flow_classify
+ output = self.flow_classify.close(self.output_log)
+ return output
+
    def run_traffic(self, **kwargs):
        """Assemble the port topology / stream configuration and run one
        traffic round.

        Consumes 'stm_types', 'burst_packet', 'gap' (ibg/ifg/isg triple),
        'dma' (stream flow type) and 'traffic_time' from *kwargs* and
        returns the statistics produced by the generator.
        """
        stm_types = kwargs.get('stm_types')
        burst_packet = kwargs.get('burst_packet')
        gap = kwargs.get('gap')
        dma = kwargs.get('dma')
        traffic_time = kwargs.get('traffic_time')
        #-----------------------------------------
        # set traffic topology
        # for lack ixia port, one of ixia port use normal link peer
        # so there set a hard code for temporarily usage
        port = 0
        tester_port_id = self.get_ixia_peer_port()
        # ixia wants the raw port id; scapy wants the interface name
        if self.pktgen_name == 'ixia':
            tx_port = tester_port_id
        else:
            tx_port = self.tester.get_interface(tester_port_id)
        ports_topo = {'tx_intf': tx_port,
                      'rx_intf': 0,
                      'stream': self.set_stream(stm_types),
                      'stream configs': {
                          'count': burst_packet,
                          'frameType': {
                              # gapNanoSeconds gapMilliSeconds gapSeconds
                              'gapUnit': 'gapMilliSeconds',
                              'ibg': gap[0],
                              'ifg': gap[1],
                              'isg': gap[2]},
                          'flow_type': dma,
                          'stream_type': 'burst'
                      },
                      # send bursts of 32 packets
                      'traffic_type': 'burst',
                      # 0 means stop after one round traffic
                      # xx value means stop after traffic_time time
                      'traffic_time': traffic_time,}
        # begin traffic checking
        result = self.traffic(ports_topo)

        return result
+
    def check_test_result(self, **kwargs):
        """Compare the flow_classify counters against the sent streams.

        For multi-stream (mixed) runs each valid rule's counter must be a
        multiple of the burst size; for single-stream 'stopStream' runs a
        valid rule must count exactly one burst and an invalid stream must
        count zero.  Raises VerifyFailure listing all mismatches, returns
        True when everything matched.
        """
        check_results = []
        stm_types = kwargs.get('stm_types')
        burst_packet = kwargs.get('burst_packet')
        dma = kwargs.get('dma')
        self.logger.info(stm_types)
        for stm_type in stm_types:
            rule_priority = self.get_stream_rule_priority(stm_type)
            captured_pkts = self.check_filter_pkts(self.output_log,
                                                   rule_priority)
            # NOTE(review): 'or 0' maps both priority 0 and None to 0 in
            # this log line -- logging only, results are unaffected
            self.logger.info("%s %d %d"%(stm_type, rule_priority or 0,
                                         captured_pkts or 0))
            msg = ''
            if len(stm_types) > 1:#dma == 'contBurst':
                # check if packets are multiples of burst pkts
                # ignore invalid rule
                if rule_priority and captured_pkts%burst_packet != 0 :
                    msg = ("captured packets are not multiples of "
                           "burst {0} packets".format(burst_packet))
                else:
                    continue
            elif dma == 'stopStream':
                # single one-shot stream: invalid streams must be filtered
                # out entirely, valid ones must count exactly one burst
                if rule_priority == None and captured_pkts != 0:
                    msg = "invalid stream hasn't been filtered out"
                elif rule_priority != None and captured_pkts != burst_packet:
                    msg = "expect {0} ".format(burst_packet) + \
                          "captured {0}".format(captured_pkts)
                else:
                    continue
            else:
                continue
            if msg:
                check_results.append(msg)

        if check_results:
            self.logger.error(os.linesep.join(check_results))
            raise VerifyFailure("test result fail")
        else:
            return True
+
+ def burst_traffic(self, stm_types=None, gap=[100, 100, 100],
+ flow_type="one burst"):
+ self.logger.info('begin to check ...... ')
+
+ info = {}
+ info['stm_types'] = stm_types
+ info['burst_packet'] = 32
+ info['gap'] = gap
+ if flow_type == "one burst":
+ info['dma'] = 'stopStream'
+ info['traffic_time'] = 0
+ else:
+ info['dma'] = 'gotoFirst'
+ info['traffic_time'] = 30
+ check_flg = False
+ #-----------------------------------------
+ try:
+ # preset test environment
+ self.run_test_pre()
+ # run traffic
+ self.run_traffic(**info)
+ check_flg = True
+ except Exception as e:
+ pass
+ finally:
+ pass
+ #-----------------------------------------
+ # close flow_classify
+ self.run_test_post(**info)
+ #-----------------------------------------
+ # analysis test result
+ if check_flg == True:
+ status = self.check_test_result(**info)
+ else:
+ status = False
+
+ return status
+
+ def check_tx_mixed(self):
+ stream_list = [
+ 'UDP_1', 'UDP_2', 'invalid_UDP',
+ 'TCP_1', 'TCP_2', 'invalid_TCP',
+ 'SCTP_1', 'invalid_SCTP']
+ paras=[[1, 10, 1000]]
+ for para in paras:
+ self.burst_traffic(stm_types=stream_list, gap=para,
+ flow_type="mixed burst")
+
+ def test_perf_udp_valid_rule(self):
+ stream_list = ['UDP_1', 'UDP_2']
+ for stm_type in stream_list:
+ self.burst_traffic([stm_type])
+
+ def test_perf_udp_invalid_rule(self):
+ stream_list = ['invalid_UDP']
+ for stm_type in stream_list:
+ self.burst_traffic([stm_type])
+
+ def test_perf_tcp_valid_rule(self):
+ stream_list = ['TCP_1', 'TCP_2']
+ for stm_type in stream_list:
+ self.burst_traffic([stm_type])
+
+ def test_perf_tcp_invalid_rule(self):
+ stream_list = ['invalid_TCP']
+ for stm_type in stream_list:
+ self.burst_traffic([stm_type])
+
+ def test_perf_sctp_valid_rule(self):
+ stream_list = ['SCTP_1']
+ for stm_type in stream_list:
+ self.burst_traffic([stm_type])
+
+ def test_perf_sctp_invalid_rule(self):
+ stream_list = ['invalid_SCTP']
+ for stm_type in stream_list:
+ self.burst_traffic([stm_type])
+
+ def test_perf_whole_rules(self):
+ self.check_tx_mixed()
\ No newline at end of file
--
1.9.3
More information about the dts
mailing list