blob: 81f76a5d12ac69956b70ec84bc0ed47dad13c492 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001#!/usr/bin/env python3
2# SPDX-License-Identifier: GPL-2.0
3
4"""
5tdc.py - Linux tc (Traffic Control) unit test driver
6
7Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
8"""
9
10import re
11import os
12import sys
13import argparse
14import importlib
15import json
16import subprocess
17import time
18import traceback
19from collections import OrderedDict
20from string import Template
21
22from tdc_config import *
23from tdc_helper import *
24
25import TdcPlugin
26from TdcResults import *
27
class PluginDependencyException(Exception):
    """Raised when one or more plugins required by a test are missing."""

    def __init__(self, missing_pg):
        # Filenames of the plugins that could not be located.
        self.missing_pg = missing_pg
31
class PluginMgrTestFail(Exception):
    """Raised when a test stage (setup/execute/teardown) fails fatally."""

    def __init__(self, stage, output, message):
        # Name of the stage in which the failure occurred.
        self.stage = stage
        # Accumulated command output available at failure time.
        self.output = output
        # Human-readable description of what went wrong.
        self.message = message
37
class PluginMgr:
    """
    Discover, load and drive tdc plugins.

    Plugin modules are discovered under TDC_PLUGIN_DIR (default
    './plugins'); each must expose a SubPlugin class.  The call_*
    methods fan each test-lifecycle hook out to every loaded plugin
    instance; "post" hooks run in reverse load order so teardown
    mirrors setup.
    """

    def __init__(self, argparser):
        super().__init__()
        self.plugins = {}             # module name -> imported module
        self.plugin_instances = []    # SubPlugin objects, in load order
        self.failed_plugins = {}
        self.argparser = argparser

        # TODO, put plugins in order
        plugindir = os.getenv('TDC_PLUGIN_DIR', './plugins')
        for dirpath, dirnames, filenames in os.walk(plugindir):
            for fn in filenames:
                # Skip the package marker and editor temp/lock files.
                if (fn.endswith('.py') and
                    not fn == '__init__.py' and
                    not fn.startswith('#') and
                    not fn.startswith('.#')):
                    mn = fn[0:-3]
                    foo = importlib.import_module('plugins.' + mn)
                    self.plugins[mn] = foo
                    self.plugin_instances.append(foo.SubPlugin())

    def load_plugin(self, pgdir, pgname):
        """Import plugin file *pgname* from package *pgdir* and register it."""
        pgname = pgname[0:-3]    # strip the '.py' suffix
        foo = importlib.import_module('{}.{}'.format(pgdir, pgname))
        self.plugins[pgname] = foo
        self.plugin_instances.append(foo.SubPlugin())
        # Give the freshly loaded plugin a chance to validate arguments.
        self.plugin_instances[-1].check_args(self.args, None)

    def get_required_plugins(self, testlist):
        '''
        Get all required plugins from the list of test cases and return
        all unique items.
        '''
        reqs = []
        for t in testlist:
            try:
                if 'requires' in t['plugins']:
                    if isinstance(t['plugins']['requires'], list):
                        reqs.extend(t['plugins']['requires'])
                    else:
                        reqs.append(t['plugins']['requires'])
            except KeyError:
                # Test case has no 'plugins' stanza at all.
                continue
        reqs = get_unique_item(reqs)
        return reqs

    def load_required_plugins(self, reqs, parser, args, remaining):
        '''
        Get all required plugins from the list of test cases and load any plugin
        that is not already enabled.

        Raises PluginDependencyException listing every plugin that could
        not be found.  Returns the re-parsed args namespace (loaded
        plugins may add their own CLI arguments).
        '''
        pgd = ['plugin-lib', 'plugin-lib-custom']
        pnf = []    # plugins not found

        for r in reqs:
            if r not in self.plugins:
                fname = '{}.py'.format(r)
                source_path = []
                for d in pgd:
                    pgpath = '{}/{}'.format(d, fname)
                    if os.path.isfile(pgpath):
                        source_path.append(pgpath)
                if len(source_path) == 0:
                    print('ERROR: unable to find required plugin {}'.format(r))
                    pnf.append(fname)
                    continue
                elif len(source_path) > 1:
                    # BUGFIX: the plugin name was never substituted into
                    # this warning (the '{}' printed literally); format it.
                    print('WARNING: multiple copies of plugin {} found, using version found'.format(r))
                    print('at {}'.format(source_path[0]))
                pgdir = source_path[0]
                pgdir = pgdir.split('/')[0]
                self.load_plugin(pgdir, fname)
        if len(pnf) > 0:
            raise PluginDependencyException(pnf)

        # Let the newly loaded plugins add and parse their own arguments.
        parser = self.call_add_args(parser)
        (args, remaining) = parser.parse_known_args(args=remaining, namespace=args)
        return args

    def call_pre_suite(self, testcount, testidlist):
        """Run every plugin's pre_suite hook."""
        for pgn_inst in self.plugin_instances:
            pgn_inst.pre_suite(testcount, testidlist)

    def call_post_suite(self, index):
        """Run every plugin's post_suite hook, in reverse load order."""
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_suite(index)

    def call_pre_case(self, caseinfo, *, test_skip=False):
        """Run every plugin's pre_case hook; log and re-raise on failure."""
        for pgn_inst in self.plugin_instances:
            try:
                pgn_inst.pre_case(caseinfo, test_skip)
            except Exception as ee:
                print('exception {} in call to pre_case for {} plugin'.
                      format(ee, pgn_inst.__class__))
                print('testid is {}'.format(caseinfo['id']))
                raise

    def call_post_case(self):
        """Run every plugin's post_case hook, in reverse load order."""
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_case()

    def call_pre_execute(self):
        """Run every plugin's pre_execute hook."""
        for pgn_inst in self.plugin_instances:
            pgn_inst.pre_execute()

    def call_post_execute(self):
        """Run every plugin's post_execute hook, in reverse load order."""
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_execute()

    def call_add_args(self, parser):
        """Give each plugin a chance to add its own CLI arguments."""
        for pgn_inst in self.plugin_instances:
            parser = pgn_inst.add_args(parser)
        return parser

    def call_check_args(self, args, remaining):
        """Let each plugin validate the parsed CLI arguments."""
        for pgn_inst in self.plugin_instances:
            pgn_inst.check_args(args, remaining)

    def call_adjust_command(self, stage, command):
        """Let each plugin rewrite *command* before it is executed."""
        for pgn_inst in self.plugin_instances:
            command = pgn_inst.adjust_command(stage, command)
        return command

    def set_args(self, args):
        """Remember the parsed args for later plugin loads (load_plugin)."""
        self.args = args

    @staticmethod
    def _make_argparser(args):
        # BUGFIX: this staticmethod previously assigned to 'self', which
        # does not exist in a staticmethod and raised NameError when
        # called; build and return the parser instead.
        return argparse.ArgumentParser(description='Linux TC unit tests')
168
def replace_keywords(cmd):
    """
    Return *cmd* with every $-style variable known in NAMES expanded;
    unknown placeholders are left untouched (safe_substitute).
    """
    return Template(cmd).safe_substitute(NAMES)
177
178
def exec_cmd(args, pm, stage, command):
    """
    Expand, plugin-adjust and run *command* in a shell subprocess.

    Returns (Popen, decoded output); (None, None) for a blank command.
    On timeout the returncode is forced to 255 and the output carries a
    timeout message.
    """
    if not command.strip():
        return None, None
    if '$' in command:
        command = replace_keywords(command)

    command = pm.call_adjust_command(stage, command)
    if args.verbose > 0:
        print('command "{}"'.format(command))

    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=ENVIR)
    try:
        rawout, serr = proc.communicate(timeout=NAMES['TIMEOUT'])
        # Prefer stderr when the command failed and produced any.
        chosen = serr if (proc.returncode != 0 and len(serr) > 0) else rawout
        foutput = chosen.decode("utf-8", errors="ignore")
    except subprocess.TimeoutExpired:
        foutput = "Command \"{}\" timed out\n".format(command)
        proc.returncode = 255

    proc.stdout.close()
    proc.stderr.close()
    return proc, foutput
211
212
def prepare_env(args, pm, stage, prefix, cmdlist, output=None):
    """
    Run the setup/teardown command list for a test case.

    Raises PluginMgrTestFail when a command exits with a code not in
    its allowed set, aborting the test run.
    """
    if args.verbose > 0:
        print('{}'.format(prefix))

    for cmdinfo in cmdlist:
        # A list entry is [cmd, allowed_exit_code, ...]; a bare string
        # is a command that must exit 0.
        if isinstance(cmdinfo, list):
            cmd, exit_codes = cmdinfo[0], cmdinfo[1:]
        else:
            cmd, exit_codes = cmdinfo, [0]

        if not cmd:
            continue

        (proc, foutput) = exec_cmd(args, pm, stage, cmd)

        if proc is None or proc.returncode in exit_codes:
            continue

        print('', file=sys.stderr)
        print("{} *** Could not execute: \"{}\"".format(prefix, cmd),
              file=sys.stderr)
        print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
              file=sys.stderr)
        print("returncode {}; expected {}".format(proc.returncode,
                                                  exit_codes))
        print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
        print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
        print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
        raise PluginMgrTestFail(
            stage, output,
            '"{}" did not complete successfully'.format(prefix))
247
def run_one_test(pm, args, index, tidx):
    """
    Run a single test case *tidx* (a dict loaded from JSON): plugin
    pre/post hooks, setup commands, the command under test, the verify
    stage and teardown.  Returns a TestResult (pass/fail/skip).
    """
    global NAMES
    result = True
    tresult = ""
    tap = ""
    res = TestResult(tidx['id'], tidx['name'])
    if args.verbose > 0:
        print("\t====================\n=====> ", end="")
        print("Test " + tidx["id"] + ": " + tidx["name"])

    # Honour an explicit skip marker; plugins still see the case so
    # they can account for it.
    if 'skip' in tidx:
        if tidx['skip'] == 'yes':
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg('Test case designated as skipped.')
            pm.call_pre_case(tidx, test_skip=True)
            pm.call_post_execute()
            return res

    # populate NAMES with TESTID for this test
    NAMES['TESTID'] = tidx['id']

    pm.call_pre_case(tidx)
    prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])

    if (args.verbose > 0):
        print('-----> execute stage')
    pm.call_pre_execute()
    (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
    # exec_cmd returns (None, None) for a blank command line.
    if p:
        exit_code = p.returncode
    else:
        exit_code = None

    pm.call_post_execute()

    if (exit_code is None or exit_code != int(tidx["expExitCode"])):
        # Wrong (or missing) exit status: fail without running verify.
        print("exit: {!r}".format(exit_code))
        print("exit: {}".format(int(tidx["expExitCode"])))
        #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
        res.set_result(ResultState.fail)
        res.set_failmsg('Command exited with {}, expected {}\n{}'.format(exit_code, tidx["expExitCode"], procout))
        print(procout)
    else:
        if args.verbose > 0:
            print('-----> verify stage')
        # The match pattern is applied across the whole verify output.
        match_pattern = re.compile(
            str(tidx["matchPattern"]), re.DOTALL | re.MULTILINE)
        (p, procout) = exec_cmd(args, pm, 'verify', tidx["verifyCmd"])
        if procout:
            # The test passes only when the pattern occurs exactly
            # matchCount times in the verify output.
            match_index = re.findall(match_pattern, procout)
            if len(match_index) != int(tidx["matchCount"]):
                res.set_result(ResultState.fail)
                res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
            else:
                res.set_result(ResultState.success)
        elif int(tidx["matchCount"]) != 0:
            res.set_result(ResultState.fail)
            res.set_failmsg('No output generated by verify command.')
        else:
            res.set_result(ResultState.success)

    # Teardown runs regardless of verify outcome; pass the accumulated
    # output along so failures can report it.
    prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
    pm.call_post_case()

    index += 1

    # remove TESTID from NAMES
    del(NAMES['TESTID'])
    return res
318
def test_runner(pm, args, filtered_tests):
    """
    Driver function for the unit tests.

    Prints information about the tests being run, executes the setup and
    teardown commands and the command under test itself. Also determines
    success/failure based on the information in the test case and generates
    TAP output accordingly.

    Returns a TestSuiteReport, or an emergency-exit message string if
    the pre-suite plugin stage failed.
    """
    testlist = filtered_tests
    tcount = len(testlist)
    index = 1
    tap = ''
    badtest = None
    stage = None
    emergency_exit = False
    emergency_exit_message = ''

    tsr = TestSuiteReport()

    # A pre_suite failure aborts the whole run (emergency exit) but
    # still gives plugins their post_suite callback.
    try:
        pm.call_pre_suite(tcount, [tidx['id'] for tidx in testlist])
    except Exception as ee:
        ex_type, ex, ex_tb = sys.exc_info()
        print('Exception {} {} (caught in pre_suite).'.
              format(ex_type, ex))
        traceback.print_tb(ex_tb)
        emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex)
        emergency_exit = True
        stage = 'pre-SUITE'

    if emergency_exit:
        pm.call_post_suite(index)
        return emergency_exit_message
    if args.verbose > 1:
        print('give test rig 2 seconds to stabilize')
    # Settle time happens even when not verbose.
    time.sleep(2)
    for tidx in testlist:
        # Tests in the "flower" category need a physical NIC (-d/DEV2);
        # skip them when none was supplied.
        if "flower" in tidx["category"] and args.device == None:
            errmsg = "Tests using the DEV2 variable must define the name of a "
            errmsg += "physical NIC with the -d option when running tdc.\n"
            errmsg += "Test has been skipped."
            if args.verbose > 1:
                print(errmsg)
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg(errmsg)
            tsr.add_resultdata(res)
            continue
        try:
            badtest = tidx  # in case it goes bad
            res = run_one_test(pm, args, index, tidx)
            tsr.add_resultdata(res)
        except PluginMgrTestFail as pmtf:
            # A stage failure marks this test skipped and aborts the
            # remainder of the suite (break below).
            ex_type, ex, ex_tb = sys.exc_info()
            stage = pmtf.stage
            message = pmtf.message
            output = pmtf.output
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg(pmtf.message)
            res.set_failmsg(pmtf.output)
            tsr.add_resultdata(res)
            index += 1
            print(message)
            print('Exception {} {} (caught in test_runner, running test {} {} {} stage {})'.
                  format(ex_type, ex, index, tidx['id'], tidx['name'], stage))
            print('---------------')
            print('traceback')
            traceback.print_tb(ex_tb)
            print('---------------')
            if stage == 'teardown':
                print('accumulated output for this test:')
                if pmtf.output:
                    print(pmtf.output)
            print('---------------')
            break
        index += 1

    # if we failed in setup or teardown,
    # fill in the remaining tests with ok-skipped
    count = index

    if tcount + 1 != count:
        for tidx in testlist[count - 1:]:
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            msg = 'skipped - previous {} failed {} {}'.format(stage,
                index, badtest.get('id', '--Unknown--'))
            res.set_errormsg(msg)
            tsr.add_resultdata(res)
            count += 1

    if args.pause:
        print('Want to pause\nPress enter to continue ...')
        if input(sys.stdin):
            print('got something on stdin')

    pm.call_post_suite(index)

    return tsr
420
def has_blank_ids(idlist):
    """
    Return True when at least one ID in *idlist* is empty (falsy),
    False otherwise (including for an empty list).
    """
    return any(not k for k in idlist)
426
427
def load_from_file(filename):
    """
    Read a JSON test-case file and return its tests as a list of
    OrderedDicts.  A malformed file is reported and yields an empty
    list rather than aborting the run.
    """
    try:
        with open(filename) as test_data:
            testlist = json.load(test_data, object_pairs_hook=OrderedDict)
    except json.JSONDecodeError as jde:
        print('IGNORING test case file {}\n\tBECAUSE: {}'.format(filename, jde))
        return list()

    # When blank IDs are present, tag each case with its source file so
    # generate_case_ids() can write the new IDs back to the right file.
    if has_blank_ids(get_id_list(testlist)):
        for k in testlist:
            k['filename'] = filename
    return testlist
445
446
def args_parse():
    """Build and return the top-level argument parser for tdc."""
    return argparse.ArgumentParser(description='Linux TC unit tests')
453
454
def set_args(parser):
    """
    Register tdc's command line arguments on *parser* and return it.
    """
    parser.add_argument(
        '--outfile', type=str,
        help='Path to the file in which results should be saved. '
             'Default target is the current directory.')
    parser.add_argument(
        '-p', '--path', type=str,
        help='The full path to the tc executable to use')

    # Group the options that pick which tests run vs. what to do with them.
    sel_group = parser.add_argument_group(
        'selection',
        'select which test cases: '
        'files plus directories; filtered by categories plus testids')
    act_group = parser.add_argument_group(
        'action', 'select action to perform on selected test cases')

    sel_group.add_argument(
        '-D', '--directory', nargs='+', metavar='DIR',
        help='Collect tests from the specified directory(ies) '
             '(default [tc-tests])')
    sel_group.add_argument(
        '-f', '--file', nargs='+', metavar='FILE',
        help='Run tests from the specified file(s)')
    sel_group.add_argument(
        '-c', '--category', nargs='*', metavar='CATG', default=['+c'],
        help='Run tests only from the specified category/ies, '
             'or if no category/ies is/are specified, list known categories.')
    sel_group.add_argument(
        '-e', '--execute', nargs='+', metavar='ID',
        help='Execute the specified test cases with specified IDs')

    act_group.add_argument(
        '-l', '--list', action='store_true',
        help='List all test cases, or those only within the specified category')
    act_group.add_argument(
        '-s', '--show', action='store_true', dest='showID',
        help='Display the selected test cases')
    act_group.add_argument(
        '-i', '--id', action='store_true', dest='gen_id',
        help='Generate ID numbers for new test cases')

    parser.add_argument(
        '-v', '--verbose', action='count', default=0,
        help='Show the commands that are being run')
    parser.add_argument(
        '--format', default='tap', const='tap', nargs='?',
        choices=['none', 'xunit', 'tap'],
        help='Specify the format for test results. (Default: TAP)')
    parser.add_argument(
        '-d', '--device',
        help='Execute test cases that use a physical device, '
             'where DEVICE is its name. (If not defined, tests '
             'that require a physical device will be skipped)')
    parser.add_argument(
        '-P', '--pause', action='store_true',
        help='Pause execution just before post-suite stage')
    return parser
510
511
def check_default_settings(args, remaining, pm):
    """
    Apply command-line overrides to the global NAMES table, make sure a
    TIMEOUT entry exists, and verify the configured tc binary is
    present (exit(1) when it is not).  Finally lets plugins validate
    the arguments.
    """
    # Allow for overriding specific settings
    global NAMES

    if args.path is not None:
        NAMES['TC'] = args.path
    if args.device is not None:
        NAMES['DEV2'] = args.device
    NAMES.setdefault('TIMEOUT', None)
    if not os.path.isfile(NAMES['TC']):
        print("The specified tc path " + NAMES['TC'] + " does not exist.")
        exit(1)

    pm.call_check_args(args, remaining)
531
532
def get_id_list(alltests):
    """Return the 'id' field of every test case, preserving order."""
    return [testcase["id"] for testcase in alltests]
538
539
def check_case_id(alltests):
    """
    Return every test ID that occurs more than once, one entry per
    occurrence (so a doubled ID appears twice in the result).
    """
    ids = [tc["id"] for tc in alltests]
    # Count each ID once up front instead of calling list.count per item.
    occurrences = {}
    for tid in ids:
        occurrences[tid] = occurrences.get(tid, 0) + 1
    return [tid for tid in ids if occurrences[tid] > 1]
546
547
def does_id_exist(alltests, newid):
    """Return True if *newid* matches the ID of any existing test case."""
    return any(tc["id"] == newid for tc in alltests)
554
555
def generate_case_ids(alltests):
    """
    If a test case has a blank ID field, generate a random hex ID for it
    and then write the test cases back to disk.

    Returns the (mutated) alltests list.  BUGFIX: the caller does
    'alltests = generate_case_ids(alltests)', so the previous implicit
    None return discarded the whole test list.
    """
    import random
    for c in alltests:
        if (c["id"] == ""):
            # Keep drawing 4-hex-digit IDs until an unused one appears.
            while True:
                newid = str('{:04x}'.format(random.randrange(16**4)))
                if (does_id_exist(alltests, newid)):
                    continue
                else:
                    c['id'] = newid
                    break

    # Collect the distinct set of files that contributed test cases
    # (load_from_file only tags cases when blank IDs were present).
    ufilename = []
    for c in alltests:
        if ('filename' in c):
            ufilename.append(c['filename'])
    ufilename = get_unique_item(ufilename)
    for f in ufilename:
        testlist = []
        for t in alltests:
            if 'filename' in t:
                if t['filename'] == f:
                    # Strip the bookkeeping key before writing back.
                    del t['filename']
                    testlist.append(t)
        # Context manager guarantees the file is closed even on error.
        with open(f, "w") as outfile:
            json.dump(testlist, outfile, indent=4)
            outfile.write("\n")
    return alltests
588
def filter_tests_by_id(args, testlist):
    '''
    Keep only the tests whose IDs were named with -e/--execute.
    Returns an empty list when no IDs were requested (or no tests given).
    '''
    if not (testlist and args.execute):
        return list()

    wanted = args.execute
    if not (isinstance(wanted, list) and len(wanted) > 0):
        return list()

    return [tc for tc in testlist if tc['id'] in wanted]
601
def filter_tests_by_category(args, testlist):
    '''
    Keep only the tests belonging to at least one requested category,
    never adding the same test twice even if it matches several
    categories.
    '''
    answer = list()
    if not (args.category and testlist):
        return answer

    seen_ids = list()
    for catg in set(args.category):
        # '+c' is the sentinel default meaning "no category chosen".
        if catg == '+c':
            continue
        print('considering category {}'.format(catg))
        for tc in testlist:
            if catg in tc['category'] and tc['id'] not in seen_ids:
                answer.append(tc)
                seen_ids.append(tc['id'])

    return answer
619
620
def get_test_cases(args):
    """
    If a test case file is specified, retrieve tests from that file.
    Otherwise, glob for all json files in subdirectories and load from
    each one.
    Also, if requested, filter by category, and add tests matching
    certain ids.

    Returns (all categories, all ids, tests grouped by category,
    selected tests).
    """
    import fnmatch

    flist = []
    testdirs = ['tc-tests']

    if args.file:
        # at least one file was specified - remove the default directory
        testdirs = []

        for ff in args.file:
            if not os.path.isfile(ff):
                print("IGNORING file " + ff + "\n\tBECAUSE does not exist.")
            else:
                flist.append(os.path.abspath(ff))

    if args.directory:
        testdirs = args.directory

    # Recursively collect every .json file under each test directory.
    for testdir in testdirs:
        for root, dirnames, filenames in os.walk(testdir):
            for filename in fnmatch.filter(filenames, '*.json'):
                candidate = os.path.abspath(os.path.join(root, filename))
                if candidate not in testdirs:
                    flist.append(candidate)

    alltestcases = list()
    for casefile in flist:
        alltestcases = alltestcases + (load_from_file(casefile))

    allcatlist = get_test_categories(alltestcases)
    allidlist = get_id_list(alltestcases)

    testcases_by_cats = get_categorized_testlist(alltestcases, allcatlist)
    idtestcases = filter_tests_by_id(args, alltestcases)
    cattestcases = filter_tests_by_category(args, alltestcases)

    cat_ids = [x['id'] for x in cattestcases]
    if args.execute:
        if args.category:
            # Union of category matches plus explicitly requested IDs
            # not already covered by a category.
            alltestcases = cattestcases + [x for x in idtestcases if x['id'] not in cat_ids]
        else:
            alltestcases = idtestcases
    else:
        if cat_ids:
            alltestcases = cattestcases
        else:
            # just accept the existing value of alltestcases,
            # which has been filtered by file/directory
            pass

    return allcatlist, allidlist, testcases_by_cats, alltestcases
680
681
def set_operation_mode(pm, parser, args, remaining):
    """
    Load the test case data and process remaining arguments to determine
    what the script should do for this run, and call the appropriate
    function.
    """
    ucat, idlist, testcases, alltests = get_test_cases(args)

    # -i/--id: fill in missing IDs, then stop.
    if args.gen_id:
        if (has_blank_ids(idlist)):
            alltests = generate_case_ids(alltests)
        else:
            print("No empty ID fields found in test files.")
        exit(0)

    duplicate_ids = check_case_id(alltests)
    if (len(duplicate_ids) > 0):
        print("The following test case IDs are not unique:")
        print(str(set(duplicate_ids)))
        print("Please correct them before continuing.")
        exit(1)

    if args.showID:
        for atest in alltests:
            print_test_case(atest)
        exit(0)

    # Bare -c with no categories: list the known categories instead.
    if isinstance(args.category, list) and (len(args.category) == 0):
        print("Available categories:")
        print_sll(ucat)
        exit(0)

    if args.list:
        # BUGFIX: collapsed a redundant nested 'if args.list:' that
        # repeated the identical condition.
        list_test_cases(alltests)
        exit(0)

    if len(alltests):
        req_plugins = pm.get_required_plugins(alltests)
        try:
            args = pm.load_required_plugins(req_plugins, parser, args, remaining)
        except PluginDependencyException as pde:
            print('The following plugins were not found:')
            print('{}'.format(pde.missing_pg))
        catresults = test_runner(pm, args, alltests)
        if args.format == 'none':
            print('Test results output suppression requested\n')
        else:
            print('\nAll test results: \n')
            if args.format == 'xunit':
                suffix = 'xml'
                res = catresults.format_xunit()
            elif args.format == 'tap':
                suffix = 'tap'
                res = catresults.format_tap()
            print(res)
            print('\n\n')
            if not args.outfile:
                fname = 'test-results.{}'.format(suffix)
            else:
                fname = args.outfile
            # The context manager closes the file; the explicit
            # fh.close() inside the with-block was redundant.
            with open(fname, 'w') as fh:
                fh.write(res)
            # When run under sudo, hand ownership of the results file
            # back to the invoking user.
            if os.getenv('SUDO_UID') is not None:
                os.chown(fname, uid=int(os.getenv('SUDO_UID')),
                         gid=int(os.getenv('SUDO_GID')))
    else:
        print('No tests found\n')
751
def main():
    """
    Program entry point: build the CLI parser, load plugins, parse the
    arguments, validate settings and dispatch to the selected operation.
    """
    parser = set_args(args_parse())
    pm = PluginMgr(parser)
    parser = pm.call_add_args(parser)

    (args, remaining) = parser.parse_known_args()
    args.NAMES = NAMES
    pm.set_args(args)
    check_default_settings(args, remaining, pm)
    if args.verbose > 2:
        print('args is {}'.format(args))

    set_operation_mode(pm, parser, args, remaining)

    exit(0)
771
772
# Run the driver only when executed as a script, not on import.
if __name__ == "__main__":
    main()