From e3e52d8c7419281d96e8181de16cc27fb9e685c7 Mon Sep 17 00:00:00 2001 From: Martin Mathieson Date: Fri, 12 Jul 2024 15:08:46 +0000 Subject: [PATCH] Fix up some python script issues seen with ruff --- doc/extcap_example.py | 1 - tools/asn2wrs.py | 63 ++++++++++++++--------------- tools/check_dissector_urls.py | 22 +++++----- tools/check_spelling.py | 12 +++--- tools/check_static.py | 16 ++++---- tools/check_tfs.py | 17 ++------ tools/check_typed_item_calls.py | 54 ++++++++++++------------- tools/check_val_to_str.py | 2 +- tools/convert-glib-types.py | 2 +- tools/delete_includes.py | 3 -- tools/generate_authors.py | 2 - tools/make-bluetooth.py | 2 - tools/make-isobus.py | 15 ++++--- tools/make-manuf.py | 3 +- tools/make-services.py | 19 ++++----- tools/parse_xml2skinny_dissector.py | 26 ++++++------ tools/pre-commit-ignore.py | 5 +-- tools/validate-commit.py | 5 +-- 18 files changed, 121 insertions(+), 148 deletions(-) diff --git a/doc/extcap_example.py b/doc/extcap_example.py index e7af44b4d5..60ec7e930c 100755 --- a/doc/extcap_example.py +++ b/doc/extcap_example.py @@ -36,7 +36,6 @@ import re import argparse import time import struct -import array from threading import Thread ERROR_USAGE = 0 diff --git a/tools/asn2wrs.py b/tools/asn2wrs.py index b6ecc81bc1..10266c4ffd 100755 --- a/tools/asn2wrs.py +++ b/tools/asn2wrs.py @@ -45,7 +45,7 @@ import os import os.path import time import getopt -import traceback +#import traceback try: from ply import lex @@ -645,9 +645,9 @@ class EthCtx: return False def value_max(self, a, b): - if (a == 'MAX') or (b == 'MAX'): return 'MAX'; - if a == 'MIN': return b; - if b == 'MIN': return a; + if (a == 'MAX') or (b == 'MAX'): return 'MAX' + if a == 'MIN': return b + if b == 'MIN': return a try: if (int(a) > int(b)): return a @@ -658,9 +658,9 @@ class EthCtx: return "MAX((%s),(%s))" % (a, b) def value_min(self, a, b): - if (a == 'MIN') or (b == 'MIN'): return 'MIN'; - if a == 'MAX': return b; - if b == 'MAX': return a; + if (a == 
'MIN') or (b == 'MIN'): return 'MIN' + if a == 'MAX': return b + if b == 'MAX': return a try: if (int(a) < int(b)): return a @@ -724,7 +724,7 @@ class EthCtx: val = self.type[t]['val'] (ftype, display) = val.eth_ftype(self) attr.update({ 'TYPE' : ftype, 'DISPLAY' : display, - 'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }); + 'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }) else: attr.update(self.type[t]['attr']) attr.update(self.eth_type[self.type[t]['ethname']]['attr']) @@ -1061,7 +1061,7 @@ class EthCtx: #--- eth_clean -------------------------------------------------------------- def eth_clean(self): - self.proto = self.proto_opt; + self.proto = self.proto_opt #--- ASN.1 tables ---------------- self.assign = {} self.assign_ord = [] @@ -1243,14 +1243,12 @@ class EthCtx: for t in self.eth_type_ord: bits = self.eth_type[t]['val'].eth_named_bits() if (bits): - old_val = 0 for (val, id) in bits: self.named_bit.append({'name' : id, 'val' : val, 'ethname' : 'hf_%s_%s_%s' % (self.eproto, t, asn2c(id)), 'ftype' : 'FT_BOOLEAN', 'display' : '8', 'strings' : 'NULL', 'bitmask' : '0x'+('80','40','20','10','08','04','02','01')[val%8]}) - old_val = val + 1 if self.eth_type[t]['val'].eth_need_tree(): self.eth_type[t]['tree'] = "ett_%s_%s" % (self.eth_type[t]['proto'], t) else: @@ -1614,7 +1612,7 @@ class EthCtx: #--- eth_out_pdu_decl ---------------------------------------------------------- def eth_out_pdu_decl(self, f): - t = self.eth_hf[f]['ethtype'] + #t = self.eth_hf[f]['ethtype'] out = '' if (not self.eth_hf[f]['pdu']['export']): out += 'static ' @@ -1994,7 +1992,7 @@ class EthCtx: if first_decl: fx.write(' /*--- Syntax registrations ---*/\n') first_decl = False - fx.write(' %sregister_ber_syntax_dissector(%s, proto_%s, dissect_%s_PDU);\n' % (new_prefix, k, self.eproto, reg['pdu'])); + fx.write(' %sregister_ber_syntax_dissector(%s, proto_%s, dissect_%s_PDU);\n' % (new_prefix, k, self.eproto, reg['pdu'])) fempty=False self.output.file_close(fx, discard=fempty) @@ 
-2338,13 +2336,13 @@ class EthCnf: return name in self.fn and self.fn[name]['FN_BODY'] def get_fn_text(self, name, ctx): if (name not in self.fn): - return ''; + return '' if (not self.fn[name][ctx]): - return ''; + return '' self.fn[name][ctx]['used'] = True out = self.fn[name][ctx]['text'] if (not self.suppress_line): - out = '#line %u "%s"\n%s\n' % (self.fn[name][ctx]['lineno'], rel_dissector_path(self.fn[name][ctx]['fn']), out); + out = '#line %u "%s"\n%s\n' % (self.fn[name][ctx]['lineno'], rel_dissector_path(self.fn[name][ctx]['fn']), out) return out def add_pdu(self, par, fn, lineno): @@ -2966,7 +2964,7 @@ class EthOut: #--- output_fname ------------------------------------------------------- def output_fname(self, ftype, ext='c'): fn = '' - if not ext in ('cnf',): + if ext not in ('cnf',): fn += 'packet-' fn += self.outnm if (ftype): @@ -3063,10 +3061,10 @@ class EthOut: include = re.compile(r'^\s*#\s*include\s+[<"](?P[^>"]+)[>"]', re.IGNORECASE) - cont_linenum = 0; + cont_linenum = 0 while (True): - cont_linenum = cont_linenum + 1; + cont_linenum = cont_linenum + 1 line = fin.readline() if (line == ''): break ifile = None @@ -3748,9 +3746,9 @@ class Module (Node): class Module_Body (Node): def to_python (self, ctx): # XXX handle exports, imports. 
- l = [x.to_python (ctx) for x in self.assign_list] - l = [a for a in l if a != ''] - return "\n".join (l) + list = [x.to_python (ctx) for x in self.assign_list] + list = [a for a in list if a != ''] + return "\n".join(list) def to_eth(self, ectx): # Exports @@ -4093,7 +4091,8 @@ class SeqType (SqType): autotag = True lst = self.all_components() for e in (self.elt_list): - if e.val.HasOwnTag(): autotag = False; break; + if e.val.HasOwnTag(): autotag = False + break # expand COMPONENTS OF if self.need_components(): if components_available: @@ -4113,7 +4112,7 @@ class SeqType (SqType): e.val.SetName("eag_v%s" % (e.val.ver)) else: e.val.SetName("eag_%d" % (eag_num)) - eag_num += 1; + eag_num += 1 else: # expand new_ext_list = [] for e in (self.ext_list): @@ -4498,10 +4497,10 @@ class ChoiceType (Type): if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')): autotag = True for e in (self.elt_list): - if e.HasOwnTag(): autotag = False; break; + if e.HasOwnTag(): autotag = False; break if autotag and hasattr(self, 'ext_list'): for e in (self.ext_list): - if e.HasOwnTag(): autotag = False; break; + if e.HasOwnTag(): autotag = False; break # do autotag if autotag: atag = 0 @@ -5613,7 +5612,7 @@ class BitStringType (Type): if (self.named_list): sorted_list = self.named_list sorted_list.sort() - expected_bit_no = 0; + expected_bit_no = 0 for e in (sorted_list): # Fill the table with "spare_bit" for "un named bits" if (int(e.val) != 0) and (expected_bit_no != int(e.val)): @@ -7350,7 +7349,7 @@ def p_cls_syntax_4 (t): def p_cls_syntax_5 (t): 'cls_syntax : CODE Value' - fld = get_class_field(t[1]); + fld = get_class_field(t[1]) t[0] = { fld : t[2] } if isinstance(t[2], ChoiceValue): fldt = fld + '.' 
+ t[2].choice @@ -8074,7 +8073,7 @@ def asn2wrs_main(): global quiet try: - opts, args = getopt.getopt(sys.argv[1:], "h?d:D:buXp:qFTo:O:c:I:eESs:kLCr:"); + opts, args = getopt.getopt(sys.argv[1:], "h?d:D:buXp:qFTo:O:c:I:eESs:kLCr:") except getopt.GetoptError: eth_usage(); sys.exit(2) if len(args) < 1: @@ -8097,10 +8096,10 @@ def asn2wrs_main(): ectx.merge_modules = False ectx.group_by_prot = False ectx.conform.last_group = 0 - ectx.conform.suppress_line = False; + ectx.conform.suppress_line = False ectx.output.outnm = None ectx.output.single_file = None - ectx.constraints_check = False; + ectx.constraints_check = False for o, a in opts: if o in ("-h", "-?"): eth_usage(); sys.exit(2) @@ -8138,7 +8137,7 @@ def asn2wrs_main(): if a: par.append(a) ectx.conform.set_opt(o, par, "commandline", 0) - (ld, yd, pd) = (0, 0, 0); + (ld, yd, pd) = (0, 0, 0) if ectx.dbg('l'): ld = 1 if ectx.dbg('y'): yd = 1 if ectx.dbg('p'): pd = 2 diff --git a/tools/check_dissector_urls.py b/tools/check_dissector_urls.py index 28d3954a58..bdf0c81913 100755 --- a/tools/check_dissector_urls.py +++ b/tools/check_dissector_urls.py @@ -191,8 +191,8 @@ async def check_all_links(links): except (asyncio.CancelledError): await session.close() - for l in links: - l.validate() + for link in links: + link.validate() ################################################################# @@ -288,21 +288,21 @@ asyncio.run(check_all_links(links)) if os.path.exists('failures.txt'): shutil.copyfile('failures.txt', 'failures_last_run.txt') with open('failures.txt', 'w') as f_f: - for l in links: - if l.tested and not l.success: - f_f.write(str(l) + '\n') + for link in links: + if link.tested and not link.success: + f_f.write(str(link) + '\n') # And successes with open('successes.txt', 'w') as f_s: - for l in links: - if l.tested and l.success: - f_s.write(str(l) + '\n') + for link in links: + if link.tested and link.success: + f_s.write(str(link) + '\n') # Count and show overall stats. 
passed, failed = 0, 0 -for l in links: - if l.tested: - if l.success: +for link in links: + if link.tested: + if link.success: passed += 1 else: failed += 1 diff --git a/tools/check_spelling.py b/tools/check_spelling.py index 619ab871d7..c8b54f8106 100755 --- a/tools/check_spelling.py +++ b/tools/check_spelling.py @@ -12,8 +12,9 @@ import subprocess import argparse import signal import glob -from collections import Counter +from spellchecker import SpellChecker +from collections import Counter from html.parser import HTMLParser import urllib.request @@ -51,7 +52,6 @@ signal.signal(signal.SIGINT, signal_handler) # Create spellchecker, and augment with some Wireshark words. -from spellchecker import SpellChecker # Set up our dict with words from text file. spell = SpellChecker() spell.word_frequency.load_text_file('./tools/wireshark_words.txt') @@ -499,7 +499,7 @@ if not args.no_wikipedia: parser.feed(content) content = parser.content.strip() - wiki_db = dict(l.lower().split('->', maxsplit=1) for l in content.splitlines()) + wiki_db = dict(line.lower().split('->', maxsplit=1) for line in content.splitlines()) del wiki_db['cmo'] # All false positives. del wiki_db['ect'] # Too many false positives. del wiki_db['thru'] # We'll let that one thru. ;-) @@ -514,11 +514,11 @@ if not args.no_wikipedia: spell.word_frequency.remove_words([word]) #print('Removed', word) removed += 1 - except: + except Exception: pass print('Removed', removed, 'known bad words') - except: + except Exception: print('Failed to fetch and/or parse Wikipedia mispellings!') @@ -555,7 +555,7 @@ if args.open: # Filter files. 
files_staged = list(filter(lambda f : isAppropriateFile(f) and not isGeneratedFile(f), files_staged)) for f in files_staged: - if not f in files: + if f not in files: files.append(f) if args.glob: diff --git a/tools/check_static.py b/tools/check_static.py index 000227c683..7af30a301c 100755 --- a/tools/check_static.py +++ b/tools/check_static.py @@ -61,14 +61,14 @@ class CalledSymbols: # Run command to check symbols. command = ['nm', object_file] for f in subprocess.check_output(command).splitlines(): - l = str(f)[2:-1] + line = str(f)[2:-1] # Lines might, or might not, have an address before letter and symbol. p1 = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)') p2 = re.compile(r'[ ]* ([a-zA-Z]) (.*)') - m = p1.match(l) + m = p1.match(line) if not m: - m = p2.match(l) + m = p2.match(line) if m: letter = m.group(1) function_name = m.group(2) @@ -113,15 +113,15 @@ class DefinedSymbols: command = ['nm', object_file] for f in subprocess.check_output(command).splitlines(): # Line consists of whitespace, [address], letter, symbolName - l = str(f)[2:-1] + line = str(f)[2:-1] p = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)') - m = p.match(l) + m = p.match(line) if m: letter = m.group(1) function_name = m.group(2) # Globally-defined symbols. Would be 't' or 'd' if already static.. 
if letter in 'TD': - self.addDefinedSymbol(function_name, l) + self.addDefinedSymbol(function_name, line) def addDefinedSymbol(self, symbol, line): self.global_symbols[symbol] = line @@ -156,7 +156,7 @@ class DefinedSymbols: def checkIfSymbolsAreCalled(self, called_symbols): global issues_found for f in self.global_symbols: - if not f in called_symbols: + if f not in called_symbols: mentioned_in_header = self.mentionedInHeaders(f) fun = self.global_symbols[f] print(self.filename, '' if not self.from_generated_file else '(GENERATED)', @@ -299,7 +299,7 @@ elif args.open: for f in files: files.append(f) for f in files_staged: - if not f in files: + if f not in files: files.append(f) else: # Find all dissector files from folder. diff --git a/tools/check_tfs.py b/tools/check_tfs.py index 5f8cdcb688..d9e2e5ae07 100755 --- a/tools/check_tfs.py +++ b/tools/check_tfs.py @@ -209,16 +209,10 @@ class Item: if self.check_bit(self.mask_value, n): self.bits_set += 1 - def check_bit(self, value, n): - return (value & (0x1 << n)) != 0 - - def __str__(self): return 'Item ({0} "{1}" {2} type={3}:{4} strings={5} mask={6})'.format(self.filename, self.label, self.filter, self.item_type, self.type_modifier, self.strings, self.mask) - - def set_mask_value(self, macros): try: self.mask_read = True @@ -226,12 +220,11 @@ class Item: # Substitute mask if found as a macro.. if self.mask in macros: self.mask = macros[self.mask] - elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask): + elif any(c not in '0123456789abcdefABCDEFxX' for c in self.mask): self.mask_read = False self.mask_value = 0 return - # Read according to the appropriate base. 
if self.mask.startswith('0x'): self.mask_value = int(self.mask, 16) @@ -239,7 +232,7 @@ class Item: self.mask_value = int(self.mask, 8) else: self.mask_value = int(self.mask, 10) - except: + except Exception: self.mask_read = False self.mask_value = 0 @@ -261,8 +254,7 @@ class Item: try: # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble. return int((int(self.type_modifier) + 3)/4)*4 - except: - #print('oops', self) + except Exception: return 0 else: if self.item_type in field_widths: @@ -332,7 +324,6 @@ def findValueStrings(filename): # Look for hf items (i.e. full item to be registered) in a dissector file. def find_items(filename, macros, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False): - is_generated = isGeneratedFile(filename) items = {} with open(filename, 'r', encoding="utf8", errors="ignore") as f: contents = f.read() @@ -501,7 +492,7 @@ def checkFile(filename, common_tfs, look_for_common=False, check_value_strings=F for c in common_tfs: m = re.search(r'TFS\(\s*\&' + c + r'\s*\)', contents) if m: - if not c in common_usage: + if c not in common_usage: common_usage[c] = 1 else: common_usage[c] += 1 diff --git a/tools/check_typed_item_calls.py b/tools/check_typed_item_calls.py index 1270cb85ae..c89c7633a1 100755 --- a/tools/check_typed_item_calls.py +++ b/tools/check_typed_item_calls.py @@ -47,12 +47,12 @@ class Call: if length: try: self.length = int(length) - except: + except Exception: if length.isupper(): if length in macros: try: self.length = int(macros[length]) - except: + except Exception: pass pass @@ -148,7 +148,6 @@ class APICheck: length = m.group(3) # Add call. We have length if re had 3 groups. - num_groups = self.p.groups self.calls.append(Call(m.group(2), macros, line_number=line_number, @@ -163,7 +162,6 @@ class APICheck: # Walk past any l.s. 
0 bits in value n = 0 - mask_start = n # Walk through any bits that are set and check they are in mask while self.check_bit(value, n) and n <= 63: if not self.check_bit(mask, n): @@ -191,7 +189,7 @@ class APICheck: warnings_found += 1 # Needs a +ve length - if self.positive_length and call.length != None: + if self.positive_length and call.length is not None: if call.length != -1 and call.length <= 0: print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' + self.file + ':' + str(call.line_number) + @@ -200,7 +198,7 @@ class APICheck: if call.hf_name in items_defined: # Is type allowed? - if not items_defined[call.hf_name].item_type in self.allowed_types: + if items_defined[call.hf_name].item_type not in self.allowed_types: print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' + self.file + ':' + str(call.line_number) + ' with type ' + items_defined[call.hf_name].item_type) @@ -226,7 +224,7 @@ class APICheck: warnings_found += 1 if check_missing_items: - if call.hf_name in items_declared and not call.hf_name in items_defined and not call.hf_name in items_declared_extern: + if call.hf_name in items_declared and call.hf_name not in items_defined and call.hf_name not in items_declared_extern: #not in common_hf_var_names: print('Warning:', self.file + ':' + str(call.line_number), self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found') @@ -284,7 +282,7 @@ class ProtoTreeAddItemCheck(APICheck): enc = m.group(3) hf_name = m.group(1) if not enc.startswith('ENC_'): - if not enc in { 'encoding', 'enc', 'client_is_le', 'cigi_byte_order', 'endian', 'endianess', 'machine_encoding', 'byte_order', 'bLittleEndian', + if enc not in { 'encoding', 'enc', 'client_is_le', 'cigi_byte_order', 'endian', 'endianess', 'machine_encoding', 'byte_order', 'bLittleEndian', 'p_mq_parm->mq_str_enc', 'p_mq_parm->mq_int_enc', 'iEnc', 'strid_enc', 'iCod', 'nl_data->encoding', 'argp->info->encoding', 'gquic_info->encoding', 
'writer_encoding', @@ -343,7 +341,7 @@ class ProtoTreeAddItemCheck(APICheck): 'item type is', items_defined[call.hf_name].item_type, 'but call has len', call.length) warnings_found += 1 elif check_missing_items: - if call.hf_name in items_declared and not call.hf_name in items_declared_extern: + if call.hf_name in items_declared and call.hf_name not in items_declared_extern: #not in common_hf_var_names: print('Warning:', self.file + ':' + str(call.line_number), self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found') @@ -570,7 +568,7 @@ class ValueString: value,label = m.group(1), m.group(2) if value in macros: value = macros[value] - elif any(not c in '0123456789abcdefABCDEFxX' for c in value): + elif any(c not in '0123456789abcdefABCDEFxX' for c in value): self.valid = False return @@ -584,7 +582,7 @@ class ValueString: value = int(value, 8) else: value = int(value, 10) - except: + except Exception: return global warnings_found @@ -634,7 +632,7 @@ class ValueString: span = self.max_value - self.min_value + 1 if num_items > 4 and span > num_items and (span-num_items <=1): for val in range(self.min_value, self.max_value): - if not val in self.parsed_vals: + if val not in self.parsed_vals: print('Warning:', self.file, ': value_string', self.name, '- value', val, 'missing?', '(', num_items, 'entries)') global warnings_found warnings_found += 1 @@ -652,7 +650,7 @@ class ValueString: # Be forgiving about first or last entry first_val = list(self.parsed_vals)[0] last_val = list(self.parsed_vals)[-1] - if not first_val in matching_label_entries or not last_val in matching_label_entries: + if first_val not in matching_label_entries or last_val not in matching_label_entries: return print('Warning:', self.file, ': value_string', self.name, 'Labels match value except for 1!', matching_label_entries, num_items, self) @@ -710,12 +708,12 @@ class RangeString: min,max,label = m.group(1), m.group(2), m.group(3) if min in macros: min = macros[min] - elif 
any(not c in '0123456789abcdefABCDEFxX' for c in min): + elif any(c not in '0123456789abcdefABCDEFxX' for c in min): self.valid = False return if max in macros: max = macros[max] - elif any(not c in '0123456789abcdefABCDEFxX' for c in max): + elif any(c not in '0123456789abcdefABCDEFxX' for c in max): self.valid = False return @@ -738,7 +736,7 @@ class RangeString: max = int(max, 8) else: max = int(max, 10) - except: + except Exception: return # Now check what we've found. @@ -958,7 +956,7 @@ class Item: # Optionally check that mask bits are contiguous if check_mask: - if self.mask_read and not mask in { 'NULL', '0x0', '0', '0x00' }: + if self.mask_read and mask not in { 'NULL', '0x0', '0', '0x00' }: self.check_contiguous_bits(mask) self.check_num_digits(self.mask) # N.B., if last entry in set is removed, see around 18,000 warnings @@ -1063,7 +1061,7 @@ class Item: # Substitute mask if found as a macro.. if self.mask in macros: self.mask = macros[self.mask] - elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask): + elif any(c not in '0123456789abcdefABCDEFxX' for c in self.mask): self.mask_read = False self.mask_value = 0 #print(self.filename, 'Could not read:', '"' + self.mask + '"') @@ -1076,7 +1074,7 @@ class Item: self.mask_value = int(self.mask, 8) else: self.mask_value = int(self.mask, 10) - except: + except Exception: self.mask_read = False self.mask_value = 0 @@ -1092,7 +1090,7 @@ class Item: # Substitute display if found as a macro.. 
if display in macros: display = macros[display] - elif any(not c in '0123456789abcdefABCDEFxX' for c in display): + elif any(c not in '0123456789abcdefABCDEFxX' for c in display): self.display_read = False self.display_value = 0 return @@ -1104,7 +1102,7 @@ class Item: self.display_value = int(display, 8) else: self.display_value = int(display, 10) - except: + except Exception: self.display_read = False self.display_value = 0 @@ -1195,7 +1193,7 @@ class Item: # Look up the field width field_width = 0 - if not self.item_type in field_widths: + if self.item_type not in field_widths: print('unexpected item_type is ', self.item_type) field_width = 64 else: @@ -1233,7 +1231,7 @@ class Item: try: # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble. return int((int(self.display) + 3)/4)*4 - except: + except Exception: return None else: if self.item_type in field_widths: @@ -1323,13 +1321,11 @@ class Item: def check_mask_if_in_field_array(self, mask, field_arrays): # Work out if this item appears in a field array found = False - array_name = None for arr in field_arrays: list = field_arrays[arr][0] if self.hf in list: # These need to have a mask - don't judge for being 0 found = True - array_name = arr break if found: @@ -1447,8 +1443,8 @@ class CombinedCallsCheck: # More compelling if close together.. if call.line_number>prev.line_number and call.line_number-prev.line_number <= 4: scope_different = False - for l in range(prev.line_number, call.line_number-1): - if lines[l].find('{') != -1 or lines[l].find('}') != -1 or lines[l].find('else') != -1 or lines[l].find('break;') != -1 or lines[l].find('if ') != -1: + for no in range(prev.line_number, call.line_number-1): + if lines[no].find('{') != -1 or lines[no].find('}') != -1 or lines[no].find('else') != -1 or lines[no].find('break;') != -1 or lines[no].find('if ') != -1: scope_different = True break # Also more compelling if check for and scope changes { } in lines in-between? 
@@ -1668,7 +1664,7 @@ def find_field_arrays(filename, all_fields, all_hf): for m in matches: name = m.group(1) # Ignore if not used in a call to an _add_bitmask_ API - if not name in all_fields: + if name not in all_fields: continue fields_text = m.group(2) @@ -1944,7 +1940,7 @@ elif args.open: # Only interested in dissector files. files_staged = list(filter(lambda f : is_dissector_file(f), files_staged)) for f in files_staged: - if not f in files: + if f not in files: files.append(f) else: # Find all dissector files. diff --git a/tools/check_val_to_str.py b/tools/check_val_to_str.py index d99d2a40c0..7ade009ec9 100755 --- a/tools/check_val_to_str.py +++ b/tools/check_val_to_str.py @@ -207,7 +207,7 @@ elif args.open: # Only interested in dissector files. files_staged = list(filter(lambda f : is_dissector_file(f), files_staged)) for f in files_staged: - if not f in files: + if f not in files: files.append(f) else: # Find all dissector files from folder. diff --git a/tools/convert-glib-types.py b/tools/convert-glib-types.py index fe3e4748e0..b74f2921a6 100755 --- a/tools/convert-glib-types.py +++ b/tools/convert-glib-types.py @@ -103,7 +103,7 @@ def convert_file(file): except UnicodeDecodeError: sys.stderr.write(f"{file} isn't valid UTF-8.\n") return - except: + except Exception: sys.stderr.write(f'Unable to open {file}.\n') return diff --git a/tools/delete_includes.py b/tools/delete_includes.py index cc804e0b04..8ea5e80625 100755 --- a/tools/delete_includes.py +++ b/tools/delete_includes.py @@ -18,7 +18,6 @@ import sys import shutil import argparse import signal -import re from pathlib import Path @@ -190,8 +189,6 @@ def test_file(filename): # Don't want to delete 'self-includes', so prepare filename. 
module_name = Path(filename).stem - extension = Path(filename).suffix - module_header = module_name + '.h' # Loop around, finding all possible include lines to comment out diff --git a/tools/generate_authors.py b/tools/generate_authors.py index 8d4f0a4e42..a536b850fb 100755 --- a/tools/generate_authors.py +++ b/tools/generate_authors.py @@ -16,10 +16,8 @@ # SPDX-License-Identifier: GPL-2.0-or-later import argparse -import io import re import subprocess -import sys def get_git_authors(): diff --git a/tools/make-bluetooth.py b/tools/make-bluetooth.py index c756075921..68f17c797b 100755 --- a/tools/make-bluetooth.py +++ b/tools/make-bluetooth.py @@ -13,9 +13,7 @@ The Bluetooth SIG Repository: https://bitbucket.org/bluetooth-SIG/public/src/mai and processes the YAML into human-readable strings to go into packet-bluetooth.c. ''' -import re import sys -import string import urllib.request, urllib.error, urllib.parse import yaml diff --git a/tools/make-isobus.py b/tools/make-isobus.py index ce0259c7ea..792f2b337e 100644 --- a/tools/make-isobus.py +++ b/tools/make-isobus.py @@ -41,7 +41,6 @@ def open_url_zipped(url): return zipfile.ZipFile(io.BytesIO(body)) def main(): - this_dir = os.path.dirname(__file__) isobus_output_path = os.path.join('epan', 'dissectors', 'packet-isobus-parameters.h') isobus_zip_url = [ "https://www.isobus.net/isobus/attachments/", "isoExport_csv.zip"] @@ -132,7 +131,7 @@ def main(): pgn_id, pgn_name, = row[:2] if not pgn_name.startswith("Proprietary B"): pgn_names[int(pgn_id)] = pgn_name.replace("\"","'") - except: + except Exception: pass # prepare output file @@ -164,7 +163,7 @@ def main(): output_fd.write(" { 0, NULL }\n") output_fd.write("};\n") - output_fd.write("static value_string_ext isobus_industry_groups_ext = VALUE_STRING_EXT_INIT(_isobus_industry_groups);\n\n"); + output_fd.write("static value_string_ext isobus_industry_groups_ext = VALUE_STRING_EXT_INIT(_isobus_industry_groups);\n\n") # Write Vehicle System Names output_fd.write("/* 
key: 256 * Industry-Group-ID + Vehicle-Group-ID */\n") @@ -175,7 +174,7 @@ def main(): output_fd.write(" { 0, NULL }\n") output_fd.write("};\n") - output_fd.write("static value_string_ext isobus_vehicle_systems_ext = VALUE_STRING_EXT_INIT(_isobus_vehicle_systems);\n\n"); + output_fd.write("static value_string_ext isobus_vehicle_systems_ext = VALUE_STRING_EXT_INIT(_isobus_vehicle_systems);\n\n") # Write Global Name Functions output_fd.write("static const value_string _isobus_global_name_functions[] = {\n") @@ -185,7 +184,7 @@ def main(): output_fd.write(" { 0, NULL }\n") output_fd.write("};\n") - output_fd.write("static value_string_ext isobus_global_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_global_name_functions);\n\n"); + output_fd.write("static value_string_ext isobus_global_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_global_name_functions);\n\n") # IG Specific Global Name Functions output_fd.write("/* key: 65536 * Industry-Group-ID + 256 * Vehicle-System-ID + Function-ID */\n") @@ -196,7 +195,7 @@ def main(): output_fd.write(" { 0, NULL }\n") output_fd.write("};\n") - output_fd.write("static value_string_ext isobus_ig_specific_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_ig_specific_name_functions);\n\n"); + output_fd.write("static value_string_ext isobus_ig_specific_name_functions_ext = VALUE_STRING_EXT_INIT(_isobus_ig_specific_name_functions);\n\n") # Write Manufacturers output_fd.write("static const value_string _isobus_manufacturers[] = {\n") @@ -206,7 +205,7 @@ def main(): output_fd.write(" { 0, NULL }\n") output_fd.write("};\n") - output_fd.write("static value_string_ext isobus_manufacturers_ext = VALUE_STRING_EXT_INIT(_isobus_manufacturers);\n\n"); + output_fd.write("static value_string_ext isobus_manufacturers_ext = VALUE_STRING_EXT_INIT(_isobus_manufacturers);\n\n") # PGN Names output_fd.write("static const value_string _isobus_pgn_names[] = {\n") @@ -216,7 +215,7 @@ def main(): output_fd.write(" { 0, NULL }\n") 
output_fd.write("};\n") - output_fd.write("static value_string_ext isobus_pgn_names_ext = VALUE_STRING_EXT_INIT(_isobus_pgn_names);\n\n"); + output_fd.write("static value_string_ext isobus_pgn_names_ext = VALUE_STRING_EXT_INIT(_isobus_pgn_names);\n\n") output_fd.write("#endif /* __PACKET_ISOBUS_PARAMETERS_H__ */") if __name__ == '__main__': diff --git a/tools/make-manuf.py b/tools/make-manuf.py index 81414c46bc..8f0064055b 100755 --- a/tools/make-manuf.py +++ b/tools/make-manuf.py @@ -230,7 +230,6 @@ def prefix_to_oui(prefix, prefix_map): return '{}/{:d}'.format(oui, int(pfx_len)), kind def main(): - this_dir = os.path.dirname(__file__) manuf_path = os.path.join('epan', 'manuf-data.c') ieee_d = { @@ -246,7 +245,7 @@ def main(): MA_S: {}, } - min_total = 35000; # 35830 as of 2018-09-05 + min_total = 35000 # 35830 as of 2018-09-05 total_added = 0 # Add IEEE entries from each of their databases diff --git a/tools/make-services.py b/tools/make-services.py index c55f0f955a..a711ffcc71 100755 --- a/tools/make-services.py +++ b/tools/make-services.py @@ -9,6 +9,14 @@ # # SPDX-License-Identifier: GPL-2.0-or-later +import sys +import getopt +import csv +import re +import collections +import urllib.request, urllib.error, urllib.parse +import codecs + iana_svc_url = 'https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv' __doc__ = '''\ @@ -18,13 +26,6 @@ url defaults to %s ''' % (iana_svc_url) -import sys -import getopt -import csv -import re -import collections -import urllib.request, urllib.error, urllib.parse -import codecs services_file = 'epan/services-data.c' @@ -105,7 +106,7 @@ def parse_rows(svc_fd): if description == service or description == service.replace("-", " "): description = None - if not port in services_map: + if port not in services_map: services_map[port] = collections.OrderedDict() # Remove some duplicates (first entry wins) @@ -117,7 +118,7 @@ def parse_rows(svc_fd): if proto_exists: continue - if not service in 
services_map[port]: + if service not in services_map[port]: services_map[port][service] = [description] services_map[port][service].append(proto) diff --git a/tools/parse_xml2skinny_dissector.py b/tools/parse_xml2skinny_dissector.py index b13776e324..90ed65680b 100755 --- a/tools/parse_xml2skinny_dissector.py +++ b/tools/parse_xml2skinny_dissector.py @@ -239,7 +239,7 @@ def xml2obj(src): def get_req_resp_keys(self, req_resp): for field in self._children: key = field.get_req_resp_key() - if not key is None and not key in req_resp: + if key is not None and key not in req_resp: req_resp.append(key) def declaration(self): @@ -292,7 +292,7 @@ def xml2obj(src): self.decr_indent() ret += self.indent_out('}\n') - return ret; + return ret class Integer(DataNode): def __init__(self): @@ -343,12 +343,12 @@ def xml2obj(src): ret += self.indent_out('{\n') self.incr_indent() variable = 'counter_%d' %indentation - ret += self.indent_out('uint32_t %s = 0;\n' %(variable)); + ret += self.indent_out('uint32_t %s = 0;\n' %(variable)) if self.size_fieldname: ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref:%s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, size, self.size_fieldname)) else: ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size)) - ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable)); + ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable)) if self.basemessage.dynamic == "no" and self.size_fieldname: self.incr_indent() ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname)) @@ -417,7 +417,6 @@ def xml2obj(src): def declaration(self): ret = '' - prevvalue = 0 enum_sizes = {'uint32':4,'uint16':2,'uint8':1} if self.type in enum_sizes: self.intsize = enum_sizes[self.type] @@ -477,12 +476,12 @@ def 
xml2obj(src): ret += self.indent_out('{\n') self.incr_indent() variable = 'counter_%d' %indentation - ret += self.indent_out('uint32_t %s = 0;\n' %(variable)); + ret += self.indent_out('uint32_t %s = 0;\n' %(variable)) if self.size_fieldname: ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref: %s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, size, self.size_fieldname)) else: ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size)) - ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable)); + ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable)) if self.basemessage.dynamic == "no" and self.size_fieldname: self.incr_indent() ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname)) @@ -755,9 +754,9 @@ def xml2obj(src): def dissect(self): ret = '' if self.make_additional_info == "yes": - ret += self.indent_out('read_skinny_ipv4or6(cursor, &%s);\n' %(self.name)); - ret += self.indent_out('dissect_skinny_ipv4or6(cursor, hf_skinny_%s_ipv4, hf_skinny_%s_ipv6);\n' %(self.name, self.name)); - return ret; + ret += self.indent_out('read_skinny_ipv4or6(cursor, &%s);\n' %(self.name)) + ret += self.indent_out('dissect_skinny_ipv4or6(cursor, hf_skinny_%s_ipv4, hf_skinny_%s_ipv6);\n' %(self.name, self.name)) + return ret class XML(DataNode): def __init__(self): @@ -848,7 +847,7 @@ def xml2obj(src): self.incr_indent() if debug: ret += self.indent_out('/* start struct : %s / size: %d */\n' %(self.name, self.intsize)) - ret += self.indent_out('uint32_t %s = 0;\n' %(variable)); + ret += self.indent_out('uint32_t %s = 0;\n' %(variable)) if self.size_fieldname: ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref:%s = %%d, max:%s]", %s);\n' %(self.name, 
self.size_fieldname, self.maxsize, self.size_fieldname)) if self.maxsize: @@ -859,7 +858,7 @@ def xml2obj(src): else: ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size)) - ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable)); + ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable)) if self.basemessage.dynamic == "no" and self.size_fieldname: self.incr_indent() ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname)) @@ -906,7 +905,7 @@ def xml2obj(src): if self.size_fieldname: ret += self.indent_out('} else {\n') self.incr_indent() - ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s));%s\n' %(self.size_fieldname, self.intsize, ' /* guard kicked in -> skip the rest */' if debug else '')); + ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s));%s\n' %(self.size_fieldname, self.intsize, ' /* guard kicked in -> skip the rest */' if debug else '')) self.decr_indent() ret += self.indent_out('} /* end struct size guard */\n' if debug else '}\n') @@ -948,7 +947,6 @@ def xml2obj(src): def dissect(self): ret = '' ifblock = self.indent_out('if') - skip = 0 #ret += self.indent_out('/* Union : %s / maxsize: %s */\n' %(self.name, self.maxsize)) if (self.fields is not None and len(self.fields)): diff --git a/tools/pre-commit-ignore.py b/tools/pre-commit-ignore.py index 63ecf3e830..d06990344f 100755 --- a/tools/pre-commit-ignore.py +++ b/tools/pre-commit-ignore.py @@ -7,7 +7,6 @@ # SPDX-License-Identifier: GPL-2.0-or-later import sys -import os import fnmatch IGNORE_CONF = "pre-commit-ignore.conf" @@ -29,8 +28,8 @@ def load_checkignore(path): patterns = f.read() except OSError as err: sys.exit(str(err)) - ign = [l.strip() for l in patterns.splitlines()] - ign = [l for l in ign if l and not l.startswith("#")] + ign = [line.strip() for line in patterns.splitlines()] + ign = 
[line for line in ign if line and not line.startswith("#")] return ign ignore_list = load_checkignore(ignore_path) diff --git a/tools/validate-commit.py b/tools/validate-commit.py index f5fd971c28..b5d85afe5c 100755 --- a/tools/validate-commit.py +++ b/tools/validate-commit.py @@ -19,7 +19,6 @@ import json import os import subprocess import sys -import tempfile import urllib.request import re @@ -227,8 +226,8 @@ def main(): try: with open(args.commitmsg) as f: return 0 if verify_body(f.read()) else 1 - except: - print("Couldn't verify body of message from file '", + args.commitmsg + "'"); + except Exception: + print("Couldn't verify body of message from file '" + args.commitmsg + "'") return 1