diff options
author | 2021-08-25 17:47:43 +0000 | |
---|---|---|
committer | 2021-08-27 00:30:41 +0000 | |
commit | 2c2219b60e2a25f03b75176a4f27b1ff4a86ebca (patch) | |
tree | cac1bc6133a3d6a79f79086edf8091169509b658 /scripts/hiddenapi/merge_csv.py | |
parent | 7c16dabfa595cfe38b99c5b0f92aca6eec440de0 (diff) |
Apply pylint to remaining scripts in hiddenapi
1. Run pyformat scripts/hiddenapi -s 4
--force_quote_type none -i to fix formatting.
2. rename restricted variable names (e.g. variable name "input" has been
changed to "csvfile")
3. use pylint: disable=<X> where fixes are not obvious
Test: m merge_csv signature_patterns signature_patterns_test
Test: pylint --rcfile tools/repohooks/tools/pylintrc <file1>
<file1_test>
Bug: 195738175
Change-Id: I800a208f9c0ee1d32e68e4b20fd5933b3ab92c0e
Diffstat (limited to 'scripts/hiddenapi/merge_csv.py')
-rwxr-xr-x | scripts/hiddenapi/merge_csv.py | 71 |
1 file changed, 47 insertions, 24 deletions
diff --git a/scripts/hiddenapi/merge_csv.py b/scripts/hiddenapi/merge_csv.py index a65326c51..c17ec25f1 100755 --- a/scripts/hiddenapi/merge_csv.py +++ b/scripts/hiddenapi/merge_csv.py @@ -13,8 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" -Merge multiple CSV files, possibly with different columns. +"""Merge multiple CSV files, possibly with different columns. """ import argparse @@ -26,34 +25,52 @@ import operator from zipfile import ZipFile -args_parser = argparse.ArgumentParser(description='Merge given CSV files into a single one.') -args_parser.add_argument('--header', help='Comma separated field names; ' - 'if missing determines the header from input files.') -args_parser.add_argument('--zip_input', help='Treat files as ZIP archives containing CSV files to merge.', - action="store_true") -args_parser.add_argument('--key_field', help='The name of the field by which the rows should be sorted. ' - 'Must be in the field names. ' - 'Will be the first field in the output. ' - 'All input files must be sorted by that field.') -args_parser.add_argument('--output', help='Output file for merged CSV.', - default='-', type=argparse.FileType('w')) +args_parser = argparse.ArgumentParser( + description='Merge given CSV files into a single one.' +) +args_parser.add_argument( + '--header', + help='Comma separated field names; ' + 'if missing determines the header from input files.', +) +args_parser.add_argument( + '--zip_input', + help='Treat files as ZIP archives containing CSV files to merge.', + action="store_true", +) +args_parser.add_argument( + '--key_field', + help='The name of the field by which the rows should be sorted. ' + 'Must be in the field names. ' + 'Will be the first field in the output. ' + 'All input files must be sorted by that field.', +) +args_parser.add_argument( + '--output', + help='Output file for merged CSV.', + default='-', + type=argparse.FileType('w'), +) args_parser.add_argument('files', nargs=argparse.REMAINDER) args = args_parser.parse_args() -def dict_reader(input): - return csv.DictReader(input, delimiter=',', quotechar='|') +def dict_reader(csvfile): + return csv.DictReader(csvfile, delimiter=',', quotechar='|') + csv_readers = [] -if not(args.zip_input): +if not args.zip_input: for file in args.files: csv_readers.append(dict_reader(open(file, 'r'))) else: for file in args.files: - with ZipFile(file) as zip: - for entry in zip.namelist(): + with ZipFile(file) as zipfile: + for entry in zipfile.namelist(): if entry.endswith('.uau'): - csv_readers.append(dict_reader(io.TextIOWrapper(zip.open(entry, 'r')))) + csv_readers.append( + dict_reader(io.TextIOWrapper(zipfile.open(entry, 'r'))) + ) if args.header: fieldnames = args.header.split(',') @@ -73,8 +90,8 @@ if len(csv_readers) > 0: keyField = args.key_field if keyField: assert keyField in fieldnames, ( - "--key_field {} not found, must be one of {}\n").format( - keyField, ",".join(fieldnames)) + "--key_field {} not found, must be one of {}\n" + ).format(keyField, ",".join(fieldnames)) # Make the key field the first field in the output keyFieldIndex = fieldnames.index(args.key_field) fieldnames.insert(0, fieldnames.pop(keyFieldIndex)) @@ -83,11 +100,17 @@ if len(csv_readers) > 0: all_rows = heapq.merge(*csv_readers, key=operator.itemgetter(keyField)) # Write all rows from the input files to the output: -writer = csv.DictWriter(args.output, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL, - dialect='unix', fieldnames=fieldnames) +writer = csv.DictWriter( + args.output, + delimiter=',', + quotechar='|', + quoting=csv.QUOTE_MINIMAL, + dialect='unix', + fieldnames=fieldnames, +) writer.writeheader() # Read all the rows from the input and write them to the output in the correct # order: for row in all_rows: - writer.writerow(row) + writer.writerow(row)