The using_converter function below is a slightly faster method:
import itertools as IT

header_map = {'Foo': str,
              'Bar': str,
              'FooBar': float}

N = 20000
csv_data = [('Foo', 'Bar', 'FooBar')] + [('Foo', 'Bar', 1123.451)]*N
def original(csv_data):
    index_map = {}
    # enumerate the header row and build a dict of column index -> converter function
    for i, header in enumerate(csv_data[0]):
        index_map[i] = header_map[header]
    # look up the converter for each column index and call it on the value
    new_csv = [[index_map[i](value) for i, value in enumerate(row)]
               for row in csv_data[1:]]
    return new_csv
def using_converter(csv_data):
    # cycle endlessly through the per-column converters, in header order
    converters = IT.cycle([header_map[header] for header in csv_data[0]])
    conv = converters.__next__   # bind locally to avoid attribute lookups (converters.next in Python 2)
    new_csv = [[conv()(item) for item in row] for row in csv_data[1:]]
    return new_csv
def using_header_map(csv_data):
    heads = csv_data[0]
    # pair each value with its header and apply the matching converter
    new_csv = [
        tuple(header_map[head](item) for head, item in zip(heads, row))
        for row in csv_data[1:]]
    return new_csv
# print(original(csv_data))
# print(using_converter(csv_data))
# print(using_header_map(csv_data))
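
As a quick sanity check (not part of the original benchmark), the sketch below confirms the three functions agree on a small sample. The helper name _sanity_check and the sample size are arbitrary; note that using_header_map returns tuples while the other two return lists, so everything is normalized to tuples before comparing.

def _sanity_check():
    # small sample: header row plus a few data rows
    sample = [('Foo', 'Bar', 'FooBar')] + [('Foo', 'Bar', 1123.451)] * 3
    a = [tuple(row) for row in original(sample)]
    b = [tuple(row) for row in using_converter(sample)]
    c = list(using_header_map(sample))
    assert a == b == c
    return a[0]

# _sanity_check() should return ('Foo', 'Bar', 1123.451)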
Benchmark with timeit:

Original code:
% python -mtimeit -s'import test' 'test.original(test.csv_data)'
100 loops, best of 3: 17.3 msec per loop
Slightly faster version (using itertools):
% python -mtimeit -s'import test' 'test.using_converter(test.csv_data)'
100 loops, best of 3: 15.5 msec per loop
Lev Levitsky's version:
% python -mtimeit -s'import test' 'test.using_header_map(test.csv_data)'
10 loops, best of 3: 36.2 msec per loop
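
If the command-line form is inconvenient, roughly the same measurement can be taken from inside a script with the timeit module. This is only a sketch (the benchmark function and loop count are my own choices, not from the original post), and the absolute numbers will differ by machine and Python version.

import timeit

def benchmark():
    # time each implementation on the same csv_data defined above
    for func in (original, using_converter, using_header_map):
        seconds = timeit.timeit(lambda: func(csv_data), number=100)
        print('{}: {:.1f} msec per call'.format(func.__name__, seconds * 1000 / 100))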