
To set the context: I have a directory containing 200-300 files. Each file is (line count) in size. I go through the files and export them to a csv file. The last time I ran this, I believe the csv file came to over 340,000 lines. On top of that, the first 8 files are constantly being written to, so data can be lost during parsing.

Each file is laid out like this:

DateTime Message Action ActionDetails

I have code in place that goes through all of the files, parses them, and writes the output to a csv file:

import csv

# listing, _path and out_file come from earlier in the script
for infile in listing:
    _path2 = _path + infile
    f = open(_path2, 'r')
    labels = ['date', 'message', 'action', 'details']
    reader = csv.DictReader(f, labels, delimiter=' ', restkey='rest')

    for line in reader:
        if line.get('rest'):
            line['details'] += ' %s' % (' '.join(line['rest']))
        out_file.write(','.join([infile,line['date'], line['message'], line['action'], line['details']]) + '\n')

    f.close()
out_file.close()

What would be the "best" way to copy the first 8 files so that no data is lost during parsing? By "best" I mean the fastest, since the total time to run the Python script is currently only about 35-45 seconds.
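For concreteness, this is roughly what I mean by copying the first 8 files, assuming the first 8 entries of listing are the ones still being written to (just a sketch, not what I am actually running; snapshot_dir is a hypothetical scratch directory):

import os
import shutil

snapshot_dir = 'snapshot'   # hypothetical temp directory for the copies
if not os.path.isdir(snapshot_dir):
    os.mkdir(snapshot_dir)

# copy the 8 "live" files first so the parser reads frozen snapshots
for infile in listing[:8]:
    shutil.copy2(_path + infile, os.path.join(snapshot_dir, infile))

The concern is whether copying like this adds noticeable time on top of the 35-45 second run.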


1 Answer


I got a little bored, so try this on for size. I haven't actually had a chance to check that it parses and writes correctly, but other than that I think it should run once you fill in a couple of details. This problem is a good opportunity to use queueing. Let me know how fast it runs!

from threading import Thread
import Queue
import csv
import os
import time
import sys

# declare some global items
# queue of parsed line items for the author thread to write to the csv
write_q = Queue.Queue()

# queue filled with files to parse 
read_q = Queue.Queue()

# queue filled with files that have size change during read. Can
# preload this queue to optimize however program should handle any
# file that changes during operation
moving_q = Queue.Queue()

# given csv labels
labels = ['date', 'message', 'action', 'details']

# global for writer thread so it knows when to close
files_to_parse = True

# parsing function for any number of threads
def file_parser():    
    # Each parser thread will run until the read_q is empty
    while True:
        moving = False
        # Test for a file from the read queue or moving queue 
        try:
            if not moving_q.empty():
                try:
                    f_path = moving_q.get(False)
                    moving = True
                # if the moving queue is empty after trying to read it might
                # have been snatched by a different thread; go back and try again
                except Queue.Empty:
                    continue
            else:
                # No items left in moving queue so grab non moving file
                f_path = read_q.get(False)
        # all files have been dealt with
        except Queue.Empty:
            print "Done Parsing"
            sys.exit()

        # Following will parse a file and test that the file is not being
        # modified during the read
        with open(f_path, 'r') as f:
            # csv reader setup
            reader = csv.DictReader(f, labels, delimiter=' ', restkey='rest')

            # initial file size (when we started reading)
            pre = os.path.getsize(f_path)

            # store output items in a list so if file is updated during read
            # we can just ignore those items and read file later
            line_items = []

            # parse the file line by line
            for line in reader:
                # Check that file hasn't been updated
                post = os.path.getsize(f_path)
                if pre != post:
                    # if the file has changed, put it back on the moving queue,
                    # mark the original queue item as done so join() doesn't hang,
                    # and throw away the partial output
                    moving_q.put(f_path)
                    if moving:
                        moving_q.task_done()
                    else:
                        read_q.task_done()
                    line_items = None
                    break
                # parse the line and add it to the output list
                else:
                    if line.get('rest'):
                        line['details'] += ' %s' % (' '.join(line['rest']))
                    line_items.append(','.join([os.path.basename(f_path), line['date'], line['message'], line['action'], line['details']]) + '\n')

            # don't want to do reading and writing in the same thread. Push
            # all line items onto the write queue for the author to deal with
            if line_items is not None:
                if line_items:
                    write_q.put(line_items)
                if moving:
                    moving_q.task_done()
                else:
                    read_q.task_done()

# author thread that will write items to a file as other threads complete
# tasks. Should help speed up IO bound processing
def file_author(out_file):
    with open(out_file,'w') as f:
        # keep writing until the parsers are done and the write queue is drained
        while files_to_parse or not write_q.empty():
            # only one writer thread, so write items as the parsers produce them
            if not write_q.empty():
                line_items = write_q.get(False)
                for line_item in line_items:
                    f.write(line_item)
                write_q.task_done()
            # sleep in the downtime so we don't overload the PC
            else:
                time.sleep(.1)
    print "Done writting"


if __name__ == "__main__":
    # directory and list of file names as you had before
    _path = ""
    listing = []
    outfile = "MyNewCSVfile.csv"

    # You can optimize parsing by adding known "moving" files directly
    # to moving_q, however the program should handle either way
    for infile in listing:
        _path2 = _path + infile
        read_q.put(_path2)

    # make a writer thread
    t = Thread(target = file_author, args = (outfile,))
    t.daemon = True
    t.start()

    # make some parse threads
    for i in range(10):
        t = Thread(target = file_parser)
        t.daemon = True
        t.start()

    # wait for parser threads to finish work
    read_q.join()
    moving_q.join()

    # close author
    files_to_parse = False
    time.sleep(.1)
    print "Complete"
answered 2013-01-28T20:30:19.567