增加多进程时间

时间:2018-04-13 07:55:46

标签: numpy multiprocessing

我定义了一个多进程脚本来改进图像分析。它似乎运行良好,但为了确定最佳的进程数,我做了几次测试,逐步改变进程数量。由于测试结果有一定波动,我加入了一个循环,把每次测试重复一百次。然而在重复过程中,耗时显著增加。问题的根源可能是什么?我需要手动清理内存吗?但内存似乎并没有饱和。

我的一段代码:

from multiprocessing import Process, current_process
import multiprocessing 
import glob as glob
import matplotlib.pyplot as plt
from skimage import io
import time
import sys
import numpy as np
import numpy.ma as ma
import gc
import os
from PIL import Image
from skimage import exposure
import cv2 


# Location of the raw input images and of the processed output.
Path_input = "E:\\test\\raw\\"
Path_output = "E:\\test\\"

# Every raw test image, in glob order; the first one fixes the frame size.
Img_list = glob.glob(Path_input + 'Test_*.tif')
size_y, size_x = io.imread(Img_list[0]).shape

# Worker function executed by each multiprocessing.Process
def Ajustement(x):
    """Apply CLAHE contrast adjustment to image number *x* and save it.

    Reads ``Img_list[x]``, equalizes it with OpenCV's contrast-limited
    adaptive histogram equalization, and writes the result to
    ``Path_output`` as ``Ajusted%05d.tif``.

    Parameters
    ----------
    x : int
        Index into the module-level ``Img_list``.

    Returns
    -------
    str
        A fixed success marker (note: lost when run in a child process).
    """
    # image reading
    img = plt.imread(Img_list[x])

    # create a CLAHE object
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # BUG FIX: the original applied CLAHE to the undefined name
    # 'img_rescale' (NameError at runtime); use the image actually read.
    # CLAHE requires an 8- or 16-bit single-channel image, hence uint16.
    cl1 = clahe.apply(img.astype(np.uint16))

    # Path_output already ends with a separator; the original's extra '\\'
    # produced a (harmless on Windows) doubled backslash, removed here.
    cv2.imwrite(Path_output + 'Ajusted' + "%05d" % x + '.tif', cl1)

    return 'Ajustement OK!'


# create strings list of process names: 'Process_1' .. 'Process_10'.
# Fixes vs. original: no longer shadows the builtin 'list', and avoids
# np.str, which was removed from NumPy (1.24+); plain str() is identical.
cpu_max = 10
list_process = ['Process_' + str(i) for i in range(1, cpu_max + 1)]

# time.clock gave the best resolution on Windows under Python 2 but was
# removed in Python 3.8 — fall back to time.time if it is absent.
get_timer = getattr(time, "clock", time.time) if sys.platform == "win32" else time.time

time_store = []    # total wall time of each 100-iteration run
time_process = []  # same, divided by the number of worker processes

if __name__ == '__main__':
    # Image indices are dispatched in batches of cpu_max worker processes.
    range_adjusted = np.arange(0, len(Img_list), cpu_max)
    for m in range(100):  # loop for obtain a mean time for the process
        print(m)
        timer = get_timer()  # time measuring starts now
        for i in range_adjusted:
            # BUG FIX: the original targeted the undefined name
            # 'Normalization_and_ajustement'; the worker defined in this
            # file is Ajustement. Workers are kept in a local list instead
            # of being injected into globals() one by one.
            workers = [Process(name='worker1', target=Ajustement, args=(i + o,))
                       for o in range(cpu_max)]
            for p in workers:  # process start
                p.start()
            for p in workers:  # process join
                p.join()

            if i == range_adjusted.max():
                print("Normalization and Equalization finished")

                timer = get_timer() - timer  # get delta time as soon as it finishes
                time_store.append(timer)
                time_process.append(timer / cpu_max)

                np.savetxt(Path_output + 'time_tot_normalization.txt', time_store)
                np.savetxt(Path_output + 'time_process_normalization.txt', time_process)

                print("\tTotal: {:.2f} seconds".format(timer))
                print("\tAvg. per process: {:.2f} seconds".format(timer / cpu_max))

        # Memory-leak fix (accepted answer): collect after every repetition
        # so dead Process objects do not accumulate and inflate later timings.
        gc.collect()

1 个答案:

答案 0 :(得分:1)

我认为这是由于内存泄漏造成的。确实,我在每个循环后添加了gc.collect()命令,问题就解决了。