
I am running this small piece of code to find the learning rate:

import cv2
from fastai.vision import *
from fastai.callbacks.hooks import *
path = untar_data(URLs.CAMVID)
path_lbl = path/'labels'
path_img = path/'images'
fnames = get_image_files(path_img)
lbl_names = get_image_files(path_lbl)
img_f = fnames[0]
img = open_image(img_f)
get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'
mask = open_mask(get_y_fn(img_f))
src_size = np.array(mask.shape[1:])
src_size,mask.data
codes = np.loadtxt(path/'codes.txt', dtype=str); codes
size = src_size//2
bs=4
src = (SegmentationItemList.from_folder(path_img)
       .split_by_fname_file('../valid.txt')
       .label_from_func(get_y_fn, classes=codes))
data = (src.transform(get_transforms(), size=size, tfm_y=True)
        .databunch(bs=bs)
        .normalize(imagenet_stats))
name2id = {v:k for k,v in enumerate(codes)}
void_code = name2id['Void']
def acc_camvid(input, target):
    target = target.squeeze(1)
    mask = target != void_code
    return (input.argmax(dim=1)[mask]==target[mask]).float().mean()
wd=1e-2
learn = unet_learner(data, models.resnet34, metrics=acc_camvid, wd=wd)
lr_find(learn)
print("end")

I get this error:

RuntimeError: 
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.

And also this one:

Traceback (most recent call last):
  File "C:\Program Files\JetBrains\PyCharm Community Edition 2018.1.4\helpers\pydev\pydevd.py", line 1664, in <module>
    main()
  File "C:\Program Files\JetBrains\PyCharm Community Edition 2018.1.4\helpers\pydev\pydevd.py", line 1658, in main
    globals = debugger.run(setup['file'], None, None, is_module)
  File "C:\Program Files\JetBrains\PyCharm Community Edition 2018.1.4\helpers\pydev\pydevd.py", line 1068, in run
    pydev_imports.execfile(file, globals, locals)  # execute the script
  File "C:\Program Files\JetBrains\PyCharm Community Edition 2018.1.4\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "C:/Users/steve/Project/fastai_unet/main.py", line 32, in <module>
    lr_find(learn)
  File "C:\Users\steve\Miniconda3\lib\site-packages\fastai\train.py", line 32, in lr_find
    learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
  File "C:\Users\steve\Miniconda3\lib\site-packages\fastai\basic_train.py", line 199, in fit
    fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
  File "C:\Users\steve\Miniconda3\lib\site-packages\fastai\basic_train.py", line 99, in fit
    for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
  File "C:\Users\steve\Miniconda3\lib\site-packages\fastprogress\fastprogress.py", line 72, in __iter__
    for i,o in enumerate(self._gen):
  File "C:\Users\steve\Miniconda3\lib\site-packages\fastai\basic_data.py", line 75, in __iter__
    for b in self.dl: yield self.proc_batch(b)
  File "C:\Users\steve\Miniconda3\lib\site-packages\torch\utils\data\dataloader.py", line 193, in __iter__
    return _DataLoaderIter(self)
  File "C:\Users\steve\Miniconda3\lib\site-packages\torch\utils\data\dataloader.py", line 469, in __init__
    w.start()
  File "C:\Users\steve\Miniconda3\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\Users\steve\Miniconda3\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Users\steve\Miniconda3\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\steve\Miniconda3\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\steve\Miniconda3\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
BrokenPipeError: [Errno 32] Broken pipe

How can I fix this?


2 Answers


Oh, the solution is to wrap the code in a function and call it under an if __name__ == '__main__': guard. (On Windows, multiprocessing starts the DataLoader workers with spawn, which re-imports the main module in each worker; the guard keeps the workers from re-running the training script itself.)

import cv2
from fastai.vision import *
from fastai.callbacks.hooks import *

def main():
    path = untar_data(URLs.CAMVID)
    path_lbl = path/'labels'
    path_img = path/'images'
    fnames = get_image_files(path_img)
    lbl_names = get_image_files(path_lbl)
    img_f = fnames[0]  # pick one image so we can open its mask and read the source size
    get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'
    mask = open_mask(get_y_fn(img_f))
    src_size = np.array(mask.shape[1:])
    src_size,mask.data
    codes = np.loadtxt(path/'codes.txt', dtype=str); codes
    size = src_size//2
    bs=4
    src = (SegmentationItemList.from_folder(path_img)
           .split_by_fname_file('../valid.txt')
           .label_from_func(get_y_fn, classes=codes))
    data = (src.transform(get_transforms(), size=size, tfm_y=True)
            .databunch(bs=bs)
            .normalize(imagenet_stats))
    name2id = {v:k for k,v in enumerate(codes)}
    void_code = name2id['Void']
    def acc_camvid(input, target):
        target = target.squeeze(1)
        mask = target != void_code
        return (input.argmax(dim=1)[mask]==target[mask]).float().mean()
    wd=1e-2
    learn = unet_learner(data, models.resnet34, metrics=acc_camvid, wd=wd)
    lr_find(learn)
    print("end")
if __name__ == '__main__':
    main()
Answered 2019-05-07T19:31:40.540

In my case, when running under Windows, I had to set num_workers to 0 in the data loader. Here, I think it can be set in databunch(bs=bs), as in the sketch below.
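A minimal sketch of that change, assuming the same fastai v1 data block pipeline as in the question (only the databunch call differs; num_workers=0 keeps all data loading in the main process, so Windows never has to spawn worker processes):

data = (src.transform(get_transforms(), size=size, tfm_y=True)
        .databunch(bs=bs, num_workers=0)  # load batches in-process; avoids the spawn/BrokenPipeError on Windows
        .normalize(imagenet_stats))

The trade-off is slower data loading, since every batch is prepared on the main process.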

Answered 2022-02-10T08:58:57.303