AdaBoost 模型用网格搜索法(GridSearchCV)调用 fit 时报错 [Errno 22] Invalid argument,是什么原因?应该怎么解决?

# NOTE(review): the reported OSError [Errno 22] is raised inside joblib's loky
# backend (resource_tracker._send) while spawning the n_jobs=4 worker processes
# on Windows — it is an environment/multiprocessing failure, not a modeling bug.
# Workarounds: set n_jobs=1 (sequential, shown below), upgrade joblib/sklearn,
# or run the code as a plain `python script.py` instead of inside the IDE/notebook.
from sklearn import ensemble  # fix: `ensemble` was used below but never imported
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier

# Candidate max depths for the AdaBoost base decision tree.
max_depth = [3, 4, 5, 6]
params1 = {'base_estimator__max_depth': max_depth}

base_model = GridSearchCV(
    estimator=ensemble.AdaBoostClassifier(base_estimator=DecisionTreeClassifier()),
    param_grid=params1,
    scoring='roc_auc',
    cv=5,
    n_jobs=1,   # was 4: parallel workers trigger the loky pickling/OSError here
    verbose=1,
)
base_model.fit(X_train[predictors], y_train)  # fix: removed stray ** markdown markers
# Best parameter combination and its corresponding AUC score.
base_model.best_params_, base_model.best_score_  # fix: typo `base_mode` -> `base_model`

完整报错信息如下:

Fitting 5 folds for each of 4 candidates, totalling 20 fits
---------------------------------------------------------------------------
_RemoteTraceback                          Traceback (most recent call last)
_RemoteTraceback: 
"""
Traceback (most recent call last):
  File "D:\lib\site-packages\joblib\externals\loky\backend\queues.py", line 153, in _feed
    obj_ = dumps(obj, reducers=reducers)
  File "D:\lib\site-packages\joblib\externals\loky\backend\reduction.py", line 271, in dumps
    dump(obj, buf, reducers=reducers, protocol=protocol)
  File "D:\lib\site-packages\joblib\externals\loky\backend\reduction.py", line 264, in dump
    _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
  File "D:\lib\site-packages\joblib\externals\cloudpickle\cloudpickle_fast.py", line 602, in dump
    return Pickler.dump(self, obj)
  File "D:\lib\site-packages\joblib\_memmapping_reducer.py", line 427, in __call__
    resource_tracker.register(filename, "file")
  File "D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py", line 190, in register
    self.ensure_running()
  File "D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py", line 102, in ensure_running
    if self._check_alive():
  File "D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py", line 182, in _check_alive
    self._send('PROBE', '', '')
  File "D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py", line 209, in _send
    nbytes = os.write(self._fd, msg)
OSError: [Errno 22] Invalid argument
"""

The above exception was the direct cause of the following exception:

PicklingError                             Traceback (most recent call last)
File D:\lib\site-packages\joblib\parallel.py:935, in Parallel.retrieve(self)
    934 if getattr(self._backend, 'supports_timeout', False):
--> 935     self._output.extend(job.get(timeout=self.timeout))
    936 else:

File D:\lib\site-packages\joblib\_parallel_backends.py:542, in LokyBackend.wrap_future_result(future, timeout)
    541 try:
--> 542     return future.result(timeout=timeout)
    543 except CfTimeoutError as e:

File D:\lib\concurrent\futures\_base.py:446, in Future.result(self, timeout)
    445 elif self._state == FINISHED:
--> 446     return self.__get_result()
    447 else:

File D:\lib\concurrent\futures\_base.py:391, in Future.__get_result(self)
    390 try:
--> 391     raise self._exception
    392 finally:
    393     # Break a reference cycle with the exception in self._exception

PicklingError: Could not pickle the task to send it to the workers.

During handling of the above exception, another exception occurred:

OSError                                   Traceback (most recent call last)
Input In [15], in <cell line: 5>()
      2 params1 = {'base_estimator__max_depth':max_depth}
      3 base_model = GridSearchCV(estimator = ensemble.AdaBoostClassifier(base_estimator = DecisionTreeClassifier()),param_grid=params1,
      4                            scoring='roc_auc',cv=5,n_jobs=4,verbose=1)
----> 5 base_model.fit(X_train[predictors],y_train)
      6 #返回参数的最佳组合和对应的AUC值
      7 base_model.best_params_,base_mode.best_score_

File D:\lib\site-packages\sklearn\model_selection\_search.py:891, in BaseSearchCV.fit(self, X, y, groups, **fit_params)
    885     results = self._format_results(
    886         all_candidate_params, n_splits, all_out, all_more_results
    887     )
    889     return results
--> 891 self._run_search(evaluate_candidates)
    893 # multimetric is determined here because in the case of a callable
    894 # self.scoring the return type is only known after calling
    895 first_test_score = all_out[0]["test_scores"]

File D:\lib\site-packages\sklearn\model_selection\_search.py:1392, in GridSearchCV._run_search(self, evaluate_candidates)
   1390 def _run_search(self, evaluate_candidates):
   1391     """Search all candidates in param_grid"""
-> 1392     evaluate_candidates(ParameterGrid(self.param_grid))

File D:\lib\site-packages\sklearn\model_selection\_search.py:838, in BaseSearchCV.fit.<locals>.evaluate_candidates(candidate_params, cv, more_results)
    830 if self.verbose > 0:
    831     print(
    832         "Fitting {0} folds for each of {1} candidates,"
    833         " totalling {2} fits".format(
    834             n_splits, n_candidates, n_candidates * n_splits
    835         )
    836     )
--> 838 out = parallel(
    839     delayed(_fit_and_score)(
    840         clone(base_estimator),
    841         X,
    842         y,
    843         train=train,
    844         test=test,
    845         parameters=parameters,
    846         split_progress=(split_idx, n_splits),
    847         candidate_progress=(cand_idx, n_candidates),
    848         **fit_and_score_kwargs,
    849     )
    850     for (cand_idx, parameters), (split_idx, (train, test)) in product(
    851         enumerate(candidate_params), enumerate(cv.split(X, y, groups))
    852     )
    853 )
    855 if len(out) < 1:
    856     raise ValueError(
    857         "No fits were performed. "
    858         "Was the CV iterator empty? "
    859         "Were there no candidates?"
    860     )

File D:\lib\site-packages\joblib\parallel.py:1056, in Parallel.__call__(self, iterable)
   1053     self._iterating = False
   1055 with self._backend.retrieval_context():
-> 1056     self.retrieve()
   1057 # Make sure that we get a last message telling us we are done
   1058 elapsed_time = time.time() - self._start_time

File D:\lib\site-packages\joblib\parallel.py:957, in Parallel.retrieve(self)
    951 if (backend is not None and
    952         hasattr(backend, 'abort_everything')):
    953     # If the backend is managed externally we need to make sure
    954     # to leave it in a working state to allow for future jobs
    955     # scheduling.
    956     ensure_ready = self._managed_backend
--> 957     backend.abort_everything(ensure_ready=ensure_ready)
    958 raise

File D:\lib\site-packages\joblib\_parallel_backends.py:565, in LokyBackend.abort_everything(self, ensure_ready)
    562 self._workers = None
    564 if ensure_ready:
--> 565     self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)

File D:\lib\site-packages\joblib\_parallel_backends.py:494, in LokyBackend.configure(self, n_jobs, parallel, prefer, require, idle_worker_timeout, **memmappingexecutor_args)
    490 if n_jobs == 1:
    491     raise FallbackToBackend(
    492         SequentialBackend(nesting_level=self.nesting_level))
--> 494 self._workers = get_memmapping_executor(
    495     n_jobs, timeout=idle_worker_timeout,
    496     env=self._prepare_worker_env(n_jobs=n_jobs),
    497     context_id=parallel._id, **memmappingexecutor_args)
    498 self.parallel = parallel
    499 return n_jobs

File D:\lib\site-packages\joblib\executor.py:20, in get_memmapping_executor(n_jobs, **kwargs)
     19 def get_memmapping_executor(n_jobs, **kwargs):
---> 20     return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)

File D:\lib\site-packages\joblib\executor.py:42, in MemmappingExecutor.get_memmapping_executor(cls, n_jobs, timeout, initializer, initargs, env, temp_folder, context_id, **backend_args)
     39 reuse = _executor_args is None or _executor_args == executor_args
     40 _executor_args = executor_args
---> 42 manager = TemporaryResourcesManager(temp_folder)
     44 # reducers access the temporary folder in which to store temporary
     45 # pickles through a call to manager.resolve_temp_folder_name. resolving
     46 # the folder name dynamically is useful to use different folders across
     47 # calls of a same reusable executor
     48 job_reducers, result_reducers = get_memmapping_reducers(
     49     unlink_on_gc_collect=True,
     50     temp_folder_resolver=manager.resolve_temp_folder_name,
     51     **backend_args)

File D:\lib\site-packages\joblib\_memmapping_reducer.py:531, in TemporaryResourcesManager.__init__(self, temp_folder_root, context_id)
    525 if context_id is None:
    526     # It would be safer to not assign a default context id (less silent
    527     # bugs), but doing this while maintaining backward compatibility
    528     # with the previous, context-unaware version get_memmaping_executor
    529     # exposes exposes too many low-level details.
    530     context_id = uuid4().hex
--> 531 self.set_current_context(context_id)

File D:\lib\site-packages\joblib\_memmapping_reducer.py:535, in TemporaryResourcesManager.set_current_context(self, context_id)
    533 def set_current_context(self, context_id):
    534     self._current_context_id = context_id
--> 535     self.register_new_context(context_id)

File D:\lib\site-packages\joblib\_memmapping_reducer.py:560, in TemporaryResourcesManager.register_new_context(self, context_id)
    553 new_folder_name = (
    554     "joblib_memmapping_folder_{}_{}_{}".format(
    555         os.getpid(), self._id, context_id)
    556 )
    557 new_folder_path, _ = _get_temp_dir(
    558     new_folder_name, self._temp_folder_root
    559 )
--> 560 self.register_folder_finalizer(new_folder_path, context_id)
    561 self._cached_temp_folders[context_id] = new_folder_path

File D:\lib\site-packages\joblib\_memmapping_reducer.py:590, in TemporaryResourcesManager.register_folder_finalizer(self, pool_subfolder, context_id)
    583 def register_folder_finalizer(self, pool_subfolder, context_id):
    584     # Register the garbage collector at program exit in case caller forgets
    585     # to call terminate explicitly: note we do not pass any reference to
    586     # ensure that this callback won't prevent garbage collection of
    587     # parallel instance and related file handler resources such as POSIX
    588     # semaphores and pipes
    589     pool_module_name = whichmodule(delete_folder, 'delete_folder')
--> 590     resource_tracker.register(pool_subfolder, "folder")
    592     def _cleanup():
    593         # In some cases the Python runtime seems to set delete_folder to
    594         # None just before exiting when accessing the delete_folder
   (...)
    599         # because joblib should only use relative imports to allow
    600         # easy vendoring.
    601         delete_folder = __import__(
    602             pool_module_name, fromlist=['delete_folder']).delete_folder

File D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py:190, in ResourceTracker.register(self, name, rtype)
    188 def register(self, name, rtype):
    189     '''Register a named resource, and increment its refcount.'''
--> 190     self.ensure_running()
    191     self._send('REGISTER', name, rtype)

File D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py:102, in ResourceTracker.ensure_running(self)
     99 with self._lock:
    100     if self._fd is not None:
    101         # resource tracker was launched before, is it still running?
--> 102         if self._check_alive():
    103             # => still alive
    104             return
    105         # => dead, launch it again

File D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py:182, in ResourceTracker._check_alive(self)
    180 '''Check for the existence of the resource tracker process.'''
    181 try:
--> 182     self._send('PROBE', '', '')
    183 except BrokenPipeError:
    184     return False

File D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py:209, in ResourceTracker._send(self, cmd, name, rtype)
    205 if len(name) > 512:
    206     # posix guarantees that writes to a pipe of less than PIPE_BUF
    207     # bytes are atomic, and that PIPE_BUF >= 512
    208     raise ValueError('name too long')
--> 209 nbytes = os.write(self._fd, msg)
    210 assert nbytes == len(msg)

OSError: [Errno 22] Invalid argument