在 Jupyter Notebook 中进行 pyLDAvis 可视化时出现报错

 输入以下代码进行 pyLDAvis 可视化时,出现了报错:

vis= pyLDAvis.gensim.prepare(ldamodel,corpus,dictionary)

 

 报错信息(Traceback)如下:

---------------------------------------------------------------------------
OSError                                   Traceback (most recent call last)
<ipython-input-11-10ca0d2a3400> in <module>
----> 1 vis= pyLDAvis.gensim.prepare(ldamodel,corpus,dictionary)

D:\lib\site-packages\pyLDAvis\gensim.py in prepare(topic_model, corpus, dictionary, doc_topic_dist, **kwargs)
    117     """
    118     opts = fp.merge(_extract_data(topic_model, corpus, dictionary, doc_topic_dist), kwargs)
--> 119     return vis_prepare(**opts)

D:\lib\site-packages\pyLDAvis\_prepare.py in prepare(topic_term_dists, doc_topic_dists, doc_lengths, vocab, term_frequency, R, lambda_step, mds, n_jobs, plot_opts, sort_topics)
    396    term_frequency = np.sum(term_topic_freq, axis=0)
    397 
--> 398    topic_info         = _topic_info(topic_term_dists, topic_proportion, term_frequency, term_topic_freq, vocab, lambda_step, R, n_jobs)
    399    token_table        = _token_table(topic_info, term_topic_freq, vocab, term_frequency)
    400    topic_coordinates = _topic_coordinates(mds, topic_term_dists, topic_proportion)

D:\lib\site-packages\pyLDAvis\_prepare.py in _topic_info(topic_term_dists, topic_proportion, term_frequency, term_topic_freq, vocab, lambda_step, R, n_jobs)
    253 
    254    top_terms = pd.concat(Parallel(n_jobs=n_jobs)(delayed(_find_relevance_chunks)(log_ttd, log_lift, R, ls) \
--> 255                                                  for ls in _job_chunks(lambda_seq, n_jobs)))
    256    topic_dfs = map(topic_top_term_df, enumerate(top_terms.T.iterrows(), 1))
    257    return pd.concat([default_term_info] + list(topic_dfs))

D:\lib\site-packages\joblib\parallel.py in __call__(self, iterable)
    952 
    953         if not self._managed_backend:
--> 954             n_jobs = self._initialize_backend()
    955         else:
    956             n_jobs = self._effective_n_jobs()

D:\lib\site-packages\joblib\parallel.py in _initialize_backend(self)
    720         try:
    721             n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
--> 722                                              **self._backend_args)
    723             if self.timeout is not None and not self._backend.supports_timeout:
    724                 warnings.warn(

D:\lib\site-packages\joblib\_parallel_backends.py in configure(self, n_jobs, parallel, prefer, require, idle_worker_timeout, **memmappingexecutor_args)
    495             n_jobs, timeout=idle_worker_timeout,
    496             env=self._prepare_worker_env(n_jobs=n_jobs),
--> 497             context_id=parallel._id, **memmappingexecutor_args)
    498         self.parallel = parallel
    499         return n_jobs

D:\lib\site-packages\joblib\executor.py in get_memmapping_executor(n_jobs, **kwargs)
     18 
     19 def get_memmapping_executor(n_jobs, **kwargs):
---> 20     return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
     21 
     22 

D:\lib\site-packages\joblib\executor.py in get_memmapping_executor(cls, n_jobs, timeout, initializer, initargs, env, temp_folder, context_id, **backend_args)
     40         _executor_args = executor_args
     41 
---> 42         manager = TemporaryResourcesManager(temp_folder)
     43 
     44         # reducers access the temporary folder in which to store temporary

D:\lib\site-packages\joblib\_memmapping_reducer.py in __init__(self, temp_folder_root, context_id)
    529             # exposes exposes too many low-level details.
    530             context_id = uuid4().hex
--> 531         self.set_current_context(context_id)
    532 
    533     def set_current_context(self, context_id):

D:\lib\site-packages\joblib\_memmapping_reducer.py in set_current_context(self, context_id)
    533     def set_current_context(self, context_id):
    534         self._current_context_id = context_id
--> 535         self.register_new_context(context_id)
    536 
    537     def register_new_context(self, context_id):

D:\lib\site-packages\joblib\_memmapping_reducer.py in register_new_context(self, context_id)
    558                 new_folder_name, self._temp_folder_root
    559             )
--> 560             self.register_folder_finalizer(new_folder_path, context_id)
    561             self._cached_temp_folders[context_id] = new_folder_path
    562 

D:\lib\site-packages\joblib\_memmapping_reducer.py in register_folder_finalizer(self, pool_subfolder, context_id)
    588         # semaphores and pipes
    589         pool_module_name = whichmodule(delete_folder, 'delete_folder')
--> 590         resource_tracker.register(pool_subfolder, "folder")
    591 
    592         def _cleanup():

D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in register(self, name, rtype)
    188     def register(self, name, rtype):
    189         '''Register a named resource, and increment its refcount.'''
--> 190         self.ensure_running()
    191         self._send('REGISTER', name, rtype)
    192 

D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in ensure_running(self)
    100             if self._fd is not None:
    101                 # resource tracker was launched before, is it still running?
--> 102                 if self._check_alive():
    103                     # => still alive
    104                     return

D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in _check_alive(self)
    180         '''Check for the existence of the resource tracker process.'''
    181         try:
--> 182             self._send('PROBE', '', '')
    183         except BrokenPipeError:
    184             return False

D:\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in _send(self, cmd, name, rtype)
    207             # bytes are atomic, and that PIPE_BUF >= 512
    208             raise ValueError('name too long')
--> 209         nbytes = os.write(self._fd, msg)
    210         assert nbytes == len(msg)
    211 

OSError: [Errno 22] Invalid argument

 

您好,请问您是如何解决这个问题的?

你好,我是问答小助手。本次您提出的有问必答问题,技术专家团超时未能为您做出解答。

本次提问扣除的有问必答次数,已经为您补发到账户。我们后续会持续优化,扩大我们的服务范围,为您带来更好的服务。