YOLO v2 code error in Jupyter Notebook

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py in binary_op_wrapper(x, y)
   1244             r_op = getattr(y, "__r%s__" % op_name)
-> 1245             out = r_op(x)
   1246             if out is NotImplemented:

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py in r_binary_op_wrapper(y, x)
   1265       #   r_binary_op_wrapper use different force_same_dtype values.
-> 1266       y, x = maybe_promote_tensors(y, x)
   1267       return func(x, y, name=name)

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py in maybe_promote_tensors(force_same_dtype, *tensors)
   1201       promoted_tensors.append(
-> 1202           ops.convert_to_tensor(tensor, dtype, name="x"))
   1203     return promoted_tensors

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\profiler\trace.py in wrapped(*args, **kwargs)
    162           return func(*args, **kwargs)
--> 163       return func(*args, **kwargs)
    164 

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\framework\ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
   1565     if ret is None:
-> 1566       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1567 

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\framework\constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
    338   _ = as_ref
--> 339   return constant(v, dtype=dtype, name=name)
    340 

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\framework\constant_op.py in constant(value, dtype, shape, name)
    263   """
--> 264   return _constant_impl(value, dtype, shape, name, verify_shape=False,
    265                         allow_broadcast=True)

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\framework\constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
    275         return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 276     return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
    277 

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\framework\constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
    300   """Implementation of eager constant."""
--> 301   t = convert_to_eager_tensor(value, ctx, dtype)
    302   if shape is None:

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
     97   ctx.ensure_initialized()
---> 98   return ops.EagerTensor(value, ctx.device_name, dtype)
     99 

D:\pictureprocess\anaconda\lib\site-packages\keras\engine\keras_tensor.py in __array__(self)
    243   def __array__(self):
--> 244     raise TypeError(
    245         'Cannot convert a symbolic Keras input/output to a numpy array. '

TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
<ipython-input-1-bcf61aa1007c> in <module>
    112 #Now yolo_eval function selects the best boxes using filtering and non-max suppression techniques.
    113 # If you want to dive in more to see how this works, refer keras_yolo.py file in yad2k/models
--> 114 boxes, scores, classes = yolo_eval(yolo_outputs, image_shape)
    115 
    116 

~\yad2k\models\keras_yolo.py in yolo_eval(yolo_outputs, image_shape, max_boxes, score_threshold, iou_threshold)
    337     image_dims = K.stack([height, width, height, width])
    338     image_dims = K.reshape(image_dims, [1, 4])
--> 339     boxes = boxes * image_dims
    340 
    341     # TODO: Something must be done about this ugly hack!

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py in binary_op_wrapper(x, y)
   1248             return out
   1249           except (TypeError, ValueError):
-> 1250             raise e
   1251         else:
   1252           raise

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py in binary_op_wrapper(x, y)
   1232         #   r_binary_op_wrapper use different force_same_dtype values.
   1233         x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
-> 1234         return func(x, y, name=name)
   1235       except (TypeError, ValueError) as e:
   1236         # Even if dispatching the op failed, the RHS may be a tensor aware

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py in _mul_dispatch(x, y, name)
   1573     return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
   1574   else:
-> 1575     return multiply(x, y, name=name)
   1576 
   1577 

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\util\dispatch.py in wrapper(*args, **kwargs)
    208       # Note: convert_to_eager_tensor currently raises a ValueError, not a
    209       # TypeError, when given unexpected types.  So we need to catch both.
--> 210       result = dispatch(wrapper, args, kwargs)
    211       if result is not OpDispatcher.NOT_SUPPORTED:
    212         return result

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\util\dispatch.py in dispatch(op, args, kwargs)
    124       return result
    125   for dispatcher in _GLOBAL_DISPATCHERS:
--> 126     result = dispatcher.handle(op, args, kwargs)
    127     if result is not OpDispatcher.NOT_SUPPORTED:
    128       return result

D:\pictureprocess\anaconda\lib\site-packages\keras\layers\core.py in handle(self, op, args, kwargs)
   1464         isinstance(x, keras_tensor.KerasTensor)
   1465         for x in tf.nest.flatten([args, kwargs])):
-> 1466       return TFOpLambda(op)(*args, **kwargs)
   1467     else:
   1468       return self.NOT_SUPPORTED

D:\pictureprocess\anaconda\lib\site-packages\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
    943     # >> model = tf.keras.Model(inputs, outputs)
    944     if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
--> 945       return self._functional_construction_call(inputs, args, kwargs,
    946                                                 input_list)
    947 

D:\pictureprocess\anaconda\lib\site-packages\keras\engine\base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
   1081         layer=self, inputs=inputs, build_graph=True, training=training_value):
   1082       # Check input assumptions set after layer building, e.g. input shape.
-> 1083       outputs = self._keras_tensor_symbolic_call(
   1084           inputs, input_masks, args, kwargs)
   1085 

D:\pictureprocess\anaconda\lib\site-packages\keras\engine\base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
    814       return tf.nest.map_structure(keras_tensor.KerasTensor, output_signature)
    815     else:
--> 816       return self._infer_output_signature(inputs, args, kwargs, input_masks)
    817 
    818   def _infer_output_signature(self, inputs, args, kwargs, input_masks):

D:\pictureprocess\anaconda\lib\site-packages\keras\engine\base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
    854           self._maybe_build(inputs)
    855           inputs = self._maybe_cast_inputs(inputs)
--> 856           outputs = call_fn(inputs, *args, **kwargs)
    857 
    858         self._handle_activity_regularization(inputs, outputs)

D:\pictureprocess\anaconda\lib\site-packages\keras\layers\core.py in _call_wrapper(*args, **kwargs)
   1341     # Decorate the function to produce this layer's call method
   1342     def _call_wrapper(*args, **kwargs):
-> 1343       return self._call_wrapper(*args, **kwargs)
   1344     self.call = tf.__internal__.decorator.make_decorator(function, _call_wrapper)
   1345 

D:\pictureprocess\anaconda\lib\site-packages\keras\layers\core.py in _call_wrapper(self, *args, **kwargs)
   1373       # multiple ops w/ the same name when the layer is reused)
   1374       kwargs.pop('name', None)
-> 1375       result = self.function(*args, **kwargs)
   1376     self._check_variables(created_variables, tape.watched_variables())
   1377     return result

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\util\dispatch.py in wrapper(*args, **kwargs)
    204     """Call target, and fall back on dispatchers if there is a TypeError."""
    205     try:
--> 206       return target(*args, **kwargs)
    207     except (TypeError, ValueError):
    208       # Note: convert_to_eager_tensor currently raises a ValueError, not a

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py in multiply(x, y, name)
    528   """
    529 
--> 530   return gen_math_ops.mul(x, y, name)
    531 
    532 

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\ops\gen_math_ops.py in mul(x, y, name)
   6246       pass  # Add nodes to the TensorFlow graph.
   6247   # Add nodes to the TensorFlow graph.
-> 6248   _, _, _op, _outputs = _op_def_library._apply_op_helper(
   6249         "Mul", x=x, y=y, name=name)
   6250   _result = _outputs[:]

D:\pictureprocess\anaconda\lib\site-packages\tensorflow\python\framework\op_def_library.py in _apply_op_helper(op_type_name, name, **keywords)
    618         if input_arg.type_attr in attrs:
    619           if attrs[input_arg.type_attr] != attr_value:
--> 620             raise TypeError(
    621                 "Input '%s' of '%s' Op has type %s that does not "
    622                 "match type %s of argument '%s'." %

TypeError: Input 'y' of 'Mul' Op has type float64 that does not match type float32 of argument 'x'.

 

Check your input. This error means the op is restricted to float32 data, but one of your inputs is float64: in the traceback, `boxes` is float32 while `image_dims` (built from `image_shape` inside yad2k's `yolo_eval`) is float64. You can force-convert the float64 value to float32, e.g. with NumPy before it enters the graph, or with a cast inside the graph.
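A minimal sketch of the two fixes, reusing the names from the traceback (`yolo_eval`, `yolo_outputs`, `boxes`, `image_dims`); the 720x1280 image size below is just an example, adjust it to your own image:

    import numpy as np
    from tensorflow.keras import backend as K

    # Option 1 - fix at the call site: pass image_shape as float32 so that
    # K.stack([height, width, height, width]) inside yolo_eval inherits float32.
    image_shape = np.array([720., 1280.], dtype=np.float32)  # example size
    boxes, scores, classes = yolo_eval(yolo_outputs, image_shape)

    # Option 2 - fix inside yad2k/models/keras_yolo.py, just before the failing
    # line "boxes = boxes * image_dims": cast image_dims to the dtype of boxes
    # so the Mul op sees matching input types.
    image_dims = K.cast(image_dims, K.dtype(boxes))  # float64 -> float32
    boxes = boxes * image_dims

Either option works on its own; Option 2 is the more robust one because it no longer matters what dtype the caller passes in for image_shape.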
