Line following means steering the car by recognising markings or colours on the ground. For a Raspberry Pi car you can try the following approaches:
Infrared sensors: mount IR reflectance sensors under the chassis and judge which way the car should move from how strongly the ground reflects the infrared light; your program then drives the motors according to the sensor readings (a GPIO sketch follows these two approaches).
Camera and image processing: fit the car with a camera and use the OpenCV library to recognise the marking or colour on the ground and decide the direction of travel. Write an algorithm that analyses each captured frame, finds the colour marking or its contour, and steers the car according to the result (a minimal OpenCV sketch appears after the reference notes below).
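As a rough illustration of the infrared approach (not the exact wiring of any particular kit), here is a minimal two-sensor loop using the RPi.GPIO library. The pin numbers SENSOR_LEFT / SENSOR_RIGHT and the motor helpers forward(), turn_left(), turn_right() are placeholders to be replaced with the pins and driver code of your own car.

# Minimal two-sensor line-following loop (sketch only).
# SENSOR_LEFT / SENSOR_RIGHT and the motor helpers are assumptions:
# adapt them to your own wiring and motor driver board.
import time
import RPi.GPIO as GPIO

SENSOR_LEFT = 17   # hypothetical BCM pin of the left IR sensor
SENSOR_RIGHT = 27  # hypothetical BCM pin of the right IR sensor

GPIO.setmode(GPIO.BCM)
GPIO.setup(SENSOR_LEFT, GPIO.IN)
GPIO.setup(SENSOR_RIGHT, GPIO.IN)

def forward():
    pass  # drive both motors forward via your motor driver

def turn_left():
    pass  # slow/stop the left motor so the car steers left

def turn_right():
    pass  # slow/stop the right motor so the car steers right

try:
    while True:
        # Many IR modules output LOW over a dark (absorbing) line;
        # invert the logic if your sensors behave the other way round.
        left_on_line = GPIO.input(SENSOR_LEFT) == GPIO.LOW
        right_on_line = GPIO.input(SENSOR_RIGHT) == GPIO.LOW
        if left_on_line and right_on_line:
            forward()          # line is centred under the car
        elif left_on_line:
            turn_left()        # line drifted left, steer back
        elif right_on_line:
            turn_right()       # line drifted right, steer back
        else:
            forward()          # both sensors off the line: keep going
        time.sleep(0.01)
finally:
    GPIO.cleanup()

With three or more sensors the same idea extends from this bang-bang logic to proportional steering based on which sensors see the line.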
You can do the development in Python: its simple syntax and strong image-processing and computer-vision libraries make it a good fit for implementing line following on the Raspberry Pi.
For concrete implementation details and code samples, look at open-source projects, tutorials and other material, such as Raspberry Pi car projects on GitHub and discussions in the related forums. The official Raspberry Pi documentation and the OpenCV documentation also describe the relevant methods and include code examples.
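For the camera-based approach, the OpenCV side could look roughly like this sketch: each frame is thresholded for a dark line on a light floor, the largest contour is taken to be the line, and the horizontal offset of its centroid from the image centre becomes the steering signal. The threshold value 60 and the steer() hook are assumptions to adapt to your own track and motor driver; the findContours call uses the OpenCV 4 return signature.

# Sketch of camera-based line detection with OpenCV (assumes a dark line
# on a light floor; the threshold value and steer() are placeholders).
import cv2

def steer(offset):
    # Hypothetical hook: negative offset -> steer left, positive -> steer right.
    print("steering offset:", offset)

cap = cv2.VideoCapture(0)                            # default camera
while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Keep only dark pixels; the threshold of 60 depends on your lighting.
    _, mask = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        line = max(contours, key=cv2.contourArea)    # biggest dark blob = the line
        m = cv2.moments(line)
        if m["m00"] > 0:
            cx = int(m["m10"] / m["m00"])            # x coordinate of the line centre
            steer(cx - frame.shape[1] // 2)          # offset from the image centre
    cv2.imshow("mask", mask)
    if cv2.waitKey(1) & 0xFF == ord('q'):            # press q to quit
        break
cap.release()
cv2.destroyAllWindows()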
import os
import sys

import cv2
import numpy as np


def unifyImg(strSource):
    # Read all images and record the smallest width/height seen
    x_min = 9999
    y_min = 9999
    img_Bingbing = list()
    img_file_list = os.listdir(strSource)
    for i in range(0, len(img_file_list)):
        strFileName = os.path.join(strSource, img_file_list[i])
        if os.path.isfile(strFileName):
            img = cv2.imread(strFileName)
            if img is None:
                print('Failed when reading file %s' % strFileName)
                continue
            # Record the smallest size
            if img.shape[0] < y_min:
                y_min = img.shape[0]
            if img.shape[1] < x_min:
                x_min = img.shape[1]
            # Store the image together with its file name
            img_Bingbing.append([img, img_file_list[i]])
    # Make sure the output folders exist before writing into them
    os.makedirs(os.path.join(strSource, 'Face_only'), exist_ok=True)
    os.makedirs(os.path.join(strSource, 'Unified'), exist_ok=True)
    # Crop faces and resize every image onto a unified canvas
    frontFaceCascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    profileFaceCascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_profileface.xml')
    for i in range(0, len(img_Bingbing)):
        img = img_Bingbing[i][0]
        # Check whether a face can be detected in the image
        frontFaces = frontFaceCascade.detectMultiScale(img)
        profileFaces = profileFaceCascade.detectMultiScale(img)
        if len(frontFaces) == 0:
            if len(profileFaces) == 0:
                print("no face detected on img: %s, deleted." % img_Bingbing[i][1])
                continue
            else:
                x, y, w, h = profileFaces[0]
        else:
            x, y, w, h = frontFaces[0]
        # Save the cropped face region
        img_new = img[y:y + h, x:x + w]
        cv2.imwrite(strSource + "\\Face_only\\" + img_Bingbing[i][1], img_new)
        # Scale by 98% of the fitting ratio so the resized image is guaranteed
        # to be no larger than the (x_min, y_min) canvas
        rate_x = x_min / img.shape[1] * 0.98
        rate_y = y_min / img.shape[0] * 0.98
        rate = min(rate_x, rate_y)
        img_new = cv2.resize(img, None, fx=rate, fy=rate)
        if len(img_new.shape) == 2:
            # greyscale image
            img_template = np.zeros((y_min, x_min), np.uint8)
        else:
            img_template = np.zeros((y_min, x_min, 3), np.uint8)
        h = img_new.shape[0]
        w = img_new.shape[1]
        # Paste the resized image into the top-left corner of the unified canvas
        img_template[0:h, 0:w] = img_new
        cv2.imwrite(strSource + "\\Unified\\" + img_Bingbing[i][1], img_template)
strSource = r'C:\Waley\Personal\Learning\Python\Image\TryFaceIdentify'
unifyImg(strSource)
src = list()
labels = list()
# LiYiFeng -> label 0
for i in range(0, 6):
    img = cv2.imread(strSource + "\\unified\\LYF_" + str(i + 1) + ".jpg", cv2.IMREAD_GRAYSCALE)
    if img is None:
        print("Fail to read images")
        sys.exit(0)
    else:
        src.append(img)
        labels.append(0)
# YiYangQianXi -> label 1
for i in range(0, 6):
    img = cv2.imread(strSource + "\\unified\\YYQX_" + str(i + 1) + ".jpg", cv2.IMREAD_GRAYSCALE)
    if img is None:
        print("Fail to read images")
        sys.exit(0)
    else:
        src.append(img)
        labels.append(1)
names = {"0": "LiYiFeng", "1": "YiYangQianXi"}
# Create the recognizers (requires the opencv-contrib-python package)
recognizer_eigenface = cv2.face.EigenFaceRecognizer_create()
recognizer_fisher = cv2.face.FisherFaceRecognizer_create()
recognizer_LBPH = cv2.face.LBPHFaceRecognizer_create()
# Train the recognizers
recognizer_eigenface.train(src, np.array(labels))
recognizer_fisher.train(src, np.array(labels))
recognizer_LBPH.train(src, np.array(labels))
# Predict on images 7-9, which were not used for training
for i in range(7, 10):
    img = cv2.imread(strSource + "\\unified\\LYF_" + str(i) + '.jpg', cv2.IMREAD_GRAYSCALE)
    print("LYF %d : " % i)
    label, confidence = recognizer_eigenface.predict(img)
    print("predicted by Eigenface: %s, confidence is: %d" % (names[str(label)], confidence))
    label, confidence = recognizer_fisher.predict(img)
    print("predicted by Fisher: %s, confidence is: %d" % (names[str(label)], confidence))
    label, confidence = recognizer_LBPH.predict(img)
    print("predicted by LBPH: %s, confidence is: %d" % (names[str(label)], confidence))
    print("\n\n")
for i in range(7, 10):
    img = cv2.imread(strSource + "\\unified\\YYQX_" + str(i) + '.jpg', cv2.IMREAD_GRAYSCALE)
    print("YYQX %d : " % i)
    label, confidence = recognizer_eigenface.predict(img)
    print("predicted by Eigenface: %s, confidence is: %d" % (names[str(label)], confidence))
    label, confidence = recognizer_fisher.predict(img)
    print("predicted by Fisher: %s, confidence is: %d" % (names[str(label)], confidence))
    label, confidence = recognizer_LBPH.predict(img)
    print("predicted by LBPH: %s, confidence is: %d" % (names[str(label)], confidence))
def trainAndPredict(strSource, strName1, strName2):
    # Prepare the training dataset: images 1-6 of each person
    src = list()
    labels = list()
    for i in range(0, 6):
        img = cv2.imread(strSource + "\\unified\\" + strName1 + '_' + str(i + 1) + ".jpg", cv2.IMREAD_GRAYSCALE)
        if img is None:
            print("Fail to read images")
            sys.exit(0)
        else:
            src.append(img)
            labels.append(0)
    for i in range(0, 6):
        img = cv2.imread(strSource + "\\unified\\" + strName2 + '_' + str(i + 1) + ".jpg", cv2.IMREAD_GRAYSCALE)
        if img is None:
            print("Fail to read images")
            sys.exit(0)
        else:
            src.append(img)
            labels.append(1)
    names = {"0": strName1, "1": strName2}
    # Create the recognizers
    recognizer_eigenface = cv2.face.EigenFaceRecognizer_create()
    recognizer_fisher = cv2.face.FisherFaceRecognizer_create()
    recognizer_LBPH = cv2.face.LBPHFaceRecognizer_create()
    # Train the recognizers
    recognizer_eigenface.train(src, np.array(labels))
    recognizer_fisher.train(src, np.array(labels))
    recognizer_LBPH.train(src, np.array(labels))
    # Predict on images 7-9 of each person, which were not used for training
    for i in range(7, 10):
        img = cv2.imread(strSource + "\\unified\\" + strName1 + '_' + str(i) + '.jpg', cv2.IMREAD_GRAYSCALE)
        print("%s %d : " % (strName1, i))
        label, confidence = recognizer_eigenface.predict(img)
        print("predicted by Eigenface: %s, confidence is: %d" % (names[str(label)], confidence))
        label, confidence = recognizer_fisher.predict(img)
        print("predicted by Fisher: %s, confidence is: %d" % (names[str(label)], confidence))
        label, confidence = recognizer_LBPH.predict(img)
        print("predicted by LBPH: %s, confidence is: %d" % (names[str(label)], confidence))
        print("\n\n")
    for i in range(7, 10):
        img = cv2.imread(strSource + "\\unified\\" + strName2 + '_' + str(i) + '.jpg', cv2.IMREAD_GRAYSCALE)
        print("%s %d : " % (strName2, i))
        label, confidence = recognizer_eigenface.predict(img)
        print("predicted by Eigenface: %s, confidence is: %d" % (names[str(label)], confidence))
        label, confidence = recognizer_fisher.predict(img)
        print("predicted by Fisher: %s, confidence is: %d" % (names[str(label)], confidence))
        label, confidence = recognizer_LBPH.predict(img)
        print("predicted by LBPH: %s, confidence is: %d" % (names[str(label)], confidence))
#strSource = r'C:\Waley\Personal\Learning\Python\Image\TryFaceIdentify'
#strName1 = 'LYF'
#strName2 = 'YYQX'
strSource = r'C:\Waley\Personal\Learning\Python\Image\FaceIdentify'
strName1 = 'LiBingbing'
strName2 = 'YangMi'
trainAndPredict(strSource, strName1, strName2)
Switching to two actresses for another run, the results still look reasonable: