### ch06 code download ###
$ cd ~/Lab
$ wget https://max543.com/debugger/class/python02/PiCamera/Lab/code/ch06.zip
$ unzip ch06.zip

### ex6-1 ###
# Google MediaPipe official website
https://mediapipe.dev/

### ex6-2 ###
# Clone the existing opencv virtual environment into a new Python 3 virtual environment named mediapipe
$ cpvirtualenv opencv mediapipe
# Install MediaPipe on a Raspberry Pi 4
(mediapipe) $ pip install mediapipe-rpi4==0.8.4.0
# Install MediaPipe on a Raspberry Pi 3
(mediapipe) $ pip install mediapipe-rpi3==0.8.4.0

### ex6-3 (6-1.py) ###
import cv2
import mediapipe as mp

mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

face_detection = mp_face_detection.FaceDetection(min_detection_confidence = 0.5)
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame = cv2.resize(frame, (WIDTH, HEIGHT))
        frame = cv2.rotate(frame, rotateCode = 1)   # 1 = cv2.ROTATE_180
        frame = cv2.flip(frame, 1)                  # mirror horizontally
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame.flags.writeable = False               # mark read-only to let MediaPipe avoid a copy
        results = face_detection.process(frame)
        frame.flags.writeable = True
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        if results.detections:
            for detection in results.detections:
                mp_drawing.draw_detection(frame, detection)
        cv2.imshow("MediaPipe Face Detection", frame)
    if cv2.waitKey(1) == 27:                        # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()

### ex6-4 (6-2.py) ###
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
drawing_spec = mp_drawing.DrawingSpec(thickness = 1, circle_radius = 1)

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence = 0.5, min_tracking_confidence = 0.5)
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame = cv2.resize(frame, (WIDTH, HEIGHT))
        frame = cv2.rotate(frame, rotateCode = 1)
        frame = cv2.flip(frame, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame.flags.writeable = False
        results = face_mesh.process(frame)
        frame.flags.writeable = True
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                # FACE_CONNECTIONS exists in mediapipe 0.8.4 (the version pinned in ex6-2);
                # later releases rename it to FACEMESH_TESSELATION
                mp_drawing.draw_landmarks(image = frame,
                                          landmark_list = face_landmarks,
                                          connections = mp_face_mesh.FACE_CONNECTIONS,
                                          landmark_drawing_spec = drawing_spec,
                                          connection_drawing_spec = drawing_spec)
        cv2.imshow("MediaPipe FaceMesh", frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()

### ex6-5 (6-3.py) ###
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

hands = mp_hands.Hands(min_detection_confidence = 0.5, min_tracking_confidence = 0.5)
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame = cv2.resize(frame, (WIDTH, HEIGHT))
        frame = cv2.rotate(frame, rotateCode = 1)
        frame = cv2.flip(frame, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame.flags.writeable = False
        results = hands.process(frame)
        frame.flags.writeable = True
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
        cv2.imshow("MediaPipe Hands", frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()
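
### supplement: reading raw landmark coordinates (editor's sketch, not in ch06.zip) ###
# The MediaPipe solutions above return landmarks normalized to [0, 1], so
# they must be scaled by the frame size before you can draw with them.
# A minimal sketch on a single still image; the file names hand.jpg and
# hand_out.jpg are only examples, and static_image_mode is enabled because
# there is no video stream here.
import cv2
import mediapipe as mp

mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode = True, min_detection_confidence = 0.5)

frame = cv2.imread("hand.jpg")                       # any test photo of a hand
h, w = frame.shape[:2]
results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if results.multi_hand_landmarks:
    # landmark 8 is the index fingertip (mp_hands.HandLandmark.INDEX_FINGER_TIP)
    tip = results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]
    x, y = int(tip.x * w), int(tip.y * h)            # scale normalized coords to pixels
    cv2.circle(frame, (x, y), 5, (0, 255, 0), cv2.FILLED)
cv2.imwrite("hand_out.jpg", frame)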
### ex6-6 (6-4.py) ###
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

pose = mp_pose.Pose(min_detection_confidence = 0.5, min_tracking_confidence = 0.5)
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame = cv2.resize(frame, (WIDTH, HEIGHT))
        frame = cv2.rotate(frame, rotateCode = 1)
        frame = cv2.flip(frame, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame.flags.writeable = False
        results = pose.process(frame)
        frame.flags.writeable = True
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        # draw_landmarks is a no-op when results.pose_landmarks is None
        mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        cv2.imshow("MediaPipe Pose", frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()

### ex6-7 ###
# In the mediapipe virtual environment, also install CVZone
(mediapipe) $ pip install cvzone==1.5.2

### ex6-8 (6-5.py) ###
from cvzone.FaceDetectionModule import FaceDetector
import cv2

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

detector = FaceDetector()
while cap.isOpened():
    success, img = cap.read()
    img = cv2.resize(img, (WIDTH, HEIGHT))
    img = cv2.rotate(img, rotateCode = 1)
    img = cv2.flip(img, 1)
    img, bboxs = detector.findFaces(img)
    if bboxs:
        # each bbox dict holds "id", "bbox", "score", "center"
        center = bboxs[0]["center"]
        cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()

### ex6-9 (6-6.py) ###
from cvzone.FaceMeshModule import FaceMeshDetector
import cv2

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

detector = FaceMeshDetector(maxFaces = 2)
while cap.isOpened():
    success, img = cap.read()
    img = cv2.resize(img, (WIDTH, HEIGHT))
    img = cv2.rotate(img, rotateCode = 1)
    img = cv2.flip(img, 1)
    img, faces = detector.findFaceMesh(img)
    if faces:
        print(faces[0])
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()

### ex6-10 (6-7.py) ###
from cvzone.HandTrackingModule import HandDetector
import cv2

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

detector = HandDetector(detectionCon = 0.5, maxHands = 2)
while cap.isOpened():
    success, img = cap.read()
    img = cv2.resize(img, (WIDTH, HEIGHT))
    img = cv2.rotate(img, rotateCode = 1)
    # img = cv2.flip(img, 1)
    hands, img = detector.findHands(img)
    if hands:
        # Hand 1
        hand1 = hands[0]
        lmList1 = hand1["lmList"]
        bbox1 = hand1["bbox"]
        centerPoint1 = hand1["center"]
        handType1 = hand1["type"]
        fingers1 = detector.fingersUp(hand1)
        if len(hands) == 2:
            # Hand 2
            hand2 = hands[1]
            lmList2 = hand2["lmList"]
            bbox2 = hand2["bbox"]
            centerPoint2 = hand2["center"]
            handType2 = hand2["type"]
            fingers2 = detector.fingersUp(hand2)
            # distance between the two index fingertips (landmark 8)
            length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()
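
### supplement: a coarse gesture check with fingersUp (editor's sketch, not in ch06.zip) ###
# fingersUp() returns five 0/1 flags ordered thumb to pinky. A minimal
# sketch of mapping those flags to a label; classify() is a hypothetical
# helper, not a CVZone API, and would be called inside the loop of 6-7.py
# as classify(fingers1).
def classify(fingers):
    if fingers == [0, 0, 0, 0, 0]:
        return "fist"
    if fingers == [1, 1, 1, 1, 1]:
        return "open palm"
    if fingers == [0, 1, 1, 0, 0]:
        return "peace"
    return "other"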
### ex6-11 (6-7a.py) ###
from cvzone.HandTrackingModule import HandDetector
import cv2

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

detector = HandDetector(detectionCon = 0.5, maxHands = 1)
while cap.isOpened():
    success, img = cap.read()
    img = cv2.resize(img, (WIDTH, HEIGHT))
    img = cv2.rotate(img, rotateCode = 1)
    # img = cv2.flip(img, 1)
    hands, img = detector.findHands(img)
    if hands:
        hand = hands[0]
        bbox = hand["bbox"]
        fingers = detector.fingersUp(hand)
        totalFingers = fingers.count(1)   # number of raised fingers
        cv2.putText(img, f'Fingers:{totalFingers}', (bbox[0] + 50, bbox[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()

### ex6-12 (6-7b.py) ###
from cvzone.HandTrackingModule import HandDetector
import cv2

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

detector = HandDetector(detectionCon = 0.5, maxHands = 2)
while cap.isOpened():
    success, img = cap.read()
    img = cv2.resize(img, (WIDTH, HEIGHT))
    img = cv2.rotate(img, rotateCode = 1)
    # img = cv2.flip(img, 1)
    hands, img = detector.findHands(img)
    if hands:
        # Hand 1
        hand1 = hands[0]
        lmList1 = hand1["lmList"]
        bbox1 = hand1["bbox"]
        centerPoint1 = hand1["center"]
        handType1 = hand1["type"]
        fingers1 = detector.fingersUp(hand1)
        # distance between index and middle fingertips of hand 1
        length, info, img = detector.findDistance(lmList1[8], lmList1[12], img)
        cv2.putText(img, f'Dist:{int(length)}', (bbox1[0] + 50, bbox1[1] - 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        if len(hands) == 2:
            # Hand 2
            hand2 = hands[1]
            lmList2 = hand2["lmList"]
            bbox2 = hand2["bbox"]
            centerPoint2 = hand2["center"]
            handType2 = hand2["type"]
            fingers2 = detector.fingersUp(hand2)
            # distance between the two index fingertips
            length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
            cv2.putText(img, f'Dist:{int(length)}', (bbox2[0] + 50, bbox2[1] - 30),
                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()

### ex6-13 (6-8.py) ###
from cvzone.PoseModule import PoseDetector
import cv2

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

detector = PoseDetector()
while cap.isOpened():
    success, img = cap.read()
    img = cv2.resize(img, (WIDTH, HEIGHT))
    img = cv2.rotate(img, rotateCode = 1)
    img = cv2.flip(img, 1)
    img = detector.findPose(img)
    lmList, bboxInfo = detector.findPosition(img, bboxWithHands = False)
    if bboxInfo:
        center = bboxInfo["center"]
        cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()

### ex6-14 ###
# Clone the existing opencv virtual environment into a new Python 3 virtual environment named tflite
$ cpvirtualenv opencv tflite
# Check the CPU architecture
(tflite) $ uname -m
# Check the Python version
(tflite) $ python --version

### ex6-15 ###
# Download the wheel matching your CPU architecture and Python version to install TensorFlow Lite
https://github.com/google-coral/pycoral/releases/

### ex6-16 ###
# Download the wheel from the command line (note: the URL may change),
# then install the downloaded file
(tflite) $ wget https://github.com/google-coral/pycoral/releases/download/v2.0.0/tflite_runtime-2.5.0.post1-cp37-cp37m-linux_armv7l.whl
(tflite) $ pip install tflite_runtime-2.5.0.post1-cp37-cp37m-linux_armv7l.whl
# Or install it directly over the network
(tflite) $ pip install https://github.com/google-coral/pycoral/releases/download/v2.0.0/tflite_runtime-2.5.0.post1-cp37-cp37m-linux_armv7l.whl

### ex6-17 ###
# Get the TensorFlow Lite pretrained model: MobileNet
https://tfhub.dev/tensorflow/lite-model/ssd_mobilenet_v1/1/metadata/2
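
### supplement: running the downloaded model with tflite_runtime (editor's sketch, not in ch06.zip) ###
# A minimal sketch of one inference with the ex6-17 SSD MobileNet model,
# using only the tflite_runtime package installed in ex6-16. The file names
# detect.tflite and test.jpg are assumptions, and the output-tensor order
# (boxes, classes, scores, count) should be verified against the model
# metadata on the TensorFlow Hub page.
import cv2
import numpy as np
from tflite_runtime.interpreter import Interpreter

interpreter = Interpreter(model_path = "detect.tflite")    # assumed file name
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

img = cv2.imread("test.jpg")                               # any BGR test image
ih, iw = input_details[0]["shape"][1:3]                    # model input size, e.g. 300 x 300
rgb = cv2.cvtColor(cv2.resize(img, (iw, ih)), cv2.COLOR_BGR2RGB)
interpreter.set_tensor(input_details[0]["index"], np.expand_dims(rgb, axis = 0))
interpreter.invoke()

boxes = interpreter.get_tensor(output_details[0]["index"])   # assumed tensor ordering
scores = interpreter.get_tensor(output_details[2]["index"])
print(boxes[0][0], scores[0][0])                             # top detection box and score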