### lab4-1 (4-1.py) ###
# Live video: convert a fixed ROI of each camera frame to HSV and paste it back.
import cv2

ESC = 27
rect = ((220, 20), (370, 190))  # ROI coordinates: (left, top), (right, bottom)
(left, top), (right, bottom) = rect


# Return the ROI sub-image of a frame (a NumPy view, not a copy).
def roiarea(frame):
    return frame[top:bottom, left:right]


# Paste processed ROI data back into the original frame and return it.
def replaceroi(frame, roi):
    frame[top:bottom, left:right] = roi
    return frame


cap = cv2.VideoCapture(0)
# Keep the camera's aspect ratio when downscaling to WIDTH pixels wide.
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, (WIDTH, HEIGHT))
    frame = cv2.flip(frame, 1)  # mirror for a selfie-style view
    # Take out the sub-image and convert it to HSV.
    roi = roiarea(frame)
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Paste the processed sub-image back into the frame.
    frame = replaceroi(frame, roi)
    # Draw a box around the ROI area.
    cv2.rectangle(frame, rect[0], rect[1], (0, 0, 255), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ESC:
        break

cap.release()
cv2.destroyAllWindows()


### lab4-2 (4-2.py) ###
# Track a user-selected object in a video file with the CSRT tracker.
import cv2

ESC = 27
cap = cv2.VideoCapture('vtest.avi')  # OpenCV sample video vtest.avi
tracker = cv2.TrackerCSRT_create()
roi = None  # holds the (x, y, w, h) region to track; None until selected

while True:
    ret, frame = cap.read()
    if roi is None:
        roi = cv2.selectROI('frame', frame)
        if roi != (0, 0, 0, 0):
            # A region was drawn: hand it to the tracker's init().
            tracker.init(frame, roi)
    # update() computes the object's new position each frame; when it
    # succeeds, draw the new bounding rectangle.
    success, rect = tracker.update(frame)
    if success:
        (x, y, w, h) = [int(i) for i in rect]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ESC:  # was a magic literal 27; use the ESC constant
        break

cap.release()
cv2.destroyAllWindows()


### lab4-3 (4-3.py) ###
# Same CSRT tracking as lab4-2, but on a live camera feed.
import cv2

ESC = 27
# Live-video setup.
cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)
tracker = cv2.TrackerCSRT_create()
roi = None

while True:
    ret, frame = cap.read()  # read a live frame
    frame = cv2.resize(frame, (WIDTH, HEIGHT))
    if roi is None:
        roi = cv2.selectROI('frame', frame)
        if roi != (0, 0, 0, 0):
            tracker.init(frame, roi)
    success, rect = tracker.update(frame)
    if success:
        (x, y, w, h) = [int(i) for i in rect]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ESC:
        break

cap.release()
cv2.destroyAllWindows()


### lab4-4 ###
# Google MediaPipe official site: https://mediapipe.dev/


### lab4-5 ###
# Clone the existing opencv virtualenv into a new Python 3 virtualenv
# named "mediapipe":
# $ cpvirtualenv opencv mediapipe
# Install MediaPipe on a Raspberry Pi 4:
# (mediapipe) $ pip install mediapipe-rpi4==0.8.4.0
# Install MediaPipe on a Raspberry Pi 3:
# (mediapipe) $ pip install mediapipe-rpi3==0.8.4.0


### lab4-6 (4-4.py) ###
# Face detection on live video with MediaPipe FaceDetection.
import cv2
import mediapipe as mp  # import the mediapipe module

ESC = 27
mp_face_detection = mp.solutions.face_detection  # face_detection solution (detects faces)
mp_drawing = mp.solutions.drawing_utils          # drawing_utils solution (draws annotations)

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

# Create the detector via FaceDetection().
face_detection = mp_face_detection.FaceDetection(min_detection_confidence = 0.5)

while cap.isOpened():
    ret, frame = cap.read()
    frame = cv2.resize(frame, (WIDTH, HEIGHT))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    frame.flags.writeable = False                   # mark read-only during processing
    results = face_detection.process(frame)         # run detection on this frame
    frame.flags.writeable = True                    # writable again for drawing
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # back to BGR for OpenCV display
    if results.detections:                  # if any face was detected
        for detection in results.detections:  # visit each face
            mp_drawing.draw_detection(frame, detection)  # annotate the 6 key points
    cv2.imshow('MediaPipe Face Detection', frame)
    if cv2.waitKey(1) == ESC:
        break

cap.release()
cv2.destroyAllWindows()


### lab4-7 (4-5.py) ###
# Face mesh (468 landmarks) on live video with MediaPipe FaceMesh.
import cv2
import mediapipe as mp

ESC = 27
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
drawing_spec = mp_drawing.DrawingSpec(thickness = 1, circle_radius = 1)

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence = 0.5,
                                  min_tracking_confidence = 0.5)

while cap.isOpened():
    ret, frame = cap.read()
    frame = cv2.resize(frame, (WIDTH, HEIGHT))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame.flags.writeable = False
    results = face_mesh.process(frame)
    frame.flags.writeable = True
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    if results.multi_face_landmarks:                  # if any face was detected
        for face_landmarks in results.multi_face_landmarks:  # visit each face
            # Annotate the 468 key points.
            mp_drawing.draw_landmarks(image = frame,
                                      landmark_list = face_landmarks,
                                      connections = mp_face_mesh.FACEMESH_CONTOURS,
                                      landmark_drawing_spec = drawing_spec,
                                      connection_drawing_spec = drawing_spec)
    cv2.imshow('MediaPipe FaceMesh', frame)
    if cv2.waitKey(1) == ESC:
        break

cap.release()
cv2.destroyAllWindows()


### lab4-8 (4-6.py) ###
# Hand landmark detection on live video with MediaPipe Hands.
import cv2
import mediapipe as mp

ESC = 27
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands  # hands solution (detects hand landmarks)

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

hands = mp_hands.Hands(min_detection_confidence = 0.5,
                       min_tracking_confidence = 0.5)

while cap.isOpened():
    ret, frame = cap.read()
    frame = cv2.resize(frame, (WIDTH, HEIGHT))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame.flags.writeable = False
    results = hands.process(frame)
    frame.flags.writeable = True
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(frame, hand_landmarks,
                                      mp_hands.HAND_CONNECTIONS)
    cv2.imshow('MediaPipe Hands', frame)
    if cv2.waitKey(1) == ESC:
        break

cap.release()
cv2.destroyAllWindows()


### lab4-9 (4-7.py) ###
# Body-pose landmark detection on live video with MediaPipe Pose.
import cv2
import mediapipe as mp

ESC = 27
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

pose = mp_pose.Pose(min_detection_confidence = 0.5,
                    min_tracking_confidence = 0.5)

while cap.isOpened():
    ret, frame = cap.read()
    frame = cv2.resize(frame, (WIDTH, HEIGHT))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame.flags.writeable = False
    results = pose.process(frame)
    frame.flags.writeable = True
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    mp_drawing.draw_landmarks(frame, results.pose_landmarks,
                              mp_pose.POSE_CONNECTIONS)
    cv2.imshow('MediaPipe Pose', frame)
    if cv2.waitKey(1) == ESC:
        break

cap.release()
cv2.destroyAllWindows()


### lab4-10 ###
# CVZone needs OpenCV and mediapipe; clone the earlier mediapipe virtualenv:
# $ cpvirtualenv mediapipe cvzone
# Then install CVZone:
# (cvzone) $ pip install cvzone==1.5.2


### lab4-11 (4-8.py) ###
# Face detection on live video with CVZone's FaceDetector wrapper.
from cvzone.FaceDetectionModule import FaceDetector
import cv2

ESC = 27
cap = cv2.VideoCapture(0)
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 400
HEIGHT = int(WIDTH / ratio)

detector = FaceDetector()  # FaceDetector() detects faces

while cap.isOpened():
    success, img = cap.read()
    img = cv2.resize(img, (WIDTH, HEIGHT))
    img, bboxs = detector.findFaces(img)
    if bboxs:  # if any face was detected
        # bboxInfo - 'id', 'bbox', 'score', 'center'
        center = bboxs[0]['center']  # center-point coordinates of the first face
        # Draw a filled circle at the center point.
        cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)
    cv2.imshow('Image', img)
    if cv2.waitKey(1) == ESC:
        break

cap.release()
cv2.destroyAllWindows()