working
__pycache__/draw.cpython-312.pyc | BIN (new file; binary file not shown)
__pycache__/filter.cpython-312.pyc | BIN (new file; binary file not shown)
__pycache__/utils.cpython-312.pyc | BIN (new file; binary file not shown)
mac.py | 36 (new file)
@@ -0,0 +1,36 @@
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose

cap = cv2.VideoCapture(0)
with mp_pose.Pose(
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            continue

        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = pose.process(image)

        # Draw the pose annotation on the image.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        mp_drawing.draw_landmarks(
            image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
        # Flip the image horizontally for a selfie-view display.
        cv2.imshow('MediaPipe Pose', cv2.flip(image, 1))
        if cv2.waitKey(5) & 0xFF == 27:
            break

cap.release()
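Note: mac.py only draws the detected pose. If the landmark coordinates are needed downstream (for example to feed them into the normalize() helper in utils.py), they can be read from results.pose_landmarks inside the loop. A minimal sketch, assuming the same MediaPipe Pose results object used above; this addition is not part of the commit:

    # Inside the while loop, after pose.process(image):
    if results.pose_landmarks:
        h, w, _ = image.shape
        # Each landmark carries normalized x/y in [0, 1] plus z and visibility.
        nose = results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE]
        print(f"nose at pixel ({nose.x * w:.0f}, {nose.y * h:.0f}), "
              f"visibility {nose.visibility:.2f}")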
main.py | 5
@@ -69,7 +69,10 @@ def main():
     fps = 1 / delta if delta > 0 else float('inf')
     # print(f"\rDelta: {delta:.4f}s, FPS: {fps:.2f}", end="")

-    for result in results:
+    if len(results) == 0:
+        continue
+
+    result = results[0]
     kpts = result.keypoints.data[0] if len(result.keypoints.data) else None

     if kpts is None:
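For context, the result.keypoints.data access pattern above matches the Ultralytics YOLO pose API, where keypoints.data is a tensor of shape (num_people, 17, 3) holding x, y and confidence per keypoint. A minimal sketch of how such results are typically produced; the model file name and placeholder frame are assumptions, not taken from this commit:

    import numpy as np
    from ultralytics import YOLO

    model = YOLO("yolov8n-pose.pt")                    # assumed pose model; not specified in this diff
    frame = np.zeros((480, 640, 3), dtype=np.uint8)    # placeholder BGR frame
    results = model(frame)                             # list with one Results object per input image
    if len(results) and len(results[0].keypoints.data):
        kpts = results[0].keypoints.data[0]            # (17, 3): x, y, confidence per keypoint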
@@ -35,7 +35,7 @@ for i, move in enumerate(moves):

     # For drawing (e.g. shifting onto the screen)
-    draw = utils.normalize(move[2])
+    draw = utils.normalize(move[2]) * 200 + 250

     cv2.imshow('you', draw_new(draw))
     cv2.waitKey(1)

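A quick worked example of what the new scaling does (a sketch, not part of the commit): normalize() returns hip-centered coordinates on the order of one body height, so multiplying by 200 and adding 250 places the skeleton inside a roughly 500x500 pixel canvas for display.

    import numpy as np

    pts = np.array([[0.0, -1.0],    # e.g. a nose one body-height above the hips
                    [0.5,  0.5]])   # e.g. a knee below and to the side (illustrative)
    pixels = pts * 200 + 250        # -> [[250.,  50.], [350., 350.]]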
utils.py | 15
@@ -15,21 +15,26 @@ def recvall(sock, n):
 def distance(p1, p2):
     return math.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)

 import numpy as np

 def normalize(move):
     left_hip = move[11]   # Left Hip
     right_hip = move[12]  # Right Hip
+    nose = move[0]        # Nose (head)

     # Hip midpoint
     center = (left_hip + right_hip) / 2

     # Offset relative to the center
     normalized_keypoints = move - center
-    distances = np.linalg.norm(normalized_keypoints[:, :2], axis=1)
-    max_dist = np.max(distances)
-
-    if max_dist > 0:
-        normalized_keypoints[:, :2] /= max_dist
+    # Instead of max_dist, use a fixed "body size" measure
+    body_height = np.linalg.norm(nose[:2] - center[:2])  # e.g. hip-to-head distance
+
+    if body_height > 0:
+        normalized_keypoints[:, :2] /= body_height

     draw = normalized_keypoints[:, :2]

     return draw

 def find_closest(moves, target):
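A minimal usage sketch for the updated normalize(), assuming COCO/YOLO-style keypoints (a 17x3 array of x, y, confidence with index 0 = nose, 11/12 = the hips) and that the utils.py from this commit is importable; the coordinates are made up for illustration:

    import numpy as np
    import utils

    kpts = np.zeros((17, 3), dtype=np.float32)
    kpts[0, :2]  = (320, 100)   # nose
    kpts[11, :2] = (300, 300)   # left hip
    kpts[12, :2] = (340, 300)   # right hip

    out = utils.normalize(kpts)
    print(out[0])   # ~[0, -1]: hip-centered, scaled by the hip-to-nose distance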