Compare commits

...

2 Commits

Author SHA1 Message Date
5ab222aee3 working 2025-09-10 19:51:05 +02:00
5560f6b2e6 working 2025-08-25 19:37:48 +02:00
24 changed files with 1242 additions and 1115 deletions

1
.gitattributes vendored Normal file
View File

@ -0,0 +1 @@
*.pth filter=lfs diff=lfs merge=lfs -text

6
.gitignore vendored
View File

@ -1,6 +1,8 @@
/mmdetection/ /mmdetection/
/mmpose/
/.ipynb_checkpoints/ /.ipynb_checkpoints/
/.gpu/ /.gpu/
/.gpu-3d/ /.gpu-3d/
/.venv/ /.venv/
/venv/
yolo11*

7
.idea/JustTwerk.iml generated
View File

@ -8,7 +8,10 @@
<orderEntry type="sourceFolder" forTests="false" /> <orderEntry type="sourceFolder" forTests="false" />
</component> </component>
<component name="PyDocumentationSettings"> <component name="PyDocumentationSettings">
<option name="format" value="GOOGLE" /> <option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Google" /> <option name="myDocStringFormat" value="Plain" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="py.test" />
</component> </component>
</module> </module>

2
.idea/vcs.xml generated
View File

@ -2,7 +2,5 @@
<project version="4"> <project version="4">
<component name="VcsDirectoryMappings"> <component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" /> <mapping directory="$PROJECT_DIR$" vcs="Git" />
<mapping directory="$PROJECT_DIR$/mmdetection" vcs="Git" />
<mapping directory="$PROJECT_DIR$/mmpose" vcs="Git" />
</component> </component>
</project> </project>

149
.idea/workspace.xml generated
View File

@ -4,23 +4,11 @@
<option name="autoReloadType" value="SELECTIVE" /> <option name="autoReloadType" value="SELECTIVE" />
</component> </component>
<component name="ChangeListManager"> <component name="ChangeListManager">
<list default="true" id="441a4e7b-d6ce-44cb-92c5-2f22f1b1874f" name="Changes" comment="initial commit"> <list default="true" id="441a4e7b-d6ce-44cb-92c5-2f22f1b1874f" name="Changes" comment="working">
<change afterPath="$PROJECT_DIR$/.gitignore" afterDir="false" /> <change beforePath="$PROJECT_DIR$/.idea/vcs.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/vcs.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/JustTwerk.iml" afterDir="false" /> <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/inspectionProfiles/profiles_settings.xml" afterDir="false" /> <change beforePath="$PROJECT_DIR$/main.py" beforeDir="false" afterPath="$PROJECT_DIR$/main.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" /> <change beforePath="$PROJECT_DIR$/utils.py" beforeDir="false" afterPath="$PROJECT_DIR$/utils.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/vcs.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/02_whole_body_from_image.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/body3d.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth" afterDir="false" />
<change afterPath="$PROJECT_DIR$/humanPoseDetection.ipynb" afterDir="false" />
<change afterPath="$PROJECT_DIR$/is_torch.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth" afterDir="false" />
<change afterPath="$PROJECT_DIR$/rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth" afterDir="false" />
<change afterPath="$PROJECT_DIR$/test.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth" afterDir="false" />
<change beforePath="$PROJECT_DIR$/mmpose/demo/body3d_pose_lifter_demo.py" beforeDir="false" afterPath="$PROJECT_DIR$/mmpose/demo/body3d_pose_lifter_demo.py" afterDir="false" />
</list> </list>
<option name="SHOW_DIALOG" value="false" /> <option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" /> <option name="HIGHLIGHT_CONFLICTS" value="true" />
@ -36,6 +24,7 @@
</component> </component>
<component name="Git.Settings"> <component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" /> <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
<option name="ROOT_SYNC" value="SYNC" />
</component> </component>
<component name="ProjectColorInfo">{ <component name="ProjectColorInfo">{
&quot;associatedIndex&quot;: 6 &quot;associatedIndex&quot;: 6
@ -45,37 +34,69 @@
<option name="hideEmptyMiddlePackages" value="true" /> <option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" /> <option name="showLibraryContents" value="true" />
</component> </component>
<component name="PropertiesComponent"><![CDATA[{ <component name="PropertiesComponent">{
"keyToString": { &quot;keyToString&quot;: {
"ModuleVcsDetector.initialDetectionPerformed": "true", &quot;ModuleVcsDetector.initialDetectionPerformed&quot;: &quot;true&quot;,
"Python.02_whole_body_from_image.executor": "Run", &quot;Python.02_whole_body_from_image.executor&quot;: &quot;Run&quot;,
"Python.body3d.executor": "Run", &quot;Python.3d.executor&quot;: &quot;Run&quot;,
"Python.body3d_pose_lifter_demo.executor": "Run", &quot;Python.body3d.executor&quot;: &quot;Run&quot;,
"Python.checkpoint.executor": "Run", &quot;Python.body3d_pose_lifter_demo.executor&quot;: &quot;Run&quot;,
"Python.is_torch.executor": "Run", &quot;Python.calculate.executor&quot;: &quot;Run&quot;,
"Python.local_visualizer_3d.executor": "Run", &quot;Python.checkpoint.executor&quot;: &quot;Run&quot;,
"Python.openpose.executor": "Run", &quot;Python.draw.executor&quot;: &quot;Run&quot;,
"Python.test.executor": "Run", &quot;Python.filter.executor&quot;: &quot;Run&quot;,
"RunOnceActivity.ShowReadmeOnStart": "true", &quot;Python.is_torch.executor&quot;: &quot;Run&quot;,
"RunOnceActivity.TerminalTabsStorage.copyFrom.TerminalArrangementManager.252": "true", &quot;Python.local_visualizer_3d.executor&quot;: &quot;Run&quot;,
"RunOnceActivity.git.unshallow": "true", &quot;Python.main.executor&quot;: &quot;Run&quot;,
"git-widget-placeholder": "main", &quot;Python.openpose.executor&quot;: &quot;Run&quot;,
"last_opened_file_path": "C:/Users/Kajetan/PycharmProjects/JustTwerk", &quot;Python.receive_images.executor&quot;: &quot;Run&quot;,
"node.js.detected.package.eslint": "true", &quot;Python.receiver.executor&quot;: &quot;Run&quot;,
"node.js.detected.package.tslint": "true", &quot;Python.sender.executor&quot;: &quot;Run&quot;,
"node.js.selected.package.eslint": "(autodetect)", &quot;Python.test.executor&quot;: &quot;Run&quot;,
"node.js.selected.package.tslint": "(autodetect)", &quot;Python.ultralytics-test.executor&quot;: &quot;Run&quot;,
"nodejs_package_manager_path": "npm", &quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
"settings.editor.selected.configurable": "editor.preferences.fonts.default", &quot;RunOnceActivity.TerminalTabsStorage.copyFrom.TerminalArrangementManager.252&quot;: &quot;true&quot;,
"vue.rearranger.settings.migration": "true" &quot;RunOnceActivity.git.unshallow&quot;: &quot;true&quot;,
&quot;git-widget-placeholder&quot;: &quot;main&quot;,
&quot;last_opened_file_path&quot;: &quot;C:/Users/Kajetan/PycharmProjects/JustTwerk&quot;,
&quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
&quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
&quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
&quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
&quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
&quot;settings.editor.selected.configurable&quot;: &quot;configurable.group.editor&quot;,
&quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
} }
}]]></component> }</component>
<component name="RecentsManager"> <component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS"> <key name="CopyFile.RECENT_KEYS">
<recent name="C:\Users\Kajetan\PycharmProjects\JustTwerk" /> <recent name="C:\Users\Kajetan\PycharmProjects\JustTwerk" />
</key> </key>
</component> </component>
<component name="RunManager"> <component name="RunManager" selected="Python.draw">
<configuration name="draw" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="JustTwerk" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/draw.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> <configuration name="test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="JustTwerk" /> <module name="JustTwerk" />
<option name="ENV_FILES" value="" /> <option name="ENV_FILES" value="" />
@ -101,6 +122,7 @@
</configuration> </configuration>
<recent_temporary> <recent_temporary>
<list> <list>
<item itemvalue="Python.draw" />
<item itemvalue="Python.test" /> <item itemvalue="Python.test" />
</list> </list>
</recent_temporary> </recent_temporary>
@ -124,8 +146,31 @@
<workItem from="1755884695519" duration="705000" /> <workItem from="1755884695519" duration="705000" />
<workItem from="1755885461444" duration="2686000" /> <workItem from="1755885461444" duration="2686000" />
<workItem from="1755888180570" duration="3107000" /> <workItem from="1755888180570" duration="3107000" />
<workItem from="1755891319108" duration="23374000" /> <workItem from="1755891319108" duration="33842000" />
<workItem from="1755974689137" duration="258000" />
<workItem from="1755974961407" duration="19035000" />
<workItem from="1756053672258" duration="16821000" />
<workItem from="1756216787734" duration="969000" />
<workItem from="1756632365037" duration="26000" />
<workItem from="1757522631129" duration="1771000" />
</task> </task>
<task id="LOCAL-00001" summary="initial commit">
<option name="closed" value="true" />
<created>1755963464017</created>
<option name="number" value="00001" />
<option name="presentableId" value="LOCAL-00001" />
<option name="project" value="LOCAL" />
<updated>1755963464017</updated>
</task>
<task id="LOCAL-00002" summary="working">
<option name="closed" value="true" />
<created>1756143470328</created>
<option name="number" value="00002" />
<option name="presentableId" value="LOCAL-00002" />
<option name="project" value="LOCAL" />
<updated>1756143470328</updated>
</task>
<option name="localTasksCounter" value="3" />
<servers /> <servers />
</component> </component>
<component name="TypeScriptGeneratedFilesManager"> <component name="TypeScriptGeneratedFilesManager">
@ -133,16 +178,26 @@
</component> </component>
<component name="VcsManagerConfiguration"> <component name="VcsManagerConfiguration">
<MESSAGE value="initial commit" /> <MESSAGE value="initial commit" />
<option name="LAST_COMMIT_MESSAGE" value="initial commit" /> <MESSAGE value="working" />
<option name="LAST_COMMIT_MESSAGE" value="working" />
</component> </component>
<component name="com.intellij.coverage.CoverageDataManagerImpl"> <component name="com.intellij.coverage.CoverageDataManagerImpl">
<SUITE FILE_PATH="coverage/JustTwerk$openpose.coverage" NAME="openpose Coverage Results" MODIFIED="1755886110615" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/human-pose-estimation-opencv" /> <SUITE FILE_PATH="coverage/JustTwerk$main.coverage" NAME="main Coverage Results" MODIFIED="1756143599014" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$receiver.coverage" NAME="receiver Coverage Results" MODIFIED="1756142451233" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$filter.coverage" NAME="filter Coverage Results" MODIFIED="1755972211046" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$body3d_pose_lifter_demo.coverage" NAME="body3d_pose_lifter_demo Coverage Results" MODIFIED="1755937235510" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/mmpose/demo" /> <SUITE FILE_PATH="coverage/JustTwerk$body3d_pose_lifter_demo.coverage" NAME="body3d_pose_lifter_demo Coverage Results" MODIFIED="1755937235510" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/mmpose/demo" />
<SUITE FILE_PATH="coverage/JustTwerk$body3d.coverage" NAME="body3d Coverage Results" MODIFIED="1755944498141" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> <SUITE FILE_PATH="coverage/JustTwerk$body3d.coverage" NAME="body3d Coverage Results" MODIFIED="1755944498141" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$02_whole_body_from_image.coverage" NAME="02_whole_body_from_image Coverage Results" MODIFIED="1755885569302" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> <SUITE FILE_PATH="coverage/JustTwerk$02_whole_body_from_image.coverage" NAME="02_whole_body_from_image Coverage Results" MODIFIED="1755885569302" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$local_visualizer_3d.coverage" NAME="local_visualizer_3d Coverage Results" MODIFIED="1755937454029" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/.gpu/Lib/site-packages/mmpose/visualization" />
<SUITE FILE_PATH="coverage/JustTwerk$checkpoint.coverage" NAME="checkpoint Coverage Results" MODIFIED="1755936916130" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/.gpu/Lib/site-packages/mmengine/runner" /> <SUITE FILE_PATH="coverage/JustTwerk$checkpoint.coverage" NAME="checkpoint Coverage Results" MODIFIED="1755936916130" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/.gpu/Lib/site-packages/mmengine/runner" />
<SUITE FILE_PATH="coverage/JustTwerk$calculate.coverage" NAME="calculate Coverage Results" MODIFIED="1756054778057" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$receive_images.coverage" NAME="receive_images Coverage Results" MODIFIED="1755966230858" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$sender.coverage" NAME="sender Coverage Results" MODIFIED="1756142463914" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$openpose.coverage" NAME="openpose Coverage Results" MODIFIED="1755886110615" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/human-pose-estimation-opencv" />
<SUITE FILE_PATH="coverage/JustTwerk$draw.coverage" NAME="draw Coverage Results" MODIFIED="1756053706980" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$ultralytics_test.coverage" NAME="ultralytics-test Coverage Results" MODIFIED="1756116377896" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$3d.coverage" NAME="3d Coverage Results" MODIFIED="1756027604884" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$local_visualizer_3d.coverage" NAME="local_visualizer_3d Coverage Results" MODIFIED="1755937454029" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/.gpu/Lib/site-packages/mmpose/visualization" />
<SUITE FILE_PATH="coverage/JustTwerk$is_torch.coverage" NAME="is_torch Coverage Results" MODIFIED="1755943611769" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> <SUITE FILE_PATH="coverage/JustTwerk$is_torch.coverage" NAME="is_torch Coverage Results" MODIFIED="1755943611769" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/JustTwerk$test.coverage" NAME="test Coverage Results" MODIFIED="1755962675907" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> <SUITE FILE_PATH="coverage/JustTwerk$test.coverage" NAME="test Coverage Results" MODIFIED="1756025632346" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
</component> </component>
</project> </project>

70
02_whole_body_from_image.py
View File

@ -1,70 +0,0 @@
# From Python
# It requires OpenCV installed for Python
import sys
import cv2
import os
from sys import platform
import argparse
try:
# Import Openpose (Windows/Ubuntu/OSX)
dir_path = r"C:\Users\Kajetan\Documents\openpose/python"
try:
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append(dir_path + '/../bin/python/openpose/Release');
os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../x64/Release;' + dir_path + '/../bin;'
print(os.environ["PATH"])
import pyopenpose as op
except ImportError as e:
print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
# Flags
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", default="../examples/media/COCO_val2014_000000000241.jpg", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
args = parser.parse_known_args()
# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../models/"
params["face"] = True
params["hand"] = True
# Add others in path?
for i in range(0, len(args[1])):
curr_item = args[1][i]
if i != len(args[1])-1: next_item = args[1][i+1]
else: next_item = "1"
if "--" in curr_item and "--" in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = "1"
elif "--" in curr_item and "--" not in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = next_item
# Construct it from system arguments
# op.init_argv(args[1])
# oppython = op.OpenposePython()
# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
# Process Image
datum = op.Datum()
imageToProcess = cv2.imread(args[0].image_path)
datum.cvInputData = imageToProcess
opWrapper.emplaceAndPop(op.VectorDatum([datum]))
# Display Image
print("Body keypoints: \n" + str(datum.poseKeypoints))
print("Face keypoints: \n" + str(datum.faceKeypoints))
print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
cv2.waitKey(0)
except Exception as e:
print(e)
sys.exit(-1)

555
body3d.py
View File

@ -1,555 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import mimetypes
import os
import time
from argparse import ArgumentParser
from functools import partial
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmengine.logging import print_log
from mmpose.apis import (_track_by_iou, _track_by_oks,
convert_keypoint_definition, extract_pose_sequence,
inference_pose_lifter_model, inference_topdown,
init_model)
from mmpose.models.pose_estimators import PoseLifter
from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator
from mmpose.registry import VISUALIZERS
from mmpose.structures import (PoseDataSample, merge_data_samples,
split_instances)
from mmpose.utils import adapt_mmdet_pipeline
try:
from mmdet.apis import inference_detector, init_detector
has_mmdet = True
except (ImportError, ModuleNotFoundError):
has_mmdet = False
def parse_args():
parser = ArgumentParser()
parser.add_argument('--det_config', default="mmpose/demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py", help='Config file for detection')
parser.add_argument('--det_checkpoint', default="rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth", help='Checkpoint file for detection')
parser.add_argument(
'--pose_estimator_config',
type=str,
default="mmpose/configs/body_2d_keypoint/rtmpose/body8/rtmpose-m_8xb256-420e_body8-256x192.py",
help='Config file for the 1st stage 2D pose estimator')
parser.add_argument(
'--pose_estimator_checkpoint',
type=str,
default="rtmpose-m_simcc-body7_pt-body7_420e-256x192-e48f03d0_20230504.pth",
help='Checkpoint file for the 1st stage 2D pose estimator')
parser.add_argument(
'--pose_lifter_config',
default="mmpose/configs/body_3d_keypoint/video_pose_lift/h36m/video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py",
help='Config file for the 2nd stage pose lifter model')
parser.add_argument(
'--pose_lifter_checkpoint',
default="videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth",
help='Checkpoint file for the 2nd stage pose lifter model')
parser.add_argument('--input', type=str, default='webcam', help='Video path')
parser.add_argument(
'--show',
action='store_true',
default=True,
help='Whether to show visualizations')
parser.add_argument(
'--disable-rebase-keypoint',
action='store_true',
default=False,
help='Whether to disable rebasing the predicted 3D pose so its '
'lowest keypoint has a height of 0 (landing on the ground). Rebase '
'is useful for visualization when the model do not predict the '
'global position of the 3D pose.')
parser.add_argument(
'--disable-norm-pose-2d',
action='store_true',
default=False,
help='Whether to scale the bbox (along with the 2D pose) to the '
'average bbox scale of the dataset, and move the bbox (along with the '
'2D pose) to the average bbox center of the dataset. This is useful '
'when bbox is small, especially in multi-person scenarios.')
parser.add_argument(
'--num-instances',
type=int,
default=1,
help='The number of 3D poses to be visualized in every frame. If '
'less than 0, it will be set to the number of pose results in the '
'first frame.')
parser.add_argument(
'--output-root',
type=str,
default='',
help='Root of the output video file. '
'Default not saving the visualization video.')
parser.add_argument(
'--save-predictions',
action='store_true',
default=False,
help='Whether to save predicted results')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--det-cat-id',
type=int,
default=0,
help='Category id for bounding box detection model')
parser.add_argument(
'--bbox-thr',
type=float,
default=0.3,
help='Bounding box score threshold')
parser.add_argument('--kpt-thr', type=float, default=0.3)
parser.add_argument(
'--use-oks-tracking', action='store_true', help='Using OKS tracking')
parser.add_argument(
'--tracking-thr', type=float, default=0.3, help='Tracking threshold')
parser.add_argument(
'--show-interval', type=int, default=0, help='Sleep seconds per frame')
parser.add_argument(
'--thickness',
type=int,
default=1,
help='Link thickness for visualization')
parser.add_argument(
'--radius',
type=int,
default=3,
help='Keypoint radius for visualization')
parser.add_argument(
'--online',
action='store_true',
default=False,
help='Inference mode. If set to True, can not use future frame'
'information when using multi frames for inference in the 2D pose'
'detection stage. Default: False.')
args = parser.parse_args()
return args
def process_one_image(args, detector, frame, frame_idx, pose_estimator,
pose_est_results_last, pose_est_results_list, next_id,
pose_lifter, visualize_frame, visualizer):
"""Visualize detected and predicted keypoints of one image.
Pipeline of this function:
frame
|
V
+-----------------+
| detector |
+-----------------+
| det_result
V
+-----------------+
| pose_estimator |
+-----------------+
| pose_est_results
V
+--------------------------------------------+
| convert 2d kpts into pose-lifting format |
+--------------------------------------------+
| pose_est_results_list
V
+-----------------------+
| extract_pose_sequence |
+-----------------------+
| pose_seq_2d
V
+-------------+
| pose_lifter |
+-------------+
| pose_lift_results
V
+-----------------+
| post-processing |
+-----------------+
| pred_3d_data_samples
V
+------------+
| visualizer |
+------------+
Args:
args (Argument): Custom command-line arguments.
detector (mmdet.BaseDetector): The mmdet detector.
frame (np.ndarray): The image frame read from input image or video.
frame_idx (int): The index of current frame.
pose_estimator (TopdownPoseEstimator): The pose estimator for 2d pose.
pose_est_results_last (list(PoseDataSample)): The results of pose
estimation from the last frame for tracking instances.
pose_est_results_list (list(list(PoseDataSample))): The list of all
pose estimation results converted by
``convert_keypoint_definition`` from previous frames. In
pose-lifting stage it is used to obtain the 2d estimation sequence.
next_id (int): The next track id to be used.
pose_lifter (PoseLifter): The pose-lifter for estimating 3d pose.
visualize_frame (np.ndarray): The image for drawing the results on.
visualizer (Visualizer): The visualizer for visualizing the 2d and 3d
pose estimation results.
Returns:
pose_est_results (list(PoseDataSample)): The pose estimation result of
the current frame.
pose_est_results_list (list(list(PoseDataSample))): The list of all
converted pose estimation results until the current frame.
pred_3d_instances (InstanceData): The result of pose-lifting.
Specifically, the predicted keypoints and scores are saved at
``pred_3d_instances.keypoints`` and
``pred_3d_instances.keypoint_scores``.
next_id (int): The next track id to be used.
"""
pose_lift_dataset = pose_lifter.cfg.test_dataloader.dataset
pose_lift_dataset_name = pose_lifter.dataset_meta['dataset_name']
# First stage: conduct 2D pose detection in a Topdown manner
# use detector to obtain person bounding boxes
det_result = inference_detector(detector, frame)
pred_instance = det_result.pred_instances.cpu().numpy()
# filter out the person instances with category and bbox threshold
# e.g. 0 for person in COCO
bboxes = pred_instance.bboxes
bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id,
pred_instance.scores > args.bbox_thr)]
# estimate pose results for current image
pose_est_results = inference_topdown(pose_estimator, frame, bboxes)
if args.use_oks_tracking:
_track = partial(_track_by_oks)
else:
_track = _track_by_iou
pose_det_dataset_name = pose_estimator.dataset_meta['dataset_name']
pose_est_results_converted = []
# convert 2d pose estimation results into the format for pose-lifting
# such as changing the keypoint order, flipping the keypoint, etc.
for i, data_sample in enumerate(pose_est_results):
pred_instances = data_sample.pred_instances.cpu().numpy()
keypoints = pred_instances.keypoints
# calculate area and bbox
if 'bboxes' in pred_instances:
areas = np.array([(bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
for bbox in pred_instances.bboxes])
pose_est_results[i].pred_instances.set_field(areas, 'areas')
else:
areas, bboxes = [], []
for keypoint in keypoints:
xmin = np.min(keypoint[:, 0][keypoint[:, 0] > 0], initial=1e10)
xmax = np.max(keypoint[:, 0])
ymin = np.min(keypoint[:, 1][keypoint[:, 1] > 0], initial=1e10)
ymax = np.max(keypoint[:, 1])
areas.append((xmax - xmin) * (ymax - ymin))
bboxes.append([xmin, ymin, xmax, ymax])
pose_est_results[i].pred_instances.areas = np.array(areas)
pose_est_results[i].pred_instances.bboxes = np.array(bboxes)
# track id
track_id, pose_est_results_last, _ = _track(data_sample,
pose_est_results_last,
args.tracking_thr)
if track_id == -1:
if np.count_nonzero(keypoints[:, :, 1]) >= 3:
track_id = next_id
next_id += 1
else:
# If the number of keypoints detected is small,
# delete that person instance.
keypoints[:, :, 1] = -10
pose_est_results[i].pred_instances.set_field(
keypoints, 'keypoints')
pose_est_results[i].pred_instances.set_field(
pred_instances.bboxes * 0, 'bboxes')
pose_est_results[i].set_field(pred_instances, 'pred_instances')
track_id = -1
pose_est_results[i].set_field(track_id, 'track_id')
# convert keypoints for pose-lifting
pose_est_result_converted = PoseDataSample()
pose_est_result_converted.set_field(
pose_est_results[i].pred_instances.clone(), 'pred_instances')
pose_est_result_converted.set_field(
pose_est_results[i].gt_instances.clone(), 'gt_instances')
keypoints = convert_keypoint_definition(keypoints,
pose_det_dataset_name,
pose_lift_dataset_name)
pose_est_result_converted.pred_instances.set_field(
keypoints, 'keypoints')
pose_est_result_converted.set_field(pose_est_results[i].track_id,
'track_id')
pose_est_results_converted.append(pose_est_result_converted)
pose_est_results_list.append(pose_est_results_converted.copy())
# Second stage: Pose lifting
# extract and pad input pose2d sequence
pose_seq_2d = extract_pose_sequence(
pose_est_results_list,
frame_idx=frame_idx,
causal=pose_lift_dataset.get('causal', False),
seq_len=pose_lift_dataset.get('seq_len', 1),
step=pose_lift_dataset.get('seq_step', 1))
# conduct 2D-to-3D pose lifting
norm_pose_2d = not args.disable_norm_pose_2d
pose_lift_results = inference_pose_lifter_model(
pose_lifter,
pose_seq_2d,
image_size=visualize_frame.shape[:2],
norm_pose_2d=norm_pose_2d)
# post-processing
for idx, pose_lift_result in enumerate(pose_lift_results):
pose_lift_result.track_id = pose_est_results[idx].get('track_id', 1e4)
pred_instances = pose_lift_result.pred_instances
keypoints = pred_instances.keypoints
keypoint_scores = pred_instances.keypoint_scores
if keypoint_scores.ndim == 3:
keypoint_scores = np.squeeze(keypoint_scores, axis=1)
pose_lift_results[
idx].pred_instances.keypoint_scores = keypoint_scores
if keypoints.ndim == 4:
keypoints = np.squeeze(keypoints, axis=1)
keypoints = keypoints[..., [0, 2, 1]]
keypoints[..., 0] = -keypoints[..., 0]
keypoints[..., 2] = -keypoints[..., 2]
# rebase height (z-axis)
if not args.disable_rebase_keypoint:
keypoints[..., 2] -= np.min(
keypoints[..., 2], axis=-1, keepdims=True)
pose_lift_results[idx].pred_instances.keypoints = keypoints
pose_lift_results = sorted(
pose_lift_results, key=lambda x: x.get('track_id', 1e4))
pred_3d_data_samples = merge_data_samples(pose_lift_results)
det_data_sample = merge_data_samples(pose_est_results)
pred_3d_instances = pred_3d_data_samples.get('pred_instances', None)
if args.num_instances < 0:
args.num_instances = len(pose_lift_results)
# Visualization
if visualizer is not None:
visualizer.add_datasample(
'result',
visualize_frame,
data_sample=pred_3d_data_samples,
det_data_sample=det_data_sample,
draw_gt=False,
dataset_2d=pose_det_dataset_name,
dataset_3d=pose_lift_dataset_name,
show=args.show,
draw_bbox=True,
kpt_thr=args.kpt_thr,
num_instances=args.num_instances,
wait_time=args.show_interval)
return pose_est_results, pose_est_results_list, pred_3d_instances, next_id
def main():
assert has_mmdet, 'Please install mmdet to run the demo.'
args = parse_args()
assert args.show or (args.output_root != '')
assert args.input != ''
assert args.det_config is not None
assert args.det_checkpoint is not None
detector = init_detector(
args.det_config, args.det_checkpoint, device=args.device.lower())
detector.cfg = adapt_mmdet_pipeline(detector.cfg)
pose_estimator = init_model(
args.pose_estimator_config,
args.pose_estimator_checkpoint,
device=args.device.lower())
assert isinstance(pose_estimator, TopdownPoseEstimator), 'Only "TopDown"' \
'model is supported for the 1st stage (2D pose detection)'
det_kpt_color = pose_estimator.dataset_meta.get('keypoint_colors', None)
det_dataset_skeleton = pose_estimator.dataset_meta.get(
'skeleton_links', None)
det_dataset_link_color = pose_estimator.dataset_meta.get(
'skeleton_link_colors', None)
pose_lifter = init_model(
args.pose_lifter_config,
args.pose_lifter_checkpoint,
device=args.device.lower())
assert isinstance(pose_lifter, PoseLifter), \
'Only "PoseLifter" model is supported for the 2nd stage ' \
'(2D-to-3D lifting)'
pose_lifter.cfg.visualizer.radius = args.radius
pose_lifter.cfg.visualizer.line_width = args.thickness
pose_lifter.cfg.visualizer.det_kpt_color = det_kpt_color
pose_lifter.cfg.visualizer.det_dataset_skeleton = det_dataset_skeleton
pose_lifter.cfg.visualizer.det_dataset_link_color = det_dataset_link_color
visualizer = VISUALIZERS.build(pose_lifter.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint
visualizer.set_dataset_meta(pose_lifter.dataset_meta)
if args.input == 'webcam':
input_type = 'webcam'
else:
input_type = mimetypes.guess_type(args.input)[0].split('/')[0]
if args.output_root == '':
save_output = False
else:
mmengine.mkdir_or_exist(args.output_root)
output_file = os.path.join(args.output_root,
os.path.basename(args.input))
if args.input == 'webcam':
output_file += '.mp4'
save_output = True
if args.save_predictions:
assert args.output_root != ''
args.pred_save_path = f'{args.output_root}/results_' \
f'{os.path.splitext(os.path.basename(args.input))[0]}.json'
if save_output:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
pose_est_results_list = []
pred_instances_list = []
if input_type == 'image':
frame = mmcv.imread(args.input, channel_order='rgb')
_, _, pred_3d_instances, _ = process_one_image(
args=args,
detector=detector,
frame=frame,
frame_idx=0,
pose_estimator=pose_estimator,
pose_est_results_last=[],
pose_est_results_list=pose_est_results_list,
next_id=0,
pose_lifter=pose_lifter,
visualize_frame=frame,
visualizer=visualizer)
if args.save_predictions:
# save prediction results
pred_instances_list = split_instances(pred_3d_instances)
if save_output:
frame_vis = visualizer.get_image()
mmcv.imwrite(mmcv.rgb2bgr(frame_vis), output_file)
elif input_type in ['webcam', 'video']:
next_id = 0
pose_est_results = []
if args.input == 'webcam':
video = cv2.VideoCapture(0)
else:
video = cv2.VideoCapture(args.input)
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
if int(major_ver) < 3:
fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
else:
fps = video.get(cv2.CAP_PROP_FPS)
video_writer = None
frame_idx = 0
while video.isOpened():
success, frame = video.read()
frame_idx += 1
if not success:
break
pose_est_results_last = pose_est_results
# First stage: 2D pose detection
# make person results for current image
(pose_est_results, pose_est_results_list, pred_3d_instances,
next_id) = process_one_image(
args=args,
detector=detector,
frame=frame,
frame_idx=frame_idx,
pose_estimator=pose_estimator,
pose_est_results_last=pose_est_results_last,
pose_est_results_list=pose_est_results_list,
next_id=next_id,
pose_lifter=pose_lifter,
visualize_frame=mmcv.bgr2rgb(frame),
visualizer=visualizer)
if args.save_predictions:
# save prediction results
pred_instances_list.append(
dict(
frame_id=frame_idx,
instances=split_instances(pred_3d_instances)))
if save_output:
frame_vis = visualizer.get_image()
if video_writer is None:
# the size of the image with visualization may vary
# depending on the presence of heatmaps
video_writer = cv2.VideoWriter(output_file, fourcc, fps,
(frame_vis.shape[1],
frame_vis.shape[0]))
video_writer.write(mmcv.rgb2bgr(frame_vis))
if args.show:
# press ESC to exit
if cv2.waitKey(5) & 0xFF == 27:
break
time.sleep(args.show_interval)
video.release()
if video_writer:
video_writer.release()
else:
args.save_predictions = False
raise ValueError(
f'file {os.path.basename(args.input)} has invalid format.')
if args.save_predictions:
with open(args.pred_save_path, 'w') as f:
json.dump(
dict(
meta_info=pose_lifter.dataset_meta,
instance_info=pred_instances_list),
f,
indent='\t')
print(f'predictions have been saved at {args.pred_save_path}')
if save_output:
input_type = input_type.replace('webcam', 'video')
print_log(
f'the output {input_type} has been saved at {output_file}',
logger='current',
level=logging.INFO)
if __name__ == '__main__':
main()

135
calculate.py Normal file
View File

@ -0,0 +1,135 @@
import numpy as np
def angle_between(pkt1, pkt2, pkt3):
    """
    Computes the angle between three points, in degrees, keeping the sign.
    pkt2 is the vertex of the angle.
    Parameters:
    pkt1, pkt2, pkt3 : array-like (x, y) or (x, y, z)
    Returns:
    Angle in degrees (negative or positive)
    """
    pkt1 = np.array(pkt1[:2].cpu().numpy())
    pkt2 = np.array(pkt2[:2].cpu().numpy())
    pkt3 = np.array(pkt3[:2].cpu().numpy())
    # vectors relative to pkt2
    a = pkt1 - pkt2
    b = pkt3 - pkt2
    # dot product and cosine of the angle
    dot = np.dot(a, b)
    norm = np.linalg.norm(a) * np.linalg.norm(b)
    cos_theta = dot / norm
    cos_theta = np.clip(cos_theta, -1.0, 1.0)
    # unsigned angle
    angle = np.degrees(np.arccos(cos_theta))
    # sign from the cross product (in 2D this is the scalar z-component)
    cross = a[0]*b[1] - a[1]*b[0]
    if cross < 0:
        angle = -angle
    return angle
def compare_poses(f1, f2):
    # Euclidean distance
    l2_dist = np.linalg.norm(f1 - f2)
    # Cosine similarity
    cos_sim = np.dot(f1, f2) / (np.linalg.norm(f1) * np.linalg.norm(f2) + 1e-6)
    return l2_dist, cos_sim
def compare_poses_boolean(f1, f2):
    l2, cos_sim = compare_poses(f1, f2)
    return l2 < 0.7 and cos_sim > 0.90
def center(keypoints):
    mid_hip = (keypoints[11] + keypoints[12]) / 2  # left_hip=11, right_hip=12
    keypoints = keypoints - mid_hip
    return keypoints
def normalize_pose(keypoints):
    """
    keypoints: np.array of shape (17, 2), [x, y] COCO keypoints
    Returns a feature vector that is invariant to scale and translation
    """
    # 1. translation -> hip midpoint becomes the origin
    mid_hip = (keypoints[11] + keypoints[12]) / 2  # left_hip=11, right_hip=12
    keypoints = keypoints - mid_hip
    # 2. scale normalization -> distance between the shoulders
    shoulder_dist = np.linalg.norm(keypoints[5] - keypoints[6])  # left_shoulder=5, right_shoulder=6
    if shoulder_dist > 0:
        keypoints = keypoints / shoulder_dist
    # 3. limb segment definitions (e.g. shoulder-elbow, elbow-wrist)
    limbs = [
        (5, 7),    # left upper arm
        (7, 9),    # left forearm
        (6, 8),    # right upper arm
        (8, 10),   # right forearm
        (11, 13),  # left thigh
        (13, 15),  # left shin
        (12, 14),  # right thigh
        (14, 16),  # right shin
    ]
    # 4. compute the joint angles
    angles = []
    for (a, b), (c, d) in zip(limbs[::2], limbs[1::2]):  # e.g. (upper arm, forearm)
        v1 = keypoints[b] - keypoints[a]
        v2 = keypoints[d] - keypoints[c]
        cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-6)
        angle = np.arccos(np.clip(cos_angle, -1, 1))
        angles.append(angle)
    # 5. optionally: also add the (normalized) limb vectors
    vectors = []
    for (a, b) in limbs:
        v = keypoints[b] - keypoints[a]
        v_norm = v / (np.linalg.norm(v) + 1e-6)
        vectors.extend(v_norm)
    # final feature vector = angles + vectors
    feature_vector = np.concatenate([angles, vectors])
    return feature_vector
def denormalize_pose(feature_vector):
    """
    feature_vector: the output of normalize_pose
    Returns approximate keypoint coordinates (in the normalized frame)
    """
    # 1. split angles and vectors
    angles = feature_vector[:4]
    vectors_flat = feature_vector[4:]
    vectors = vectors_flat.reshape(-1, 2)
    # 2. initialize keypoints
    keypoints = np.zeros((17, 2))
    # 3. approximate reconstruction of the limbs
    limbs = [
        (5, 7), (7, 9), (6, 8), (8, 10),
        (11, 13), (13, 15), (12, 14), (14, 16)
    ]
    for (a, b), v in zip(limbs, vectors):
        keypoints[b] = keypoints[a] + v  # approximate reconstruction
    # 4. starting point (hips) = (0, 0); the original scale would have to be restored separately
    return keypoints
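A minimal usage sketch of the pose-matching helpers above, assuming two (17, 2) COCO keypoint arrays; the random arrays below are made-up stand-ins for real detections:

import numpy as np
from calculate import normalize_pose, compare_poses, compare_poses_boolean

# stand-in (17, 2) keypoint arrays for two frames
pose_a = np.random.rand(17, 2) * 640
pose_b = pose_a + np.random.normal(scale=2.0, size=(17, 2))  # slightly perturbed copy

f1, f2 = normalize_pose(pose_a), normalize_pose(pose_b)
l2, cos_sim = compare_poses(f1, f2)
print(f"L2={l2:.3f} cos={cos_sim:.3f} match={compare_poses_boolean(f1, f2)}")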

185
draw.py Normal file

File diff suppressed because one or more lines are too long

93
filter.py Normal file
View File

@ -0,0 +1,93 @@
from collections import deque
import numpy as np
def filter_moves(moves):
    newMoves = []
    lastTime = 0
    ema = EMAFilter(0.2)
    for i, move in enumerate(moves):
        s = move[0] / 1000
        if i != len(moves) - 1:
            origS = s
            s = s - lastTime
            lastTime = origS
        newMoves.append((s, ema.update(move[1])))
    return newMoves
class MedianFilter:
    def __init__(self, n_channels=8, window_size=3):
        self.n = n_channels
        self.buffers = [deque(maxlen=window_size) for _ in range(n_channels)]
    def update(self, angles_deg):
        smoothed = []
        for i, ang in enumerate(angles_deg):
            self.buffers[i].append(ang)
            smoothed_ang = np.median(self.buffers[i])
            smoothed.append(smoothed_ang)
        return smoothed
class HybridFilter:
    def __init__(self, alpha=0.7, n_channels=8, median_window=3):
        self.alpha = alpha
        self.n = n_channels
        self.median_window = median_window
        # Median buffers, one per channel
        self.buffers = [deque(maxlen=median_window) for _ in range(n_channels)]
        # EMA state (cos/sin)
        self.cos_state = [None] * n_channels
        self.sin_state = [None] * n_channels
    def update(self, angles_deg):
        smoothed = []
        for i, ang in enumerate(angles_deg):
            # push into the median buffer
            self.buffers[i].append(ang)
            med = np.median(self.buffers[i])  # median filter
            ang_rad = np.deg2rad(med)
            c, s = np.cos(ang_rad), np.sin(ang_rad)
            if self.cos_state[i] is None:
                self.cos_state[i] = c
                self.sin_state[i] = s
            else:
                self.cos_state[i] = self.alpha * c + (1 - self.alpha) * self.cos_state[i]
                self.sin_state[i] = self.alpha * s + (1 - self.alpha) * self.sin_state[i]
            smoothed_ang = np.rad2deg(np.arctan2(self.sin_state[i], self.cos_state[i]))
            smoothed.append(smoothed_ang)
        return smoothed
class EMAFilter:
    # Exponential moving average on the unit circle (cos/sin), so angle wrap-around is handled correctly.
    def __init__(self, alpha=0.2, n_channels=8):
        self.alpha = alpha
        self.cos_state = [None] * n_channels
        self.sin_state = [None] * n_channels
        self.n = n_channels
    def update(self, angles_deg):
        smoothed = []
        for i, ang in enumerate(angles_deg):
            ang_rad = np.deg2rad(ang)
            c, s = np.cos(ang_rad), np.sin(ang_rad)
            if self.cos_state[i] is None:
                self.cos_state[i] = c
                self.sin_state[i] = s
            else:
                self.cos_state[i] = self.alpha * c + (1 - self.alpha) * self.cos_state[i]
                self.sin_state[i] = self.alpha * s + (1 - self.alpha) * self.sin_state[i]
            smoothed_ang = np.rad2deg(np.arctan2(self.sin_state[i], self.cos_state[i]))
            smoothed.append(smoothed_ang)
        return smoothed
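A minimal usage sketch of EMAFilter with made-up angle values; smoothing is done on cos/sin, so values near ±180° do not produce wrap-around jumps:

from filter import EMAFilter

ema = EMAFilter(alpha=0.2, n_channels=2)
for angles in ([179.0, 10.0], [-179.0, 12.0], [178.5, 11.0]):
    print(ema.update(angles))  # two smoothed angles per call, in degrees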

Binary file not shown.

File diff suppressed because one or more lines are too long

152
main.py Normal file
View File

@ -0,0 +1,152 @@
import pickle
import sys
from ultralytics import YOLO
import cv2
import time
from calculate import normalize_pose, compare_poses_boolean
from draw import draw_new
from utils import find_closest
from video_methods import initialize_method
model = YOLO("yolo11x-pose.pt")
if len(sys.argv) == 2:
    method_type = sys.argv[1]
else:
    print("Podaj argument 'cam', albo 'net'.")
    exit(1)
method = initialize_method(method_type)
do_pose_shot = False
def click_event(event, x, y, flags, param):
    global do_pose_shot
    if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button
        do_pose_shot = not do_pose_shot
def main():
    last_time = time.time()
    currTimeIndex = 0
    currIndex = None
    currMove = None
    currStatus = "Zacznij tanczyc"
    mehCount = 0
    goodCount = 0
    failCount = 0
    failRate = 2
    moves = []
    with open('moves.pkl', 'rb') as f:  # 'rb' = read binary
        moves = pickle.load(f)
    startValue = moves[0][0]
    totalCount = len(moves)
    for i, move in enumerate(moves):
        moves[i] = (move[0] - startValue, move[1], move[2])
    while True:
        frame = method.receive_frame()
        frame = cv2.flip(frame, 1)
        results = model(frame, verbose=False)
        current_time = time.time()
        delta = current_time - last_time
        last_time = current_time
        fps = 1 / delta if delta > 0 else float('inf')
        # print(f"\rDelta: {delta:.4f}s, FPS: {fps:.2f}", end="")
        for result in results:
            kpts = result.keypoints.data[0] if len(result.keypoints.data) else None
            if kpts is None:
                continue
            img = frame
            normalized = normalize_pose(result.keypoints.xy.cpu().numpy()[0])
            cv2.imshow('you', draw_new(result.keypoints.xy.cpu()[0]))
            if currTimeIndex != 0 and moves.index(find_closest(moves, time.time() - currTimeIndex)) == len(moves) - 1:
                mehCount = totalCount - failCount - goodCount
                print(
                    f"PODSUMOWANIE: FAIL {failCount} MEH: {mehCount} PERFECT: {goodCount} PERCENTAGE: {(goodCount + (0.95 * mehCount)) / totalCount * 100}%")
                exit(1)
            if currMove is None:
                if compare_poses_boolean(moves[0][1], normalized):
                    currIndex = 1
                    currTimeIndex = time.time()
                    deltaTime = time.time()
                    currStatus = f"Zaczoles tanczyc {currIndex}"
                    currMove = moves[0]
                    # thread = Thread(target=print_animation, args=(moves, False))
                    # thread.start()
            else:
                changed = False
                closest = find_closest(moves, time.time() - currTimeIndex)
                cv2.imshow('Dots', draw_new(closest[2]))
                if abs((time.time() - currTimeIndex) - moves[currIndex][0]) > failRate:
                    currStatus = f"FAIL!"
                    failCount += 1
                if compare_poses_boolean(closest[1], normalized):
                    # delays += (time.time() - deltaTime - moves[0][0]) * 1000
                    # delaysCount += 1
                    currStatus = f"SUPER! {currIndex} Zostalo {len(moves)} Delay {(time.time() - currTimeIndex - closest[0]) / 1000}ms"
                    deltaTime = time.time()
                    currIndex = moves.index(closest) + 1
                    goodCount += 1
                    changed = True
                if not changed and compare_poses_boolean(moves[currIndex][1], normalized):
                    # delays += (time.time() - deltaTime - moves[0][0]) * 1000
                    # delaysCount += 1
                    currStatus = f"SUPER! {currIndex} Zostalo {len(moves)} Delay {(time.time() - currTimeIndex - closest[0]) / 1000}ms"
                    deltaTime = time.time()
                    changed = True
                    currIndex += 1
                    goodCount += 1
            # if do_pose_shot:
            #     moves.append((time.time() - startTime, normalize_pose(result.keypoints.xy.cpu().numpy()[0]), result.keypoints.xy.cpu()[0]))
            # elif len(moves) != 0:
            #     with open('moves.pkl', 'wb') as f:  # 'wb' = write binary
            #         pickle.dump(moves, f)
            #
            #     exit(1)
            cv2.putText(
                img,                       # image
                currStatus,                # text
                (50, 100),                 # (x, y) position of the text's bottom-left corner
                cv2.FONT_HERSHEY_SIMPLEX,  # font
                1,                         # size (scale)
                (0, 0, 255),               # colour (BGR) - red here
                2,                         # line thickness
                cv2.LINE_AA                # anti-aliasing type
            )
            cv2.imshow('Klatka z kamerki', img)
            cv2.setMouseCallback('Klatka z kamerki', click_event)
            cv2.waitKey(1)  # wait for a key press
main()

BIN
moves.pkl Normal file

Binary file not shown.

26
ploting.py Normal file
View File

@ -0,0 +1,26 @@
import matplotlib.pyplot as plt
import queue
data_queue = queue.Queue()
x_data, y_data = [], []
fig, ax = plt.subplots()
line, = ax.plot([], [], 'r-')
def init():
    ax.set_xlim(0, 100)
    ax.set_ylim(0, 10)
    return line,
def update(frame):
    # check whether there is new data in the queue
    while not data_queue.empty():
        value = data_queue.get()
        x_data.append(len(x_data))
        y_data.append(value)
        if len(x_data) > 100:
            x_data.pop(0)
            y_data.pop(0)
    line.set_data(x_data, y_data)
    return line,
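ploting.py only defines the callbacks and the shared queue; a minimal sketch of how they would typically be driven, assuming matplotlib's FuncAnimation and some producer pushing values into data_queue:

import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from ploting import fig, init, update, data_queue

ani = FuncAnimation(fig, update, init_func=init, interval=50, blit=True)
data_queue.put(3.0)  # a producer thread would push new values like this
plt.show()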

48
receive_images.py Normal file
View File

@ -0,0 +1,48 @@
import socket
import cv2
import numpy as np
import struct
HOST = "0.0.0.0" # nasłuchuj na wszystkich interfejsach
PORT = 9999
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind((HOST, PORT))
server_socket.listen(1)
print("Serwer nasłuchuje na port", PORT)
conn, addr = server_socket.accept()
print("Połączono z:", addr)
data = b""
payload_size = struct.calcsize("Q") # 8 bajtów na długość ramki
while True:
while len(data) < payload_size:
packet = conn.recv(4096)
if not packet:
break
data += packet
if not data:
break
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("Q", packed_msg_size)[0]
while len(data) < msg_size:
data += conn.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = np.frombuffer(frame_data, dtype=np.uint8)
frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
cv2.imshow("Odebrany obraz", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
conn.close()
server_socket.close()
cv2.destroyAllWindows()

62
receiver.py Normal file
View File

@ -0,0 +1,62 @@
import socket
import struct
import numpy as np
import cv2
import time
HOST = '0.0.0.0'
PORT = 9999
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((HOST, PORT))
sock.listen(1)
conn, addr = sock.accept()
print(f"Connected by {addr}")
total_bytes_received = 0
start_time = time.time()
def recvall(sock, n):
    data = b''
    while len(data) < n:
        packet = sock.recv(n - len(data))
        if not packet:
            return None
        data += packet
    return data
try:
    while True:
        # Receive the length prefix
        packed_len = recvall(conn, 4)
        if not packed_len:
            break
        length = struct.unpack('!I', packed_len)[0]
        # Receive the payload
        data = recvall(conn, length)
        if not data:
            break
        total_bytes_received += length
        # Decode the JPEG frame
        img_array = np.frombuffer(data, dtype=np.uint8)
        frame = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
        cv2.imshow("Stream", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        elapsed = time.time() - start_time
        if elapsed >= 1.0:
            print(f"Download speed: {total_bytes_received * 8 / 1e6:.2f} Mbps")
            total_bytes_received = 0
            start_time = time.time()
finally:
    conn.close()
    sock.close()
    cv2.destroyAllWindows()

Binary file not shown.

Binary file not shown.

51
sender.py Normal file
View File

@ -0,0 +1,51 @@
import cv2
import socket
import zstandard as zstd
import struct
import time
from utils import resize_with_padding
SERVER_IP = '127.0.0.1'
SERVER_PORT = 9999
# Socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((SERVER_IP, SERVER_PORT))
# Zstd compressor (currently unused; frames are sent as plain JPEG)
compressor = zstd.ZstdCompressor(level=10)
cap = cv2.VideoCapture(0)  # webcam
total_bytes_sent = 0
start_time = time.time()
JPEG_QUALITY = 25  # 0-100; the lower the value, the stronger the compression
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = resize_with_padding(frame)
        # Encode to JPEG
        _, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, JPEG_QUALITY])
        data = buffer.tobytes()
        # Send the length prefix, then the payload
        sock.sendall(struct.pack('!I', len(data)))
        sock.sendall(data)
        total_bytes_sent += len(data)
        elapsed = time.time() - start_time
        if elapsed >= 1.0:
            print(f"Upload speed: {total_bytes_sent * 8 / 1e6:.2f} Mbps")  # in megabits
            total_bytes_sent = 0
            start_time = time.time()
finally:
    cap.release()
    sock.close()

400
test.py

File diff suppressed because one or more lines are too long

38
utils.py Normal file
View File

@ -0,0 +1,38 @@
import cv2
import numpy as np
def recvall(sock, n):
    # Read exactly n bytes from the socket (returns None if the peer closed the connection)
    data = b''
    while len(data) < n:
        packet = sock.recv(n - len(data))
        if not packet:
            return None
        data += packet
    return data
def find_closest(moves, target):
    # Return the move whose timestamp (first tuple element) is closest to target
    return min(moves, key=lambda t: abs(t[0] - target))
def resize_with_padding(image, target_size=(640, 640)):
    h, w = image.shape[:2]
    target_w, target_h = target_size
    # Compute the scale factor, preserving the aspect ratio
    scale = min(target_w / w, target_h / h)
    new_w, new_h = int(w * scale), int(h * scale)
    # Resize the image
    resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA)
    # Create a (black) canvas of the target size
    output_image = np.zeros((target_h, target_w, 3), dtype=np.uint8)
    # Compute offsets to center the image
    x_offset = (target_w - new_w) // 2
    y_offset = (target_h - new_h) // 2
    # Paste the resized image onto the canvas
    output_image[y_offset:y_offset+new_h, x_offset:x_offset+new_w] = resized_image
    return output_image
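A quick sketch of what resize_with_padding does to an off-ratio frame (the shape values are just an example):

import numpy as np
from utils import resize_with_padding

frame = np.zeros((1080, 1920, 3), dtype=np.uint8)  # example 16:9 frame
out = resize_with_padding(frame)
print(out.shape)  # (640, 640, 3): the content is scaled to 640x360 and centered on a black canvas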

65
video_methods.py Normal file
View File

@ -0,0 +1,65 @@
import socket
import struct
import time
import cv2
import numpy as np
from utils import recvall
methods = ["cam", "net"]
HOST = '0.0.0.0'
PORT = 9999
class Method:
    def __init__(self, method_type):
        self.method_type = method_type
        if method_type == "cam":
            self.cap = cv2.VideoCapture(0)
            if not self.cap.isOpened():
                print("Nie można otworzyć kamerki")
                exit(1)
        else:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.bind((HOST, PORT))
            self.sock.listen(1)
            print(f"Oczekuje podłączenia na: {HOST}:{PORT}")
            self.conn, addr = self.sock.accept()
            print(f"Podłączono przez {addr}")
            self.total_bytes_received = 0
            self.start_time = time.time()
    def receive_frame(self):
        if self.method_type == "cam":
            _, frame = self.cap.read()
            if not _:
                exit(1)
        else:
            packed_len = recvall(self.conn, 4)
            if not packed_len:
                exit(1)
            length = struct.unpack('!I', packed_len)[0]
            data = recvall(self.conn, length)
            if not data:
                exit(1)
            self.total_bytes_received += length
            img_array = np.frombuffer(data, dtype=np.uint8)
            frame = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
        return frame
def initialize_method(method_type):
    if method_type not in methods:
        return None
    return Method(method_type)
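A minimal usage sketch of the Method abstraction ("cam" reads the local webcam, "net" accepts the length-prefixed JPEG stream produced by sender.py):

import cv2
from video_methods import initialize_method

method = initialize_method("cam")  # or "net" to accept a stream from sender.py
if method is None:
    raise SystemExit("unknown method")
while True:
    frame = method.receive_frame()
    cv2.imshow("preview", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break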