[ADD] basis for async arm detection

Sem van der Hoeven
2021-05-28 15:31:21 +02:00
parent a68c6a57bf
commit 40529f84b3
9 changed files with 119 additions and 56 deletions

@@ -44,59 +44,34 @@ namespace computervision
net = readNetFromCaffe(protoFile, weightsFile);
}
void OpenPoseVideo::movementSkeleton(Mat inputImage) {
//string device = "cpu";
//string videoFile = "sample_video.mp4";
// Take arguments from command line
/*if (argc == 2)
{
if ((string)argv[1] == "gpu")
device = "gpu";
else
videoFile = argv[1];
}
else if (argc == 3)
{
videoFile = argv[1];
if ((string)argv[2] == "gpu")
device = "gpu";
}*/
void OpenPoseVideo::movementSkeleton(Mat inputImage, std::function<void(std::vector<Point>)> f) {
std::cout << "movement skeleton start" << std::endl;
int inWidth = 368;
int inHeight = 368;
float thresh = 0.01;
Mat frame, frameCopy;
Mat frame;
int frameWidth = inputImage.size().width;
int frameHeight = inputImage.size().height;
/*if (device == "cpu")
{
cout << "Using CPU device" << endl;
net.setPreferableBackend(DNN_TARGET_CPU);
}
else if (device == "gpu")
{
cout << "Using GPU device" << endl;
net.setPreferableBackend(DNN_BACKEND_CUDA);
net.setPreferableTarget(DNN_TARGET_CUDA);
}*/
double t = (double)cv::getTickCount();
std::cout << "reading input image and blob" << std::endl;
frame = inputImage;
frameCopy = frame.clone();
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
std::cout << "done reading image and blob" << std::endl;
net.setInput(inpBlob);
std::cout << "done setting input to net" << std::endl;
Mat output = net.forward();
int H = output.size[2];
int W = output.size[3];
std::cout << "about to find position of boxy parts" << std::endl;
// find the position of the body parts
vector<Point> points(nPoints);
for (int n = 0; n < nPoints; n++)
@@ -114,31 +89,16 @@ namespace computervision
p.x *= (float)frameWidth / W;
p.y *= (float)frameHeight / H;
circle(frameCopy, cv::Point((int)p.x, (int)p.y), 8, Scalar(0, 255, 255), -1);
cv::putText(frameCopy, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1.1, cv::Scalar(0, 0, 255), 2);
circle(frame, cv::Point((int)p.x, (int)p.y), 8, Scalar(0, 255, 255), -1);
cv::putText(frame, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1.1, cv::Scalar(0, 0, 255), 2);
}
points[n] = p;
}
int nPairs = sizeof(POSE_PAIRS) / sizeof(POSE_PAIRS[0]);
for (int n = 0; n < nPairs; n++)
{
// lookup 2 connected body/hand parts
Point2f partA = points[POSE_PAIRS[n][0]];
Point2f partB = points[POSE_PAIRS[n][1]];
if (partA.x <= 0 || partA.y <= 0 || partB.x <= 0 || partB.y <= 0)
continue;
line(frame, partA, partB, Scalar(0, 255, 255), 8);
circle(frame, partA, 8, Scalar(0, 0, 255), -1);
circle(frame, partB, 8, Scalar(0, 0, 255), -1);
}
t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
cv::putText(frame, cv::format("time taken = %.2f sec", t), cv::Point(50, 50), cv::FONT_HERSHEY_COMPLEX, .8, cv::Scalar(255, 50, 0), 2);
// imshow("Output-Keypoints", frameCopy);
imshow("Output-Skeleton", frame);
std::cout << "about to call points receiving method" << std::endl;
f(points);
}
}
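
The key change in this commit is the new movementSkeleton signature, which takes a std::function<void(std::vector<Point>)> callback that receives the detected keypoints after net.forward() completes. This is the hook for the "async arm detection" named in the commit message. Below is a minimal, hypothetical usage sketch that is not part of the commit: the helper name detectArmsAsync, the include path for OpenPoseVideo, and the std::async wiring are assumptions for illustration only.

// Hypothetical usage sketch: run the callback-based movementSkeleton on a
// worker thread via std::async so the DNN forward pass does not block the caller.
#include <future>
#include <iostream>
#include <vector>

#include <opencv2/opencv.hpp>
// #include "OpenPoseVideo.h"   // header name/path assumed

// Launches pose detection on a worker thread. The lambda passed as the second
// argument is the std::function<void(std::vector<Point>)> parameter added in
// this commit; it receives the keypoints once the forward pass has finished.
// The caller must keep 'pose' alive until the returned future completes
// (the future blocks on destruction if it is not waited on explicitly).
std::future<void> detectArmsAsync(computervision::OpenPoseVideo& pose, const cv::Mat& frame)
{
    return std::async(std::launch::async, [&pose, frame]() {
        // clone() gives the worker its own copy of the pixel data
        pose.movementSkeleton(frame.clone(), [](std::vector<cv::Point> points) {
            std::cout << "received " << points.size() << " keypoints" << std::endl;
            // ...feed the points into the arm-detection logic here...
        });
    });
}

Note that movementSkeleton still calls imshow internally; OpenCV's HighGUI is generally not safe to drive from a worker thread, so on some platforms the display call would need to move back to the main thread before this path is fully asynchronous.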