11 Commits

Author SHA1 Message Date
Sem van der Hoeven  612adf6e9b  [EDIT] mutex?  2021-05-31 15:50:06 +02:00
Sem van der Hoeven  25c99abb72  [EDIT] mutex with cap  2021-05-31 10:25:24 +02:00
Jasper  9d80cddbd1  [BUG] image still doesnt work  2021-05-28 16:49:13 +02:00
Sem van der Hoeven  40529f84b3  [ADD] basis for async arm detection  2021-05-28 15:31:21 +02:00
Jasper  a68c6a57bf  [EDIT] edited file  2021-05-28 12:32:10 +02:00
Jasper  078a6ce66d  [ADD] added all the files  2021-05-28 12:27:12 +02:00
Sem van der Hoeven  f1f1aac93d  [ADD] comments  2021-05-25 15:54:02 +02:00
Sem van der Hoeven  563f465e2c  [EDIT] remove unused methods  2021-05-25 15:46:53 +02:00
Sem van der Hoeven  05ae8ee019  [FEATURE] finished hand open/closed recognition  2021-05-25 14:49:04 +02:00
Sem van der Hoeven  3696e2eb30  [EDIT] improve hand detection with mask  2021-05-25 14:19:18 +02:00
Sem van der Hoeven  276aa1a449  [ADD] mask methods  2021-05-25 13:31:25 +02:00
18 changed files with 5531 additions and 93 deletions

.gitignore vendored

@@ -428,4 +428,6 @@ FodyWeavers.xsd
 **/docs/*
 **/doc/*
+**/pose_iter_160000.caffemodel
 # End of https://www.toptal.com/developers/gitignore/api/c++,visualstudio,visualstudiocode,opencv

File diff suppressed because it is too large

File diff suppressed because it is too large

src/computervision/FingerCount.cpp

@@ -151,9 +151,16 @@ namespace computervision
 		drawVectorPoints(frame, filtered_finger_points, color_yellow, false);
 		putText(frame, to_string(filtered_finger_points.size()), center_bounding_rect, FONT_HERSHEY_PLAIN, 3, color_purple);
+		amount_of_fingers = filtered_finger_points.size();
 		return contours_image;
 	}
+
+	int FingerCount::getAmountOfFingers()
+	{
+		return amount_of_fingers;
+	}
+
 	double FingerCount::findPointsDistance(Point a, Point b) {
 		Point difference = a - b;
 		return sqrt(difference.ddot(difference));

src/computervision/FingerCount.h

@@ -24,6 +24,13 @@ namespace computervision
 	 */
 	Mat findFingersCount(Mat input_image, Mat frame);
+
+	/**
+	 * @brief gets the currently held-up finger count.
+	 *
+	 * @return the currently held-up finger count
+	 */
+	int getAmountOfFingers();
+
 private:
 	// colors to use
 	Scalar color_blue;
@@ -34,6 +41,8 @@ namespace computervision
 	Scalar color_yellow;
 	Scalar color_purple;
+
+	int amount_of_fingers;
 
 	/**
 	 * @brief finds the distance between 2 points.
 	 *
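
The new accessor only reports whatever findFingersCount() last computed, so the two calls have to happen in that order. A minimal caller sketch (hypothetical helper; it assumes a skin mask prepared by SkinDetector, as in ObjectDetection.cpp below):

#include <iostream>
#include <opencv2/highgui.hpp>
#include "FingerCount.h"

// Sketch: run the finger detection on a prepared hand mask, then read the cached count.
void reportFingers(computervision::FingerCount& fingerCount, cv::Mat handMask, cv::Mat frame)
{
	// findFingersCount() stores the result in amount_of_fingers...
	cv::Mat debugView = fingerCount.findFingersCount(handMask, frame);

	// ...which getAmountOfFingers() then exposes to callers.
	int fingers = fingerCount.getAmountOfFingers();
	std::cout << "fingers held up: " << fingers << std::endl;
	cv::imshow("finger count debug", debugView);
}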

src/computervision/ObjectDetection.cpp

@@ -1,47 +1,66 @@
+#include <opencv2/highgui.hpp>
 #include "ObjectDetection.h"
 #include "BackgroundRemover.h"
 #include "SkinDetector.h"
 #include "FaceDetector.h"
 #include "FingerCount.h"
+#include "VideoCapture.h"
+
+using namespace videocapture;
 
 namespace computervision
 {
-	cv::VideoCapture cap(0);
 	cv::Mat img, imgGray, img2, img2Gray, img3, img4;
+	int handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight;
+	bool handMaskGenerated = false;
 
 	Mat frame, frameOut, handMask, foreground, fingerCountDebug;
 	BackgroundRemover backgroundRemover;
 	SkinDetector skinDetector;
 	FaceDetector faceDetector;
 	FingerCount fingerCount;
 
 	ObjectDetection::ObjectDetection()
 	{
 	}
 
-	bool ObjectDetection::setup()
-	{
-		if (!cap.isOpened()) {
-			cout << "Can't find camera!" << endl;
-			return false;
-		}
-
-		cap.read(frame);
-		frameOut = frame.clone();
+	cv::Mat ObjectDetection::readCamera() {
+		/*videocapture::getMutex()->lock();
+		videocapture::getCap().read(img);
+		videocapture::getMutex()->unlock();*/
+		img = videocapture::readFrame();
+		return img;
+	}
+
+	bool ObjectDetection::detectHand(Mat cameraFrame)
+	{
+		Mat inputFrame = generateHandMaskSquare(cameraFrame);
+		frameOut = inputFrame.clone();
 
+		// detect skin color
 		skinDetector.drawSkinColorSampler(frameOut);
 
-		foreground = backgroundRemover.getForeground(frame);
-		faceDetector.removeFaces(frame, foreground);
+		// remove background from image
+		foreground = backgroundRemover.getForeground(inputFrame);
+
+		// detect the hand contours
 		handMask = skinDetector.getSkinMask(foreground);
+
+		// count the amount of fingers and put the info on the matrix
 		fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
-		//backgroundRemover.calibrate(frame);
+
+		// get the amount of fingers
+		int fingers_amount = fingerCount.getAmountOfFingers();
+
+		// draw the hand rectangle on the camera input, and draw text showing if the hand is open or closed.
+		drawHandMaskRect(&cameraFrame);
+		string hand_text = fingers_amount > 0 ? "open" : "closed";
+		putText(cameraFrame,hand_text, Point(10, 75), FONT_HERSHEY_PLAIN, 2.0, Scalar(255, 0, 255),3);
+		imshow("camera", cameraFrame);
 
 		imshow("output", frameOut);
 		imshow("foreground", foreground);
@@ -50,18 +69,23 @@ namespace computervision
 		int key = waitKey(1);
 
-		if (key == 98) // b
-			backgroundRemover.calibrate(frame);
-		else if (key == 115) // s
-			skinDetector.calibrate(frame);
+		if (key == 98) // b, calibrate the background
+			backgroundRemover.calibrate(inputFrame);
+		else if (key == 115) // s, calibrate the skin color
+			skinDetector.calibrate(inputFrame);
 
-		return true;
+		return fingers_amount > 0;
 	}
 
 	void ObjectDetection::calculateDifference()
 	{
-		cap.read(img);
-		cap.read(img2);
+		//videocapture::getMutex()->lock();
+		//videocapture::getCap().read(img);
+		//videocapture::getCap().read(img2);
+		//videocapture::getMutex()->unlock()
+		img = videocapture::readFrame();
+		img2 = videocapture::readFrame();
 
 		cv::cvtColor(img, imgGray, cv::COLOR_RGBA2GRAY);
 		cv::cvtColor(img2, img2Gray, cv::COLOR_RGBA2GRAY);
@@ -72,14 +96,32 @@ namespace computervision
imshow("threshold", img4); imshow("threshold", img4);
} }
void ObjectDetection::detect()
{
int key = waitKey(1);
if (key == 98) // b cv::Mat ObjectDetection::generateHandMaskSquare(cv::Mat img)
backgroundRemover.calibrate(frame); {
else if (key == 115) // s handMaskStartXPos = 20;
skinDetector.calibrate(frame); handMaskStartYPos = img.rows / 5;
handMaskWidth = img.cols / 3;
handMaskHeight = img.cols / 3;
cv::Mat mask = cv::Mat::zeros(img.size(), img.type());
cv::Mat dstImg = cv::Mat::zeros(img.size(), img.type());
cv::rectangle(mask, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255), -1);
img.copyTo(dstImg, mask);
handMaskGenerated = true;
return dstImg;
}
bool ObjectDetection::drawHandMaskRect(cv::Mat* input)
{
if (!handMaskGenerated) return false;
rectangle(*input, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255));
return true;
} }
void ObjectDetection::showWebcam() void ObjectDetection::showWebcam()

src/computervision/ObjectDetection.h

@@ -22,13 +22,7 @@ namespace computervision
 	 *
 	 */
 	ObjectDetection();
 
-	/**
-	 * @brief Initializes the object detection, captures a frame and modifies it
-	 * so it is ready to use for object detection
-	 *
-	 * @return return true if webcam is connected, returns false if it isn't
-	 */
-	bool setup();
 
 	/**
 	 * @brief Displays an image of the current webcam-footage
 	 *
@@ -40,11 +34,38 @@ namespace computervision
 	 *
 	 */
 	void calculateDifference();
 
 	/**
-	 * @brief Listens for keypresses and handles them
+	 * @brief generates the square that will hold the mask in which the hand will be detected.
 	 *
+	 * @param img the current camear frame
+	 * @return a matrix containing the mask
 	 */
-	void detect();
+	cv::Mat generateHandMaskSquare(cv::Mat img);
+
+	/**
+	 * @brief reads the camera and returns it in a matrix.
+	 *
+	 * @return the camera frame in a matrix
+	 */
+	cv::Mat readCamera();
+
+	/**
+	 * @brief detects a hand based on the given hand mask input frame.
+	 *
+	 * @param inputFrame the input frame from the camera
+	 * @return true if hand is open, false if hand is closed
+	 */
+	bool detectHand(cv::Mat cameraFrame);
+
+	/**
+	 * @brief draws the hand mask rectangle on the given input matrix.
+	 *
+	 * @param input the input matrix to draw the rectangle on
+	 */
+	bool drawHandMaskRect(cv::Mat *input);
+
+	cv::VideoCapture getCap();
 };
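
Together with the .cpp above, the header now implies a simple per-frame flow: grab a frame with readCamera(), hand it to detectHand(), and act on the returned open/closed flag. A rough sketch of that loop (hypothetical caller; window display and key calibration stay inside detectHand(), as implemented above):

#include <opencv2/core.hpp>
#include "computervision/ObjectDetection.h"

// Sketch: drive the hand detector once per iteration of a game/render loop.
void handDetectionLoop(computervision::ObjectDetection& objDetect)
{
	while (true)
	{
		cv::Mat cameraFrame = objDetect.readCamera();       // frame via videocapture::readFrame()
		if (cameraFrame.empty())
			break;                                          // camera stopped delivering frames

		bool handOpen = objDetect.detectHand(cameraFrame);  // true = open hand, false = closed
		if (handOpen)
		{
			// react to the open hand, e.g. trigger a game action
		}
	}
}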

src/computervision/OpenPoseVideo.cpp

@@ -0,0 +1,111 @@
#include "OpenPoseVideo.h"
using namespace std;
using namespace cv;
using namespace cv::dnn;
namespace computervision
{
#define MPI
#ifdef MPI
const int POSE_PAIRS[7][2] =
{
{0,1}, {1,2}, {2,3},
{3,4}, {1,5}, {5,6},
{6,7}
};
string protoFile = "res/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt";
string weightsFile = "res/pose/mpi/pose_iter_160000.caffemodel";
int nPoints = 8;
#endif
#ifdef COCO
const int POSE_PAIRS[17][2] =
{
{1,2}, {1,5}, {2,3},
{3,4}, {5,6}, {6,7},
{1,8}, {8,9}, {9,10},
{1,11}, {11,12}, {12,13},
{1,0}, {0,14},
{14,16}, {0,15}, {15,17}
};
string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
string weightsFile = "pose/coco/pose_iter_440000.caffemodel";
int nPoints = 18;
#endif
Net net;
int inWidth = 368;
int inHeight = 368;
float thresh = 0.01;
void OpenPoseVideo::setup() {
net = readNetFromCaffe(protoFile, weightsFile);
}
cv::Mat OpenPoseVideo::getBlobFromImage(cv::Mat inputImage)
{
Mat frame;
int frameWidth = inputImage.size().width;
int frameHeight = inputImage.size().height;
double t = (double)cv::getTickCount();
std::cout << "reading input image and blob" << std::endl;
frame = inputImage.clone();
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
return inpBlob;
}
void OpenPoseVideo::movementSkeleton(Mat inputImage, Mat inpBlob, std::function<void(std::vector<Point>)> f) {
std::cout << "movement skeleton start" << std::endl;
int frameWidth = inputImage.size().width;
int frameHeight = inputImage.size().height;
std::cout << "done reading image and blob" << std::endl;
net.setInput(inpBlob);
std::cout << "done setting input to net" << std::endl;
Mat output = net.forward();
int H = output.size[2];
int W = output.size[3];
std::cout << "about to find position of boxy parts" << std::endl;
// find the position of the body parts
vector<Point> points(nPoints);
for (int n = 0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
Mat probMap(H, W, CV_32F, output.ptr(0, n));
Point2f p(-1, -1);
Point maxLoc;
double prob;
minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
p = maxLoc;
p.x *= (float)frameWidth / W;
p.y *= (float)frameHeight / H;
/*circle(frame, cv::Point((int)p.x, (int)p.y), 8, Scalar(0, 255, 255), -1);
cv::putText(frame, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1.1, cv::Scalar(0, 0, 255), 2);*/
}
points[n] = p;
}
//cv::putText(frame, cv::format("time taken = %.2f sec", t), cv::Point(50, 50), cv::FONT_HERSHEY_COMPLEX, .8, cv::Scalar(255, 50, 0), 2);
// imshow("Output-Keypoints", frameCopy);
/*imshow("Output-Skeleton", frame);*/
std::cout << "about to call points receiving method" << std::endl;
f(points);
}
}

src/computervision/OpenPoseVideo.h

@@ -0,0 +1,20 @@
#pragma once

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>

using namespace cv;

namespace computervision
{
	class OpenPoseVideo {
	private:

	public:
		cv::Mat getBlobFromImage(cv::Mat inputImage);
		void movementSkeleton(Mat inputImage, Mat inpBlob, std::function<void(std::vector<Point>)> f);
		void setup();
	};
}
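
The class splits pose estimation into two steps: getBlobFromImage() prepares the DNN input blob, and movementSkeleton() runs the forward pass and hands the detected keypoints to a callback. A hedged sketch of one synchronous pass (the callback body is illustrative only; both headers are assumed to be on the include path):

#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include "OpenPoseVideo.h"
#include "VideoCapture.h"

// Sketch: load the network, prepare a blob from one camera frame, run the forward pass.
void estimatePoseOnce()
{
	computervision::OpenPoseVideo openPose;
	openPose.setup(); // loads the MPI prototxt/caffemodel paths defined above

	cv::Mat frame = videocapture::readFrame();
	cv::Mat blob = openPose.getBlobFromImage(frame);

	openPose.movementSkeleton(frame, blob, [](std::vector<cv::Point> points) {
		// points[n] is (-1,-1) when that keypoint's confidence stayed below thresh
		std::cout << "received " << points.size() << " keypoints" << std::endl;
	});
}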

src/computervision/SkinDetector.cpp

@@ -22,7 +22,7 @@ namespace computervision
 	void SkinDetector::drawSkinColorSampler(Mat input) {
 		int frameWidth = input.size().width, frameHeight = input.size().height;
 
-		int rectangleSize = 20;
+		int rectangleSize = 25;
 		Scalar rectangleColor = Scalar(255, 0, 255);
 
 		skinColorSamplerRectangle1 = Rect(frameWidth / 5, frameHeight / 2, rectangleSize, rectangleSize);

src/computervision/VideoCapture.cpp

@@ -0,0 +1,33 @@
#include "VideoCapture.h"
#include <mutex>
#include <iostream>
namespace videocapture{
static cv::VideoCapture cap(0);
static std::mutex mtx;
cv::VideoCapture getCap() {
cap.release();
return cap;
}
cv::Mat readFrame()
{
std::cout << "reading frame" << std::endl;
cv::Mat camFrame, videoFrame;
mtx.lock();
bool res = cap.read(camFrame);
std::cout << (res ? "reading worked" : "reading failed") << std::endl;
videoFrame = camFrame.clone();
mtx.unlock();
return videoFrame;
}
std::mutex* getMutex()
{
return &mtx;
}
}

src/computervision/VideoCapture.h

@@ -0,0 +1,12 @@
#pragma once

#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <mutex>

namespace videocapture {
	cv::VideoCapture getCap();
	std::mutex* getMutex();
	cv::Mat readFrame();
}
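
readFrame() is what makes the shared camera usable from more than one thread: every call locks the module-level mutex, reads from the single cv::VideoCapture, returns a clone, and unlocks. A small sketch of that concurrent use (the thread bodies are illustrative):

#include <thread>
#include <opencv2/core.hpp>
#include "VideoCapture.h"

// Sketch: two threads sharing the one camera through videocapture::readFrame().
// Each call is serialized by the internal mutex and returns an independent clone.
void readFromTwoThreads()
{
	std::thread detectionThread([] {
		cv::Mat frame = videocapture::readFrame(); // guarded by mtx
		// ...run hand or pose detection on this copy...
	});

	cv::Mat uiFrame = videocapture::readFrame();   // main thread, same guard
	// ...display uiFrame...

	detectionThread.join();
}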

src/computervision/async/async_arm_detection.cpp

@@ -0,0 +1,41 @@
#include <iostream>
#include "async_arm_detection.h"
#include "../OpenPoseVideo.h"
#include <thread>
#include "../VideoCapture.h"
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

namespace computervision
{
	AsyncArmDetection::AsyncArmDetection()
	{
	}

	void AsyncArmDetection::run_arm_detection()
	{
	}

	void AsyncArmDetection::start(std::function<void(std::vector<Point>)> points_ready_func, OpenPoseVideo op)
	{
		auto lambda = [](cv::Mat img, std::function<void(std::vector<Point>)> f, OpenPoseVideo op, cv::Mat inpBlob) {
			std::cout << "STARTING THREAD LAMBDA" << std::endl;
			//imshow("image", img); 255, Size(368, 368), Scalar(0, 0, 0), false, false);
			op.movementSkeleton(img, inpBlob, f);
			//}
		};

		cv::Mat img = videocapture::readFrame();
		std::cout << "starting function" << std::endl;
		cv::Mat inpBlob = op.getBlobFromImage(videocapture::readFrame());

		std::thread async_arm_detect_thread(lambda, img, points_ready_func, op, inpBlob);
	}
}

src/computervision/async/async_arm_detection.h

@@ -0,0 +1,22 @@
#pragma once

#include <vector>
#include <opencv2/core/types.hpp>
#include <opencv2/videoio.hpp>
#include <functional>
#include "../OpenPoseVideo.h"

namespace computervision
{
	class AsyncArmDetection
	{
	public:
		AsyncArmDetection(void);

		void start(std::function<void(std::vector<cv::Point>)>, computervision::OpenPoseVideo op);

	private:
		void run_arm_detection();
	};
}
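
start() reads a frame, builds the input blob, and then launches movementSkeleton() on its own std::thread, delivering the keypoints to the supplied callback; main.cpp below restarts detection from that callback. A hedged usage sketch mirroring that pattern (it assumes start() detaches or stores its std::thread; as written in the .cpp above, the local thread object would need a join or detach before going out of scope):

#include <iostream>
#include <vector>
#include <opencv2/core/types.hpp>
#include "computervision/async/async_arm_detection.h"
#include "computervision/OpenPoseVideo.h"

// Sketch: kick off one asynchronous arm-detection pass and queue the next one
// from the callback, as main.cpp does with retrieve_points().
computervision::AsyncArmDetection armDetection;
computervision::OpenPoseVideo poseNet;

void onPointsReady(std::vector<cv::Point> armPoints)
{
	std::cout << "got " << armPoints.size() << " arm points" << std::endl;
	armDetection.start(onPointsReady, poseNet); // restart for the next frame
}

void beginArmDetection()
{
	poseNet.setup();                            // load the network once
	armDetection.start(onPointsReady, poseNet);
}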

src/main.cpp

@@ -1,9 +1,12 @@
 #include <GL/glew.h>
 #include <GLFW/glfw3.h>
 #include <glm/gtc/matrix_transform.hpp>
+#include <functional>
+#include <vector>
 
 #define STB_IMAGE_IMPLEMENTATION
 #include "stb_image.h"
 #include <ostream>
+#include <stdlib.h>
 
 #include <opencv2/core.hpp>
 #include <opencv2/videoio.hpp>
@@ -17,6 +20,10 @@
#include "toolbox/toolbox.h" #include "toolbox/toolbox.h"
#include "computervision/ObjectDetection.h" #include "computervision/ObjectDetection.h"
//#include "computervision/OpenPoseImage.h"
#include "computervision/OpenPoseVideo.h"
#include "computervision/async/async_arm_detection.h"
#pragma comment(lib, "glfw3.lib") #pragma comment(lib, "glfw3.lib")
#pragma comment(lib, "glew32s.lib") #pragma comment(lib, "glew32s.lib")
@@ -25,7 +32,15 @@
 static double UpdateDelta();
 
 static GLFWwindow* window;
+computervision::AsyncArmDetection as;
+computervision::OpenPoseVideo openPoseVideo;
+
+void retrieve_points(std::vector<Point> arm_points)
+{
+	std::cout << "got points!!" << std::endl;
+	std::cout << "points: " << arm_points << std::endl;
+	as.start(retrieve_points, openPoseVideo);
+}
 
 int main(void)
 {
@@ -63,10 +78,21 @@ int main(void)
 	// create object detection object instance
 	computervision::ObjectDetection objDetect;
+	//computervision::OpenPoseImage openPoseImage;
+	openPoseVideo.setup();
 
 	// set up object detection
 	//objDetect.setup();
+	cv::Mat cameraFrame;
+
+	//openPoseVideo.setup();
+	as.start(retrieve_points, openPoseVideo);
 
 	// Main game loop
 	while (!glfwWindowShouldClose(window))
@@ -81,10 +107,11 @@ int main(void)
 		shader.Start();
 		shader.LoadViewMatrix(camera);
 		render_engine::renderer::Render(entity, shader);
 
-		//objDetect.setup();
-		objDetect.calculateDifference();
+		cameraFrame = objDetect.readCamera();
+		//objDetect.detectHand(cameraFrame);
 
 		// Finish up
 		shader.Stop();


@@ -18,7 +18,7 @@ namespace render_engine
 		void Init(shaders::StaticShader& shader)
 		{
 			const glm::mat4 projectionMatrix =
-				glm::perspective(glm::radians(FOV), (WINDOW_WIDTH / WINDOW_HEIGT), NEAR_PLANE, FAR_PLANE);
+				glm::perspective(glm::radians(FOV), (float)(WINDOW_WIDTH / WINDOW_HEIGT), NEAR_PLANE, FAR_PLANE);
 
 			shader.Start();
 			shader.LoadProjectionMatrix(projectionMatrix);
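
The added cast fixes the argument type handed to glm::perspective, but if WINDOW_WIDTH and WINDOW_HEIGT are integer constants (their definitions are not visible in this diff), the division inside the parentheses still truncates before the cast. A small illustration of the difference, using hypothetical integer dimensions:

// Hypothetical integer window dimensions, purely for illustration.
constexpr int WINDOW_WIDTH = 1280;
constexpr int WINDOW_HEIGT = 720;

// Integer division first, cast second: 1280 / 720 == 1, so this yields 1.0f.
float truncatedAspect = (float)(WINDOW_WIDTH / WINDOW_HEIGT);

// Casting one operand before dividing keeps the fraction: ~1.777f.
float aspect = (float)WINDOW_WIDTH / WINDOW_HEIGT;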


@@ -19,11 +19,14 @@
   </ProjectConfiguration>
 </ItemGroup>
 <ItemGroup>
+  <ClCompile Include="src\computervision\async\async_arm_detection.cpp" />
   <ClCompile Include="src\computervision\FaceDetector.cpp" />
   <ClCompile Include="src\computervision\ObjectDetection.cpp" />
+  <ClCompile Include="src\computervision\OpenPoseVideo.cpp" />
   <ClCompile Include="src\computervision\SkinDetector.cpp" />
   <ClCompile Include="src\computervision\FingerCount.cpp" />
   <ClCompile Include="src\computervision\BackgroundRemover.cpp" />
+  <ClCompile Include="src\computervision\VideoCapture.cpp" />
   <ClCompile Include="src\entities\camera.cpp" />
   <ClCompile Include="src\entities\entity.cpp" />
   <ClCompile Include="src\main.cpp" />
@@ -35,11 +38,14 @@
<ClCompile Include="src\toolbox\toolbox.cpp" /> <ClCompile Include="src\toolbox\toolbox.cpp" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<ClInclude Include="src\computervision\async\async_arm_detection.h" />
<ClInclude Include="src\computervision\FaceDetector.h" /> <ClInclude Include="src\computervision\FaceDetector.h" />
<ClInclude Include="src\computervision\FingerCount.h" /> <ClInclude Include="src\computervision\FingerCount.h" />
<ClInclude Include="src\computervision\BackgroundRemover.h" /> <ClInclude Include="src\computervision\BackgroundRemover.h" />
<ClInclude Include="src\computervision\OpenPoseVideo.h" />
<ClInclude Include="src\computervision\SkinDetector.h" /> <ClInclude Include="src\computervision\SkinDetector.h" />
<ClInclude Include="src\computervision\ObjectDetection.h" /> <ClInclude Include="src\computervision\ObjectDetection.h" />
<ClInclude Include="src\computervision\VideoCapture.h" />
<ClInclude Include="src\entities\camera.h" /> <ClInclude Include="src\entities\camera.h" />
<ClInclude Include="src\entities\entity.h" /> <ClInclude Include="src\entities\entity.h" />
<ClInclude Include="src\models\model.h" /> <ClInclude Include="src\models\model.h" />
@@ -54,6 +60,11 @@
 <ItemGroup>
   <Xml Include="res\haarcascade_frontalface_alt.xml" />
 </ItemGroup>
+<ItemGroup>
+  <None Include="res\pose\coco\pose_deploy_linevec.prototxt" />
+  <None Include="res\pose\mpi\pose_deploy_linevec_faster_4_stages.prototxt" />
+  <None Include="res\pose\mpi\pose_iter_160000.caffemodel" />
+</ItemGroup>
 <PropertyGroup Label="Globals">
   <VCProjectVersion>16.0</VCProjectVersion>
   <ProjectGuid>{A7ECF1BE-DB22-4BF7-BFF6-E3BF72691EE6}</ProjectGuid>


@@ -57,6 +57,15 @@
<ClCompile Include="src\computervision\BackgroundRemover.cpp"> <ClCompile Include="src\computervision\BackgroundRemover.cpp">
<Filter>Source Files</Filter> <Filter>Source Files</Filter>
</ClCompile> </ClCompile>
<ClCompile Include="src\computervision\OpenPoseVideo.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\computervision\async\async_arm_detection.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\computervision\VideoCapture.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<ClInclude Include="src\entities\Camera.h"> <ClInclude Include="src\entities\Camera.h">
@@ -104,8 +113,22 @@
<ClInclude Include="src\computervision\BackgroundRemover.h"> <ClInclude Include="src\computervision\BackgroundRemover.h">
<Filter>Header Files</Filter> <Filter>Header Files</Filter>
</ClInclude> </ClInclude>
<ClInclude Include="src\computervision\OpenPoseVideo.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="src\computervision\async\async_arm_detection.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="src\computervision\VideoCapture.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<Xml Include="res\haarcascade_frontalface_alt.xml" /> <Xml Include="res\haarcascade_frontalface_alt.xml" />
</ItemGroup> </ItemGroup>
<ItemGroup>
<None Include="res\pose\coco\pose_deploy_linevec.prototxt" />
<None Include="res\pose\mpi\pose_deploy_linevec_faster_4_stages.prototxt" />
<None Include="res\pose\mpi\pose_iter_160000.caffemodel" />
</ItemGroup>
</Project> </Project>