Compare commits

13 Commits

Author SHA1 Message Date
Sem van der Hoeven  921609de5d  [EDIT] stuff  2021-06-04 11:39:23 +02:00
Sem van der Hoeven  fe94b0f83d  [FIX] showing of pose detection points  2021-06-04 10:55:53 +02:00
Sem van der Hoeven  ab30c41bee  [FIX] crashing with pose detection  2021-06-02 10:41:50 +02:00
Sem van der Hoeven  1a149b8b7e  [ADD] static camera instance  2021-06-02 10:05:09 +02:00
Sem van der Hoeven  cc7cb37840  [ADD] caffemodel project entry  2021-06-02 09:44:30 +02:00
Sem van der Hoeven  40529f84b3  [ADD] basis for async arm detection  2021-05-28 15:31:21 +02:00
Jasper  a68c6a57bf  [EDIT] edited file  2021-05-28 12:32:10 +02:00
Jasper  078a6ce66d  [ADD] added all the files  2021-05-28 12:27:12 +02:00
Sem van der Hoeven  f1f1aac93d  [ADD] comments  2021-05-25 15:54:02 +02:00
Sem van der Hoeven  563f465e2c  [EDIT] remove unused methods  2021-05-25 15:46:53 +02:00
Sem van der Hoeven  05ae8ee019  [FEATURE] finished hand open/closed recognition  2021-05-25 14:49:04 +02:00
Sem van der Hoeven  3696e2eb30  [EDIT] improve hand detection with mask  2021-05-25 14:19:18 +02:00
Sem van der Hoeven  276aa1a449  [ADD] mask methods  2021-05-25 13:31:25 +02:00
17 changed files with 5506 additions and 92 deletions

.gitignore
View File

@@ -428,4 +428,6 @@ FodyWeavers.xsd
**/docs/*
**/doc/*
+**/pose_iter_160000.caffemodel
# End of https://www.toptal.com/developers/gitignore/api/c++,visualstudio,visualstudiocode,opencv

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -151,9 +151,16 @@ namespace computervision
drawVectorPoints(frame, filtered_finger_points, color_yellow, false);
putText(frame, to_string(filtered_finger_points.size()), center_bounding_rect, FONT_HERSHEY_PLAIN, 3, color_purple);
+amount_of_fingers = filtered_finger_points.size();
return contours_image;
}
+int FingerCount::getAmountOfFingers()
+{
+return amount_of_fingers;
+}
double FingerCount::findPointsDistance(Point a, Point b) {
Point difference = a - b;
return sqrt(difference.ddot(difference));

View File

@@ -24,6 +24,13 @@ namespace computervision
*/
Mat findFingersCount(Mat input_image, Mat frame);
+/**
+* @brief gets the currently held-up finger count.
+*
+* @return the currently held-up finger count
+*/
+int getAmountOfFingers();
private:
// colors to use
Scalar color_blue;
@@ -34,6 +41,8 @@ namespace computervision
Scalar color_yellow;
Scalar color_purple;
+int amount_of_fingers;
/**
* @brief finds the distance between 2 points.
*

View File

@@ -1,47 +1,70 @@
+#include <opencv2/videoio.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/video.hpp>
#include "ObjectDetection.h"
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
+#include "async/StaticCameraInstance.h"
namespace computervision
{
-cv::VideoCapture cap(0);
cv::Mat img, imgGray, img2, img2Gray, img3, img4;
+int handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight;
+bool handMaskGenerated = false;
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
+cv::VideoCapture cap = static_camera::getCap();
ObjectDetection::ObjectDetection()
{
}
-bool ObjectDetection::setup()
-{
-if (!cap.isOpened()) {
-cout << "Can't find camera!" << endl;
-return false;
-}
-cap.read(frame);
-frameOut = frame.clone();
+cv::Mat ObjectDetection::readCamera() {
+cap.read(img);
+return img;
+}
+cv::VideoCapture ObjectDetection::getCap()
+{
+return cap;
+}
+bool ObjectDetection::detectHand(Mat cameraFrame)
+{
+Mat inputFrame = generateHandMaskSquare(cameraFrame);
+frameOut = inputFrame.clone();
+// detect skin color
skinDetector.drawSkinColorSampler(frameOut);
-foreground = backgroundRemover.getForeground(frame);
-faceDetector.removeFaces(frame, foreground);
+// remove background from image
+foreground = backgroundRemover.getForeground(inputFrame);
+// detect the hand contours
handMask = skinDetector.getSkinMask(foreground);
+// count the amount of fingers and put the info on the matrix
fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
-//backgroundRemover.calibrate(frame);
+// get the amount of fingers
+int fingers_amount = fingerCount.getAmountOfFingers();
+// draw the hand rectangle on the camera input, and draw text showing if the hand is open or closed.
+drawHandMaskRect(&cameraFrame);
+string hand_text = fingers_amount > 0 ? "open" : "closed";
+putText(cameraFrame,hand_text, Point(10, 75), FONT_HERSHEY_PLAIN, 2.0, Scalar(255, 0, 255),3);
+imshow("camera", cameraFrame);
imshow("output", frameOut);
imshow("foreground", foreground);
@@ -50,12 +73,12 @@ namespace computervision
int key = waitKey(1);
-if (key == 98) // b
-backgroundRemover.calibrate(frame);
-else if (key == 115) // s
-skinDetector.calibrate(frame);
-return true;
+if (key == 98) // b, calibrate the background
+backgroundRemover.calibrate(inputFrame);
+else if (key == 115) // s, calibrate the skin color
+skinDetector.calibrate(inputFrame);
+return fingers_amount > 0;
}
void ObjectDetection::calculateDifference() void ObjectDetection::calculateDifference()
@@ -72,14 +95,32 @@ namespace computervision
imshow("threshold", img4);
}
-void ObjectDetection::detect()
-{
-int key = waitKey(1);
-if (key == 98) // b
-backgroundRemover.calibrate(frame);
-else if (key == 115) // s
-skinDetector.calibrate(frame);
-}
+cv::Mat ObjectDetection::generateHandMaskSquare(cv::Mat img)
+{
+handMaskStartXPos = 20;
+handMaskStartYPos = img.rows / 5;
+handMaskWidth = img.cols / 3;
+handMaskHeight = img.cols / 3;
+cv::Mat mask = cv::Mat::zeros(img.size(), img.type());
+cv::Mat dstImg = cv::Mat::zeros(img.size(), img.type());
+cv::rectangle(mask, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255), -1);
+img.copyTo(dstImg, mask);
+handMaskGenerated = true;
+return dstImg;
+}
+bool ObjectDetection::drawHandMaskRect(cv::Mat* input)
+{
+if (!handMaskGenerated) return false;
+rectangle(*input, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255));
+return true;
+}
void ObjectDetection::showWebcam()

View File

@@ -22,13 +22,7 @@ namespace computervision
*
*/
ObjectDetection();
-/**
-* @brief Initializes the object detection, captures a frame and modifies it
-* so it is ready to use for object detection
-*
-* @return return true if webcam is connected, returns false if it isn't
-*/
-bool setup();
/**
* @brief Displays an image of the current webcam-footage
*
@@ -40,11 +34,39 @@ namespace computervision
*
*/
void calculateDifference();
/**
-* @brief Listens for keypresses and handles them
+* @brief generates the square that will hold the mask in which the hand will be detected.
*
+* @param img the current camear frame
+* @return a matrix containing the mask
*/
-void detect();
+cv::Mat generateHandMaskSquare(cv::Mat img);
+/**
+* @brief reads the camera and returns it in a matrix.
+*
+* @return the camera frame in a matrix
+*/
+cv::Mat readCamera();
+/**
+* @brief detects a hand based on the given hand mask input frame.
+*
+* @param inputFrame the input frame from the camera
+* @return true if hand is open, false if hand is closed
+*/
+bool detectHand(cv::Mat cameraFrame);
+/**
+* @brief draws the hand mask rectangle on the given input matrix.
+*
+* @param input the input matrix to draw the rectangle on
+*/
+bool drawHandMaskRect(cv::Mat *input);
+cv::VideoCapture getCap();
};

View File

@@ -0,0 +1,108 @@
#include "OpenPoseVideo.h"
using namespace std;
using namespace cv;
using namespace cv::dnn;
namespace computervision
{
#define MPI
#ifdef MPI
const int POSE_PAIRS[7][2] =
{
{0,1}, {1,2}, {2,3},
{3,4}, {1,5}, {5,6},
{6,7}
};
string protoFile = "res/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt";
string weightsFile = "res/pose/mpi/pose_iter_160000.caffemodel";
int nPoints = 8;
#endif
#ifdef COCO
const int POSE_PAIRS[17][2] =
{
{1,2}, {1,5}, {2,3},
{3,4}, {5,6}, {6,7},
{1,8}, {8,9}, {9,10},
{1,11}, {11,12}, {12,13},
{1,0}, {0,14},
{14,16}, {0,15}, {15,17}
};
string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
string weightsFile = "pose/coco/pose_iter_440000.caffemodel";
int nPoints = 18;
#endif
Net net;
void OpenPoseVideo::setup() {
net = readNetFromCaffe(protoFile, weightsFile);
net.setPreferableBackend(DNN_TARGET_CPU);
}
void OpenPoseVideo::movementSkeleton(Mat& inputImage, std::function<void(std::vector<Point>&, cv::Mat& poinst_on_image)> f) {
std::cout << "movement skeleton start" << std::endl;
int inWidth = 368;
int inHeight = 368;
float thresh = 0.01;
Mat frame;
int frameWidth = inputImage.size().width;
int frameHeight = inputImage.size().height;
double t = (double)cv::getTickCount();
std::cout << "reading input image and blob" << std::endl;
frame = inputImage;
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
std::cout << "done reading image and blob" << std::endl;
net.setInput(inpBlob);
std::cout << "done setting input to net" << std::endl;
Mat output = net.forward();
std::cout << "time took to set input and forward: " << t << std::endl;
int H = output.size[2];
int W = output.size[3];
std::cout << "about to find position of boxy parts" << std::endl;
// find the position of the body parts
vector<Point> points(nPoints);
for (int n = 0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
Mat probMap(H, W, CV_32F, output.ptr(0, n));
Point2f p(-1, -1);
Point maxLoc;
double prob;
minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
p = maxLoc;
p.x *= (float)frameWidth / W;
p.y *= (float)frameHeight / H;
circle(frame, cv::Point((int)p.x, (int)p.y), 8, Scalar(0, 255, 255), -1);
cv::putText(frame, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1.1, cv::Scalar(0, 0, 255), 2);
}
points[n] = p;
}
cv::putText(frame, cv::format("time taken = %.2f sec", t), cv::Point(50, 50), cv::FONT_HERSHEY_COMPLEX, .8, cv::Scalar(255, 50, 0), 2);
std::cout << "time taken: " << t << std::endl;
//imshow("Output-Keypoints", frame);
//imshow("Output-Skeleton", frame);
std::cout << "about to call points receiving method" << std::endl;
f(points,frame);
}
}

View File

@@ -0,0 +1,19 @@
#pragma once
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
namespace computervision
{
class OpenPoseVideo{
private:
public:
void movementSkeleton(Mat& inputImage, std::function<void(std::vector<Point>&, cv::Mat& poinst_on_image)> f);
void setup();
};
}
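For orientation, here is a minimal, hypothetical driver for this class, based only on the declarations above and on how async_arm_detection.cpp in this compare uses it: setup() is called once to load the Caffe model, then each frame is passed to movementSkeleton() together with a callback that receives the detected points and the annotated frame. The VideoCapture used here is purely illustrative.

#include <functional>
#include <vector>
#include <opencv2/videoio.hpp>
#include "OpenPoseVideo.h"

int main()
{
    computervision::OpenPoseVideo pose;
    pose.setup(); // loads protoFile/weightsFile into the DNN once

    cv::VideoCapture cap(0); // illustrative frame source only
    cv::Mat frame;
    while (cap.read(frame))
    {
        // the callback receives the detected key points and the frame they were drawn on
        pose.movementSkeleton(frame, [](std::vector<cv::Point>& points, cv::Mat& points_on_image)
        {
            cv::imshow("Output-Keypoints", points_on_image);
            cv::waitKey(1);
        });
    }
    return 0;
}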

View File

@@ -22,7 +22,7 @@ namespace computervision
void SkinDetector::drawSkinColorSampler(Mat input) {
int frameWidth = input.size().width, frameHeight = input.size().height;
-int rectangleSize = 20;
+int rectangleSize = 25;
Scalar rectangleColor = Scalar(255, 0, 255);
skinColorSamplerRectangle1 = Rect(frameWidth / 5, frameHeight / 2, rectangleSize, rectangleSize);

View File

@@ -0,0 +1,12 @@
#pragma once
#include <opencv2/videoio.hpp>
namespace static_camera
{
static cv::VideoCapture getCap()
{
static cv::VideoCapture cap(0);
return cap;
}
};
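A short usage sketch of the shared capture this header provides; both ObjectDetection.cpp and async_arm_detection.cpp in this compare call static_camera::getCap() instead of constructing their own VideoCapture(0). The helper below is hypothetical, and it assumes the usual OpenCV behaviour that a copied VideoCapture refers to the same underlying capture.

#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
#include "StaticCameraInstance.h"

// Hypothetical call site: any component that needs camera frames asks for the
// lazily constructed, function-local static capture instead of opening device 0 again.
cv::Mat grabFrame()
{
    cv::VideoCapture cap = static_camera::getCap();
    cv::Mat frame;
    if (cap.isOpened())
        cap.read(frame);
    return frame;
}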

View File

@@ -0,0 +1,46 @@
#include <iostream>
#include "async_arm_detection.h"
#include "../OpenPoseVideo.h"
#include <thread>
#include "StaticCameraInstance.h"
namespace computervision
{
AsyncArmDetection::AsyncArmDetection()
{
}
void AsyncArmDetection::run_arm_detection(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op)
{
VideoCapture cap = static_camera::getCap();
std::cout << "STARTING THREAD LAMBDA" << std::endl;
/*cv::VideoCapture cap = static_camera::getCap();*/
if (!cap.isOpened())
{
std::cout << "capture was closed, opening..." << std::endl;
cap.open(0);
}
while (true)
{
Mat img;
cap.read(img);
op.movementSkeleton(img, points_ready_func);
}
}
void AsyncArmDetection::start(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op)
{
std::cout << "starting function" << std::endl;
std::thread async_arm_detect_thread(&AsyncArmDetection::run_arm_detection,this, points_ready_func, op);
async_arm_detect_thread.detach(); // makes sure the thread is detached from the variable.
}
}

View File

@@ -0,0 +1,23 @@
#pragma once
#include <vector>
#include <opencv2/core/types.hpp>
#include <opencv2/videoio.hpp>
#include <functional>
#include "../OpenPoseVideo.h"
#include "StaticCameraInstance.h"
namespace computervision
{
class AsyncArmDetection
{
public:
AsyncArmDetection(void);
void start(std::function<void(std::vector<cv::Point>, cv::Mat poinst_on_image)>, computervision::OpenPoseVideo op);
private:
void run_arm_detection(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op);
};
}

View File

@@ -1,9 +1,12 @@
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/gtc/matrix_transform.hpp>
+#include <functional>
+#include <vector>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include <ostream>
+#include <stdlib.h>
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
@@ -17,6 +20,10 @@
#include "toolbox/toolbox.h"
#include "computervision/ObjectDetection.h"
+//#include "computervision/OpenPoseImage.h"
+#include "computervision/OpenPoseVideo.h"
+#include "computervision/async/async_arm_detection.h"
#pragma comment(lib, "glfw3.lib")
#pragma comment(lib, "glew32s.lib")
@@ -25,85 +32,114 @@
static double UpdateDelta();
static GLFWwindow* window;
+bool points_img_available = false;
+cv::Mat points_img;
+void retrieve_points(std::vector<Point> arm_points, cv::Mat points_on_image)
+{
+std::cout << "got points!!" << std::endl;
+std::cout << "points: " << arm_points << std::endl;
+points_img = points_on_image;
+points_img_available = true;
+}
int main(void)
{
#pragma region OPENGL_SETTINGS
if (!glfwInit())
throw "Could not inditialize glwf";
window = glfwCreateWindow(WINDOW_WIDTH, WINDOW_HEIGT, "SDBA", NULL, NULL);
if (!window)
{
glfwTerminate();
throw "Could not initialize glwf";
}
glfwMakeContextCurrent(window);
glewInit();
glGetError();
#pragma endregion
glfwSetKeyCallback(window, [](GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE)
glfwSetWindowShouldClose(window, true);
});
models::RawModel raw_model = LoadObjModel("res/Tree.obj");
models::ModelTexture texture = { render_engine::loader::LoadTexture("res/TreeTexture.png") };
models::TexturedModel model = { raw_model, texture };
entities::Entity entity(model, glm::vec3(0, -5, -20), glm::vec3(0, 0, 0), 1);
shaders::StaticShader shader;
shader.Init();
render_engine::renderer::Init(shader);
entities::Camera camera(glm::vec3(0, 0, 0), glm::vec3(0, 0, 0));
// create object detection object instance
computervision::ObjectDetection objDetect;
+//computervision::OpenPoseImage openPoseImage;
+computervision::OpenPoseVideo openPoseVideo;
+openPoseVideo.setup();
// set up object detection
//objDetect.setup();
+//cv::VideoCapture cam = objDetect.getCap();
+cv::Mat img;
+cv::VideoCapture cap = objDetect.getCap();
+//cam.read(img);
+//imshow("camera in main loop", img);
+computervision::AsyncArmDetection as;
+as.start(retrieve_points,openPoseVideo);
// Main game loop
while (!glfwWindowShouldClose(window))
{
// Update
const double delta = UpdateDelta();
entity.IncreaseRotation(glm::vec3(0, 1, 0));
camera.Move(window);
// Render
render_engine::renderer::Prepare();
shader.Start();
shader.LoadViewMatrix(camera);
-render_engine::renderer::Render(entity, shader);
-//objDetect.setup();
+render_engine::renderer::Render(entity, shader);
+objDetect.calculateDifference();
+//objDetect.detectHand(cameraFrame);
+if (points_img_available)
+{
+imshow("points", points_img);
+points_img_available = false;
+}
// Finish up
shader.Stop();
glfwSwapBuffers(window);
glfwPollEvents();
}
// Clean up
shader.CleanUp();
render_engine::loader::CleanUp();
glfwTerminate();
return 0;
}
static double UpdateDelta()
{
double current_time = glfwGetTime();
static double last_frame_time = current_time;
double delt_time = current_time - last_frame_time;
last_frame_time = current_time;
return delt_time;
}

View File

@@ -18,7 +18,7 @@ namespace render_engine
void Init(shaders::StaticShader& shader)
{
const glm::mat4 projectionMatrix =
-glm::perspective(glm::radians(FOV), (WINDOW_WIDTH / WINDOW_HEIGT), NEAR_PLANE, FAR_PLANE);
+glm::perspective(glm::radians(FOV), (float)(WINDOW_WIDTH / WINDOW_HEIGT), NEAR_PLANE, FAR_PLANE);
shader.Start();
shader.LoadProjectionMatrix(projectionMatrix);

View File

@@ -19,8 +19,10 @@
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
+<ClCompile Include="src\computervision\async\async_arm_detection.cpp" />
<ClCompile Include="src\computervision\FaceDetector.cpp" />
<ClCompile Include="src\computervision\ObjectDetection.cpp" />
+<ClCompile Include="src\computervision\OpenPoseVideo.cpp" />
<ClCompile Include="src\computervision\SkinDetector.cpp" />
<ClCompile Include="src\computervision\FingerCount.cpp" />
<ClCompile Include="src\computervision\BackgroundRemover.cpp" />
@@ -35,9 +37,12 @@
<ClCompile Include="src\toolbox\toolbox.cpp" />
</ItemGroup>
<ItemGroup>
+<ClInclude Include="src\computervision\async\async_arm_detection.h" />
+<ClInclude Include="src\computervision\async\StaticCameraInstance.h" />
<ClInclude Include="src\computervision\FaceDetector.h" />
<ClInclude Include="src\computervision\FingerCount.h" />
<ClInclude Include="src\computervision\BackgroundRemover.h" />
+<ClInclude Include="src\computervision\OpenPoseVideo.h" />
<ClInclude Include="src\computervision\SkinDetector.h" />
<ClInclude Include="src\computervision\ObjectDetection.h" />
<ClInclude Include="src\entities\camera.h" />
@@ -54,6 +59,12 @@
<ItemGroup>
<Xml Include="res\haarcascade_frontalface_alt.xml" />
</ItemGroup>
+<ItemGroup>
+<None Include="..\..\Avans Hogeschool\Kim Veldhoen - Proftaak 2.4\pose_iter_160000.caffemodel" />
+<None Include="res\pose\coco\pose_deploy_linevec.prototxt" />
+<None Include="res\pose\mpi\pose_deploy_linevec_faster_4_stages.prototxt" />
+<None Include="res\pose\mpi\pose_iter_160000.caffemodel" />
+</ItemGroup>
<PropertyGroup Label="Globals">
<VCProjectVersion>16.0</VCProjectVersion>
<ProjectGuid>{A7ECF1BE-DB22-4BF7-BFF6-E3BF72691EE6}</ProjectGuid>
@@ -120,8 +131,8 @@
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
-<IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);;C:\opencv\opencv\build\include</IncludePath>
-<LibraryPath>$(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64);C:\opencv\opencv\build\x64\vc15\lib</LibraryPath>
+<IncludePath>C:\opencv\build\include\;$(VC_IncludePath);$(WindowsSDK_IncludePath);C:\opencv\opencv\build\include</IncludePath>
+<LibraryPath>C:\opencv\build\x64\vc15\lib;$(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64);C:\opencv\opencv\build\x64\vc15\lib</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
@@ -194,7 +205,7 @@
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)lib\glfw-3.3.2\$(Platform);$(SolutionDir)lib\glew-2.1.0\lib\Release\$(Platform);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies); opencv_world452.lib</AdditionalDependencies>
+<AdditionalDependencies>opencv_world452.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

View File

@@ -57,6 +57,12 @@
<ClCompile Include="src\computervision\BackgroundRemover.cpp">
<Filter>Source Files</Filter>
</ClCompile>
+<ClCompile Include="src\computervision\OpenPoseVideo.cpp">
+<Filter>Source Files</Filter>
+</ClCompile>
+<ClCompile Include="src\computervision\async\async_arm_detection.cpp">
+<Filter>Source Files</Filter>
+</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\entities\Camera.h">
@@ -104,8 +110,23 @@
<ClInclude Include="src\computervision\BackgroundRemover.h">
<Filter>Header Files</Filter>
</ClInclude>
+<ClInclude Include="src\computervision\OpenPoseVideo.h">
+<Filter>Header Files</Filter>
+</ClInclude>
+<ClInclude Include="src\computervision\async\async_arm_detection.h">
+<Filter>Header Files</Filter>
+</ClInclude>
+<ClInclude Include="src\computervision\async\StaticCameraInstance.h">
+<Filter>Header Files</Filter>
+</ClInclude>
</ItemGroup>
<ItemGroup>
<Xml Include="res\haarcascade_frontalface_alt.xml" />
</ItemGroup>
+<ItemGroup>
+<None Include="res\pose\coco\pose_deploy_linevec.prototxt" />
+<None Include="res\pose\mpi\pose_deploy_linevec_faster_4_stages.prototxt" />
+<None Include="res\pose\mpi\pose_iter_160000.caffemodel" />
+<None Include="..\..\Avans Hogeschool\Kim Veldhoen - Proftaak 2.4\pose_iter_160000.caffemodel" />
+</ItemGroup>
</Project>