22 Commits

Author  SHA1  Message  Date
Sem van der Hoeven  921609de5d  [EDIT] stuff  2021-06-04 11:39:23 +02:00
Sem van der Hoeven  fe94b0f83d  [FIX] showing of pose detection points  2021-06-04 10:55:53 +02:00
Sem van der Hoeven  ab30c41bee  [FIX] crashing with pose detection  2021-06-02 10:41:50 +02:00
Sem van der Hoeven  1a149b8b7e  [ADD] static camera instance  2021-06-02 10:05:09 +02:00
Sem van der Hoeven  cc7cb37840  [ADD] caffemodel project entry  2021-06-02 09:44:30 +02:00
Sem van der Hoeven  40529f84b3  [ADD] basis for async arm detection  2021-05-28 15:31:21 +02:00
Jasper  a68c6a57bf  [EDIT] edited file  2021-05-28 12:32:10 +02:00
Jasper  078a6ce66d  [ADD] added all the files  2021-05-28 12:27:12 +02:00
Sem van der Hoeven  f1f1aac93d  [ADD] comments  2021-05-25 15:54:02 +02:00
Sem van der Hoeven  563f465e2c  [EDIT] remove unused methods  2021-05-25 15:46:53 +02:00
Sem van der Hoeven  05ae8ee019  [FEATURE] finished hand open/closed recognition  2021-05-25 14:49:04 +02:00
Sem van der Hoeven  3696e2eb30  [EDIT] improve hand detection with mask  2021-05-25 14:19:18 +02:00
Sem van der Hoeven  276aa1a449  [ADD] mask methods  2021-05-25 13:31:25 +02:00
Sem van der Hoeven  ad4075a826  [EDIT] change window size to ints  2021-05-25 10:19:33 +02:00
Sem van der Hoeven  e50cd92a35  [ADD] some headers  2021-05-25 10:16:58 +02:00
Sem van der Hoeven  ff79c1525c  Merge branch 'feature/objectdetection' into develop  2021-05-21 15:25:29 +02:00
Nathalie Seen  a7597c8d4f  [ADD] comments to backgroundRemover  2021-05-21 15:24:06 +02:00
Sem van der Hoeven  5b4d9b624f  Merge branch 'feature/objectdetection' into develop  2021-05-21 15:22:38 +02:00
Sem van der Hoeven  27aca98ea4  Merge branch 'feature/comments' into feature/objectdetection  2021-05-21 15:22:07 +02:00
Sem van der Hoeven  ca591dd427  [ADD] comments to fingercount  2021-05-21 15:21:03 +02:00
Sem van der Hoeven  acf24cab36  [ADD] comments to skindetector  2021-05-21 14:56:45 +02:00
Menno  01571d191f  [FIXED] openCV included  2021-05-18 14:05:56 +02:00
20 changed files with 5661 additions and 94 deletions

.gitignore (vendored)

@@ -428,4 +428,6 @@ FodyWeavers.xsd
**/docs/*
**/doc/*
**/pose_iter_160000.caffemodel
# End of https://www.toptal.com/developers/gitignore/api/c++,visualstudio,visualstudiocode,opencv

File diff suppressed because it is too large.

File diff suppressed because it is too large.

src/computervision/BackgroundRemover.h

@@ -11,15 +11,48 @@ using namespace std;
class BackgroundRemover {
public:
/**
* @brief constructor,
* creates the background variable and sets calibrated to false
*
*/
BackgroundRemover(void);
/**
* @brief converts the input image to grayscale and sets calibrated to true
*
* @param input the image to calibrate the background from
*/
void calibrate(Mat input);
/**
* @brief gets the mask of the foreground of the input image
* and copies it to another image
*
* @param input the image from which the foreground needs to be extracted
* @return the image onto which the foreground mask is copied
*/
Mat getForeground(Mat input);
private:
Mat background;
bool calibrated = false;
/**
* @brief converts the image to grayscale and removes the background
*
* @param input the image from which the foreground needs to be extracted
* @return the mask of the foreground of the image
*/
Mat getForegroundMask(Mat input);
/**
* @brief makes everything on the background black
*
* @param input the image from which the background needs to be removed
* @param background the background of the image
*/
void removeBackground(Mat input, Mat background);
};
}
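
For reference, a minimal usage sketch of the BackgroundRemover API documented above. The camera loop, the 'b' re-calibration key (mirroring ObjectDetection.cpp), calibrating before the first getForeground call, and the computervision namespace qualification are assumptions for illustration, not code from this commit.

#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include "BackgroundRemover.h"

int main() {
    cv::VideoCapture cap(0);
    computervision::BackgroundRemover remover;
    cv::Mat frame;
    bool calibrated = false;
    while (cap.read(frame)) {
        if (!calibrated) {                           // assumed: calibrate on the first frame
            remover.calibrate(frame);
            calibrated = true;
        }
        cv::Mat foreground = remover.getForeground(frame); // background pixels become black
        cv::imshow("input", frame);
        cv::imshow("foreground", foreground);
        int key = cv::waitKey(1);
        if (key == 'b') remover.calibrate(frame);    // assumed re-calibration key, as in ObjectDetection.cpp
        else if (key == 27) break;                   // Esc to quit
    }
    return 0;
}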

src/computervision/FingerCount.cpp

@@ -151,9 +151,16 @@ namespace computervision
drawVectorPoints(frame, filtered_finger_points, color_yellow, false);
putText(frame, to_string(filtered_finger_points.size()), center_bounding_rect, FONT_HERSHEY_PLAIN, 3, color_purple);
amount_of_fingers = filtered_finger_points.size();
return contours_image;
}
int FingerCount::getAmountOfFingers()
{
return amount_of_fingers;
}
double FingerCount::findPointsDistance(Point a, Point b) {
Point difference = a - b;
return sqrt(difference.ddot(difference));

src/computervision/FingerCount.h

@@ -15,9 +15,24 @@ namespace computervision
class FingerCount {
public:
FingerCount(void);
/**
* @brief gets the amount of fingers that are held up.
*
* @param input_image the source image to find the fingers on. It should be a mask of a hand
* @param frame the frame to draw the resulting values on (how many fingers are held up etc)
* @return a new image with all the data drawn on it.
*/
Mat findFingersCount(Mat input_image, Mat frame);
/**
* @brief gets the currently held-up finger count.
*
* @return the currently held-up finger count
*/
int getAmountOfFingers();
private:
// colors to use
Scalar color_blue;
Scalar color_green;
Scalar color_red;
@@ -25,12 +40,80 @@ namespace computervision
Scalar color_white;
Scalar color_yellow;
Scalar color_purple;
int amount_of_fingers;
/**
* @brief finds the distance between 2 points.
*
* @param a the first point
* @param b the second point
* @return a double representing the distance
*/
double findPointsDistance(Point a, Point b);
/**
* @brief compacts the given points on their medians.
* for each point, it checks whether the distance to its neighbour is greater than the
* max distance. If so, the point is added to the returned list as-is. If not, the
* median of the point and its neighbour is calculated and added to the returned list instead.
*
* @param points the points to compact
* @param max_neighbor_distance the maximum distance between points
* @return a vector with the points now compacted.
*/
vector<Point> compactOnNeighborhoodMedian(vector<Point> points, double max_neighbor_distance);
/**
* @brief finds the angle between 3 different points.
*
* @param a the first point
* @param b the second point
* @param c the third point
* @return the angle between the 3 points
*/
double findAngle(Point a, Point b, Point c);
/**
* @brief checks if the given points make up a finger.
*
* @param a the first point to check for
* @param b the second point to check for
* @param c the third point to check for
* @param limit_angle_inf the limit of the angle between 2 fingers
* @param limit_angle_sup the limit of the angle between a finger and a convex point
* @param palm_center the center of the palm
* @param distance_from_palm_tollerance the tolerance on the distance from the palm center
* @return true if the points are a finger, false if not.
*/
bool isFinger(Point a, Point b, Point c, double limit_angle_inf, double limit_angle_sup, cv::Point palm_center, double distance_from_palm_tollerance);
/**
* @brief finds the closest point to the given point that is in the given list.
*
* @param points the points to check for
* @param pivot the pivot to check against
* @return a vector containing the point that is closest
*/
vector<Point> findClosestOnX(vector<Point> points, Point pivot);
/**
* @brief finds the distance between the x coords of the points.
*
* @param a the first point
* @param b the second point
* @return the distance between the x values
*/
double findPointsDistanceOnX(Point a, Point b);
/**
* @brief draws the points on the image.
*
* @param image the image to draw on
* @param points the points to draw
* @param color the color to draw them with
* @param with_numbers if the numbers should be drawn with the points
*/
void drawVectorPoints(Mat image, vector<Point> points, Scalar color, bool with_numbers);
};
}
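
To make the neighborhood-median compaction described above concrete, here is an illustrative sketch of that step. It is an interpretation of the documented behaviour, not the committed implementation, and the helper name is hypothetical.

#include <opencv2/core.hpp>
#include <cmath>
#include <vector>

// Hypothetical sketch: points closer together than max_neighbor_distance are merged into their median.
static std::vector<cv::Point> compactOnNeighborhoodMedianSketch(
        const std::vector<cv::Point>& points, double max_neighbor_distance)
{
    std::vector<cv::Point> result;
    if (points.empty()) return result;

    cv::Point median = points[0];
    for (size_t i = 1; i < points.size(); i++) {
        cv::Point diff = points[i] - median;
        if (std::sqrt(diff.ddot(diff)) > max_neighbor_distance) {
            result.push_back(median);            // far neighbour: keep the current point as-is
            median = points[i];
        } else {
            median = (median + points[i]) / 2;   // close neighbour: merge into the median
        }
    }
    result.push_back(median);
    return result;
}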

src/computervision/ObjectDetection.cpp

@@ -1,47 +1,70 @@
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include "ObjectDetection.h"
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
#include "async/StaticCameraInstance.h"
namespace computervision
{
cv::VideoCapture cap(0);
cv::Mat img, imgGray, img2, img2Gray, img3, img4;
int handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight;
bool handMaskGenerated = false;
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
cv::VideoCapture cap = static_camera::getCap();
ObjectDetection::ObjectDetection()
{
}
bool ObjectDetection::setup()
{
if (!cap.isOpened()) {
cout << "Can't find camera!" << endl;
return false;
cv::Mat ObjectDetection::readCamera() {
cap.read(img);
return img;
}
cap.read(frame);
frameOut = frame.clone();
cv::VideoCapture ObjectDetection::getCap()
{
return cap;
}
bool ObjectDetection::detectHand(Mat cameraFrame)
{
Mat inputFrame = generateHandMaskSquare(cameraFrame);
frameOut = inputFrame.clone();
// detect skin color
skinDetector.drawSkinColorSampler(frameOut);
foreground = backgroundRemover.getForeground(frame);
// remove background from image
foreground = backgroundRemover.getForeground(inputFrame);
faceDetector.removeFaces(frame, foreground);
// detect the hand contours
handMask = skinDetector.getSkinMask(foreground);
// count the amount of fingers and put the info on the matrix
fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
//backgroundRemover.calibrate(frame);
// get the amount of fingers
int fingers_amount = fingerCount.getAmountOfFingers();
// draw the hand rectangle on the camera input, and draw text showing if the hand is open or closed.
drawHandMaskRect(&cameraFrame);
string hand_text = fingers_amount > 0 ? "open" : "closed";
putText(cameraFrame,hand_text, Point(10, 75), FONT_HERSHEY_PLAIN, 2.0, Scalar(255, 0, 255),3);
imshow("camera", cameraFrame);
imshow("output", frameOut);
imshow("foreground", foreground);
@@ -50,12 +73,12 @@ namespace computervision
int key = waitKey(1);
if (key == 98) // b
backgroundRemover.calibrate(frame);
else if (key == 115) // s
skinDetector.calibrate(frame);
if (key == 98) // b, calibrate the background
backgroundRemover.calibrate(inputFrame);
else if (key == 115) // s, calibrate the skin color
skinDetector.calibrate(inputFrame);
return true;
return fingers_amount > 0;
}
void ObjectDetection::calculateDifference()
@@ -72,14 +95,32 @@ namespace computervision
imshow("threshold", img4);
}
void ObjectDetection::detect()
{
int key = waitKey(1);
if (key == 98) // b
backgroundRemover.calibrate(frame);
else if (key == 115) // s
skinDetector.calibrate(frame);
cv::Mat ObjectDetection::generateHandMaskSquare(cv::Mat img)
{
handMaskStartXPos = 20;
handMaskStartYPos = img.rows / 5;
handMaskWidth = img.cols / 3;
handMaskHeight = img.cols / 3;
cv::Mat mask = cv::Mat::zeros(img.size(), img.type());
cv::Mat dstImg = cv::Mat::zeros(img.size(), img.type());
cv::rectangle(mask, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255), -1);
img.copyTo(dstImg, mask);
handMaskGenerated = true;
return dstImg;
}
bool ObjectDetection::drawHandMaskRect(cv::Mat* input)
{
if (!handMaskGenerated) return false;
rectangle(*input, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255));
return true;
}
void ObjectDetection::showWebcam()

src/computervision/ObjectDetection.h

@@ -22,13 +22,7 @@ namespace computervision
*
*/
ObjectDetection();
/**
* @brief Initializes the object detection, captures a frame and modifies it
* so it is ready to use for object detection
*
* @return true if the webcam is connected, false if it isn't
*/
bool setup();
/**
* @brief Displays an image of the current webcam-footage
*
@@ -40,11 +34,39 @@ namespace computervision
*
*/
void calculateDifference();
/**
* @brief Listens for keypresses and handles them
* @brief generates the square that will hold the mask in which the hand will be detected.
*
* @param img the current camera frame
* @return a matrix containing the mask
*/
void detect();
cv::Mat generateHandMaskSquare(cv::Mat img);
/**
* @brief reads the camera and returns it in a matrix.
*
* @return the camera frame in a matrix
*/
cv::Mat readCamera();
/**
* @brief detects a hand based on the given hand mask input frame.
*
* @param cameraFrame the input frame from the camera
* @return true if hand is open, false if hand is closed
*/
bool detectHand(cv::Mat cameraFrame);
/**
* @brief draws the hand mask rectangle on the given input matrix.
*
* @param input the input matrix to draw the rectangle on
* @return true if the rectangle was drawn, false if the hand mask has not been generated yet
*/
bool drawHandMaskRect(cv::Mat *input);
cv::VideoCapture getCap();
};
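
As a quick orientation, a minimal sketch of how the reworked ObjectDetection API above could be driven from a loop. The loop structure and the Esc-to-quit handling are assumptions for illustration, not code from this commit.

#include <opencv2/highgui.hpp>
#include "computervision/ObjectDetection.h"

int main() {
    computervision::ObjectDetection objDetect;
    while (true) {
        cv::Mat frame = objDetect.readCamera();        // grab the current camera frame
        bool hand_open = objDetect.detectHand(frame);  // true when the detected hand is open
        if (hand_open) {
            // react to an open hand, e.g. trigger a game action
        }
        if (cv::waitKey(1) == 27) break;               // Esc to quit (assumed)
    }
    return 0;
}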

src/computervision/OpenPoseVideo.cpp

@@ -0,0 +1,108 @@
#include "OpenPoseVideo.h"
using namespace std;
using namespace cv;
using namespace cv::dnn;
namespace computervision
{
#define MPI
#ifdef MPI
const int POSE_PAIRS[7][2] =
{
{0,1}, {1,2}, {2,3},
{3,4}, {1,5}, {5,6},
{6,7}
};
string protoFile = "res/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt";
string weightsFile = "res/pose/mpi/pose_iter_160000.caffemodel";
int nPoints = 8;
#endif
#ifdef COCO
const int POSE_PAIRS[17][2] =
{
{1,2}, {1,5}, {2,3},
{3,4}, {5,6}, {6,7},
{1,8}, {8,9}, {9,10},
{1,11}, {11,12}, {12,13},
{1,0}, {0,14},
{14,16}, {0,15}, {15,17}
};
string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
string weightsFile = "pose/coco/pose_iter_440000.caffemodel";
int nPoints = 18;
#endif
Net net;
void OpenPoseVideo::setup() {
net = readNetFromCaffe(protoFile, weightsFile);
net.setPreferableBackend(DNN_TARGET_CPU);
}
void OpenPoseVideo::movementSkeleton(Mat& inputImage, std::function<void(std::vector<Point>&, cv::Mat& poinst_on_image)> f) {
std::cout << "movement skeleton start" << std::endl;
int inWidth = 368;
int inHeight = 368;
float thresh = 0.01;
Mat frame;
int frameWidth = inputImage.size().width;
int frameHeight = inputImage.size().height;
double t = (double)cv::getTickCount();
std::cout << "reading input image and blob" << std::endl;
frame = inputImage;
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
std::cout << "done reading image and blob" << std::endl;
net.setInput(inpBlob);
std::cout << "done setting input to net" << std::endl;
Mat output = net.forward();
std::cout << "time took to set input and forward: " << t << std::endl;
int H = output.size[2];
int W = output.size[3];
std::cout << "about to find position of boxy parts" << std::endl;
// find the position of the body parts
vector<Point> points(nPoints);
for (int n = 0; n < nPoints; n++)
{
// Probability map of the corresponding body part.
Mat probMap(H, W, CV_32F, output.ptr(0, n));
Point2f p(-1, -1);
Point maxLoc;
double prob;
minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
p = maxLoc;
p.x *= (float)frameWidth / W;
p.y *= (float)frameHeight / H;
circle(frame, cv::Point((int)p.x, (int)p.y), 8, Scalar(0, 255, 255), -1);
cv::putText(frame, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1.1, cv::Scalar(0, 0, 255), 2);
}
points[n] = p;
}
cv::putText(frame, cv::format("time taken = %.2f sec", t), cv::Point(50, 50), cv::FONT_HERSHEY_COMPLEX, .8, cv::Scalar(255, 50, 0), 2);
std::cout << "time taken: " << t << std::endl;
//imshow("Output-Keypoints", frame);
//imshow("Output-Skeleton", frame);
std::cout << "about to call points receiving method" << std::endl;
f(points,frame);
}
}

src/computervision/OpenPoseVideo.h

@@ -0,0 +1,19 @@
#pragma once
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
namespace computervision
{
class OpenPoseVideo{
private:
public:
void movementSkeleton(Mat& inputImage, std::function<void(std::vector<Point>&, cv::Mat& poinst_on_image)> f);
void setup();
};
}
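
A brief sketch of how OpenPoseVideo's callback-based API above might be invoked from a camera loop. The lambda body, the loop, and the quit handling are illustrative assumptions, not part of this diff.

#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <functional>
#include "OpenPoseVideo.h"

int main() {
    computervision::OpenPoseVideo pose;
    pose.setup();                                    // loads the MPI Caffe model files
    cv::VideoCapture cap(0);
    cv::Mat frame;
    while (cap.read(frame)) {
        pose.movementSkeleton(frame, [](std::vector<cv::Point>& points, cv::Mat& annotated) {
            // points[i] is (-1, -1) when body part i scored below the confidence threshold
            cv::imshow("keypoints", annotated);
        });
        if (cv::waitKey(1) == 27) break;             // Esc to quit
    }
    return 0;
}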

src/computervision/SkinDetector.cpp

@@ -22,7 +22,7 @@ namespace computervision
void SkinDetector::drawSkinColorSampler(Mat input) {
int frameWidth = input.size().width, frameHeight = input.size().height;
int rectangleSize = 20;
int rectangleSize = 25;
Scalar rectangleColor = Scalar(255, 0, 255);
skinColorSamplerRectangle1 = Rect(frameWidth / 5, frameHeight / 2, rectangleSize, rectangleSize);

src/computervision/SkinDetector.h

@@ -17,11 +17,31 @@ namespace computervision
public:
SkinDetector(void);
/*
* @brief draws the positions where the skin color will be sampled.
*
* @param input the input matrix to sample the skin color from
*/
void drawSkinColorSampler(Mat input);
/*
* @brief calibrates the skin color detector with the given input frame
*
* @param input the input frame to calibrate from
*/
void calibrate(Mat input);
/*
* @brief gets the mask for the hand
*
* @param input the input matrix to get the skin mask from
* @returns the skin mask in a new matrix
*/
Mat getSkinMask(Mat input);
private:
// thresholds for hsv calculation
int hLowThreshold = 0;
int hHighThreshold = 0;
int sLowThreshold = 0;
@@ -29,11 +49,28 @@ namespace computervision
int vLowThreshold = 0;
int vHighThreshold = 0;
// whether or not the skin detector has been calibrated yet.
bool calibrated = false;
// rectangles that get drawn to show where the skin color will be sampled
Rect skinColorSamplerRectangle1, skinColorSamplerRectangle2;
/*
* @brief calculates the skin thresholds for the given samples
*
* @param sample1 the first sample
* @param sample2 the second sample
*/
void calculateThresholds(Mat sample1, Mat sample2);
void performOpening(Mat binaryImage, int structuralElementShapde, Point structuralElementSize);
/**
* @brief performs a morphological opening: it generates the structuring element and applies the transformations required to isolate the hand.
* This needs to be done to get the skin mask.
*
* @param binaryImage the matrix to perform the opening on. This needs to be a binary image, so consisting of only 1's and 0's.
* @param structuralElementShape the shape to use for the kernel that is used with generating the structuring element
* @param structuralElementSize the size of the kernel that will be used with generating the structuring element.
*/
void performOpening(Mat binaryImage, int structuralElementShape, Point structuralElementSize);
};
}
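
For clarity on the opening step documented above, a generic OpenCV sketch of a morphological opening on a binary mask. This illustrates the operation only; it is not the committed performOpening implementation, and the kernel construction details are assumptions.

#include <opencv2/imgproc.hpp>

// Illustrative sketch of a morphological opening (erode then dilate) on a binary mask.
static void morphologicalOpeningSketch(cv::Mat& binaryImage, int structuralElementShape,
                                       cv::Point structuralElementSize)
{
    // build the kernel from the requested shape and size (assumed mapping of the parameters)
    cv::Mat element = cv::getStructuringElement(
        structuralElementShape,
        cv::Size(2 * structuralElementSize.x + 1, 2 * structuralElementSize.y + 1),
        structuralElementSize);
    // opening removes small speckles so only the larger skin regions remain
    cv::morphologyEx(binaryImage, binaryImage, cv::MORPH_OPEN, element);
}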

src/computervision/async/StaticCameraInstance.h

@@ -0,0 +1,12 @@
#pragma once
#include <opencv2/videoio.hpp>
namespace static_camera
{
static cv::VideoCapture getCap()
{
static cv::VideoCapture cap(0);
return cap;
}
};
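
The function-local static above gives every caller a handle to the same device-0 capture, since copies of cv::VideoCapture share the same internal capture object. A short usage sketch; the function name below is hypothetical.

#include <opencv2/core.hpp>
#include "StaticCameraInstance.h"

void grabOneFrame() {
    cv::VideoCapture cap = static_camera::getCap(); // same underlying camera everywhere
    cv::Mat frame;
    cap.read(frame);                                // frame can now be processed or shown
}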

src/computervision/async/async_arm_detection.cpp

@@ -0,0 +1,46 @@
#include <iostream>
#include "async_arm_detection.h"
#include "../OpenPoseVideo.h"
#include <thread>
#include "StaticCameraInstance.h"
namespace computervision
{
AsyncArmDetection::AsyncArmDetection()
{
}
void AsyncArmDetection::run_arm_detection(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op)
{
VideoCapture cap = static_camera::getCap();
std::cout << "STARTING THREAD LAMBDA" << std::endl;
/*cv::VideoCapture cap = static_camera::getCap();*/
if (!cap.isOpened())
{
std::cout << "capture was closed, opening..." << std::endl;
cap.open(0);
}
while (true)
{
Mat img;
cap.read(img);
op.movementSkeleton(img, points_ready_func);
}
}
void AsyncArmDetection::start(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op)
{
std::cout << "starting function" << std::endl;
std::thread async_arm_detect_thread(&AsyncArmDetection::run_arm_detection,this, points_ready_func, op);
async_arm_detect_thread.detach(); // detach so the detection thread keeps running after async_arm_detect_thread goes out of scope
}
}

src/computervision/async/async_arm_detection.h

@@ -0,0 +1,23 @@
#pragma once
#include <vector>
#include <opencv2/core/types.hpp>
#include <opencv2/videoio.hpp>
#include <functional>
#include "../OpenPoseVideo.h"
#include "StaticCameraInstance.h"
namespace computervision
{
class AsyncArmDetection
{
public:
AsyncArmDetection(void);
void start(std::function<void(std::vector<cv::Point>, cv::Mat poinst_on_image)>, computervision::OpenPoseVideo op);
private:
void run_arm_detection(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op);
};
}

Application entry point (main source file)

@@ -1,9 +1,16 @@
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/gtc/matrix_transform.hpp>
#include <functional>
#include <vector>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include <ostream>
#include <stdlib.h>
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/video.hpp>
#include "models/model.h"
#include "renderEngine/loader.h"
@@ -13,6 +20,10 @@
#include "toolbox/toolbox.h"
#include "computervision/ObjectDetection.h"
//#include "computervision/OpenPoseImage.h"
#include "computervision/OpenPoseVideo.h"
#include "computervision/async/async_arm_detection.h"
#pragma comment(lib, "glfw3.lib")
#pragma comment(lib, "glew32s.lib")
@@ -21,7 +32,17 @@
static double UpdateDelta();
static GLFWwindow* window;
bool points_img_available = false;
cv::Mat points_img;
void retrieve_points(std::vector<Point> arm_points, cv::Mat points_on_image)
{
std::cout << "got points!!" << std::endl;
std::cout << "points: " << arm_points << std::endl;
points_img = points_on_image;
points_img_available = true;
}
int main(void)
{
@@ -57,11 +78,26 @@ int main(void)
entities::Camera camera(glm::vec3(0, 0, 0), glm::vec3(0, 0, 0));
// create object detection object instance
computervision::ObjectDetection objDetect;
//computervision::OpenPoseImage openPoseImage;
computervision::OpenPoseVideo openPoseVideo;
openPoseVideo.setup();
// set up object detection
//objDetect.setup();
//cv::VideoCapture cam = objDetect.getCap();
cv::Mat img;
cv::VideoCapture cap = objDetect.getCap();
//cam.read(img);
//imshow("camera in main loop", img);
computervision::AsyncArmDetection as;
as.start(retrieve_points,openPoseVideo);
// Main game loop
while (!glfwWindowShouldClose(window))
@@ -76,9 +112,15 @@ int main(void)
shader.Start();
shader.LoadViewMatrix(camera);
render_engine::renderer::Render(entity, shader);
objDetect.setup();
//objDetect.detectHand(cameraFrame);
if (points_img_available)
{
imshow("points", points_img);
points_img_available = false;
}
// Finish up
shader.Stop();

Render engine renderer source (projection matrix setup)

@@ -18,7 +18,7 @@ namespace render_engine
void Init(shaders::StaticShader& shader)
{
const glm::mat4 projectionMatrix =
glm::perspective(glm::radians(FOV), (WINDOW_WIDTH / WINDOW_HEIGT), NEAR_PLANE, FAR_PLANE);
glm::perspective(glm::radians(FOV), (float)WINDOW_WIDTH / WINDOW_HEIGT, NEAR_PLANE, FAR_PLANE);
shader.Start();
shader.LoadProjectionMatrix(projectionMatrix);

src/toolbox/toolbox.h

@@ -5,8 +5,8 @@
namespace toolbox
{
#define WINDOW_WIDTH 1400.0f
#define WINDOW_HEIGT 800.0f
#define WINDOW_WIDTH 1400
#define WINDOW_HEIGT 800
glm::mat4 CreateModelMatrix(glm::vec3 translation, glm::vec3 rotation, float scale);

Visual Studio project file (.vcxproj)

@@ -19,8 +19,10 @@
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClCompile Include="src\computervision\async\async_arm_detection.cpp" />
<ClCompile Include="src\computervision\FaceDetector.cpp" />
<ClCompile Include="src\computervision\ObjectDetection.cpp" />
<ClCompile Include="src\computervision\OpenPoseVideo.cpp" />
<ClCompile Include="src\computervision\SkinDetector.cpp" />
<ClCompile Include="src\computervision\FingerCount.cpp" />
<ClCompile Include="src\computervision\BackgroundRemover.cpp" />
@@ -35,9 +37,12 @@
<ClCompile Include="src\toolbox\toolbox.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\computervision\async\async_arm_detection.h" />
<ClInclude Include="src\computervision\async\StaticCameraInstance.h" />
<ClInclude Include="src\computervision\FaceDetector.h" />
<ClInclude Include="src\computervision\FingerCount.h" />
<ClInclude Include="src\computervision\BackgroundRemover.h" />
<ClInclude Include="src\computervision\OpenPoseVideo.h" />
<ClInclude Include="src\computervision\SkinDetector.h" />
<ClInclude Include="src\computervision\ObjectDetection.h" />
<ClInclude Include="src\entities\camera.h" />
@@ -54,6 +59,12 @@
<ItemGroup>
<Xml Include="res\haarcascade_frontalface_alt.xml" />
</ItemGroup>
<ItemGroup>
<None Include="..\..\Avans Hogeschool\Kim Veldhoen - Proftaak 2.4\pose_iter_160000.caffemodel" />
<None Include="res\pose\coco\pose_deploy_linevec.prototxt" />
<None Include="res\pose\mpi\pose_deploy_linevec_faster_4_stages.prototxt" />
<None Include="res\pose\mpi\pose_iter_160000.caffemodel" />
</ItemGroup>
<PropertyGroup Label="Globals">
<VCProjectVersion>16.0</VCProjectVersion>
<ProjectGuid>{A7ECF1BE-DB22-4BF7-BFF6-E3BF72691EE6}</ProjectGuid>
@@ -112,14 +123,16 @@
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>C:\opencv\build\include;$(IncludePath)</IncludePath>
<LibraryPath>C:\opencv\build\x64\vc15\lib;$(LibraryPath)</LibraryPath>
<IncludePath>C:\opencv\build\include;$(IncludePath);C:\opencv\opencv\build\include</IncludePath>
<LibraryPath>C:\opencv\build\x64\vc15\lib;$(LibraryPath);C:\opencv\opencv\build\x64\vc15\lib</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>C:\opencv\build\include\;$(VC_IncludePath);$(WindowsSDK_IncludePath);C:\opencv\opencv\build\include</IncludePath>
<LibraryPath>C:\opencv\build\x64\vc15\lib;$(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64);C:\opencv\opencv\build\x64\vc15\lib</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
@@ -151,7 +164,7 @@
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)lib\glfw-3.3.2\$(Platform);$(SolutionDir)lib\glew-2.1.0\lib\Release\$(Platform);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalDependencies>opencv_world452d.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalDependencies>opencv_world452d.lib;%(AdditionalDependencies); opencv_world452.lib</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
@@ -192,6 +205,7 @@
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)lib\glfw-3.3.2\$(Platform);$(SolutionDir)lib\glew-2.1.0\lib\Release\$(Platform);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalDependencies>opencv_world452.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

Visual Studio project filters file (.vcxproj.filters)

@@ -57,6 +57,12 @@
<ClCompile Include="src\computervision\BackgroundRemover.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\computervision\OpenPoseVideo.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\computervision\async\async_arm_detection.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\entities\Camera.h">
@@ -104,8 +110,23 @@
<ClInclude Include="src\computervision\BackgroundRemover.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="src\computervision\OpenPoseVideo.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="src\computervision\async\async_arm_detection.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="src\computervision\async\StaticCameraInstance.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Xml Include="res\haarcascade_frontalface_alt.xml" />
</ItemGroup>
<ItemGroup>
<None Include="res\pose\coco\pose_deploy_linevec.prototxt" />
<None Include="res\pose\mpi\pose_deploy_linevec_faster_4_stages.prototxt" />
<None Include="res\pose\mpi\pose_iter_160000.caffemodel" />
<None Include="..\..\Avans Hogeschool\Kim Veldhoen - Proftaak 2.4\pose_iter_160000.caffemodel" />
</ItemGroup>
</Project>