Merge branch 'develop' into mergebranch-Jasper

This commit is contained in:
SemvdH
2021-06-11 10:27:30 +02:00
committed by GitHub
46 changed files with 28302 additions and 4730 deletions

View File

@@ -1,5 +1,6 @@
#pragma once
#include <iostream>
#include <glm/gtc/matrix_transform.hpp>
#include "../entities/entity.h"
@@ -15,6 +16,18 @@ namespace collision
{
glm::vec3 center_pos;
glm::vec3 size;
void SetRotation(float angle)
{
double sinTheta = glm::sin(glm::radians(angle));
double cosTheta = glm::cos(glm::radians(angle));
float x = size.x * cosTheta + size.z * sinTheta;
float z = size.z * cosTheta - size.x * sinTheta;
size.x = x < 0 ? -x : x;
size.z = z < 0 ? -z : z;
}
};
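For reference, SetRotation above recomputes the box extents after a rotation around the Y axis and keeps them axis-aligned by taking absolute values. A minimal standalone sketch of the same math, assuming only GLM and the standard library (not code from this commit), is:

#include <cmath>
#include <glm/glm.hpp>

// Rotate the (x, z) extents of an axis-aligned box around the Y axis by
// `angle_degrees` and return the new axis-aligned extents (absolute values,
// mirroring Box::SetRotation above).
glm::vec2 RotateExtentsY(float x, float z, float angle_degrees)
{
    const float theta = glm::radians(angle_degrees);
    const float sin_theta = std::sin(theta);
    const float cos_theta = std::cos(theta);
    return glm::vec2(std::abs(x * cos_theta + z * sin_theta),
                     std::abs(z * cos_theta - x * sin_theta));
}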
/*

View File

@@ -1,40 +1,44 @@
#include "collision_handler.h"
#include <iostream>
namespace collision
{
void CheckCollisions(std::vector<entities::CollisionEntity*>& entities)
void CheckCollisions(std::vector<std::shared_ptr<entities::CollisionEntity>> entities)
{
if (entities.size() == 2)
{
if (entities[0]->IsColliding(*entities[1]))
if (entities.size() < 2) { return; }
if (entities.size() == 2)
{
collision::Collision c = { *entities[0], *entities[1] };
entities[0]->OnCollide(c);
entities[1]->OnCollide(c);
if (entities[0]->IsColliding(*entities[1]))
{
collision::Collision c = { *entities[0], *entities[1] };
entities[0]->OnCollide(c);
entities[1]->OnCollide(c);
}
}
}
for (int i = 0; i < entities.size() - 2; i++)
{
entities::CollisionEntity* entity = entities[i];
for (int j = i + 1; i < entities.size() - 1; j++)
{
entities::CollisionEntity* entity2 = entities[j];
if (entity == entity2)
{
continue;
}
if (entity->IsColliding(*entity2))
{
collision::Collision c = { *entity, *entity2 };
entity->OnCollide(c);
entity2->OnCollide(c);
break;
}
}
}
for (int i = 0; i < entities.size() - 2; i++)
{
std::shared_ptr<entities::CollisionEntity> entity = entities[i];
for (int j = i + 1; j < entities.size() - 1; j++)
{
std::shared_ptr<entities::CollisionEntity> entity2 = entities[j];
if (entity == entity2)
{
continue;
}
if (entity->IsColliding(*entity2))
{
collision::Collision c = { *entity, *entity2 };
entity->OnCollide(c);
entity2->OnCollide(c);
break;
}
}
}
}
}

View File

@@ -1,5 +1,6 @@
#pragma once
#include <memory>
#include <vector>
#include "../entities/collision_entity.h"
#include "collision.h"
@@ -12,5 +13,5 @@ namespace collision
*
* @param entities: A list with all the collision entities.
*/
void CheckCollisions(std::vector<entities::CollisionEntity*>& entities);
void CheckCollisions(std::vector<std::shared_ptr<entities::CollisionEntity>> entities);
}
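The signature change above passes the list of shared_ptr entities by value. For reference, a self-contained sketch of the pairwise check CheckCollisions performs, using a hypothetical stand-in type instead of entities::CollisionEntity; note that this version visits every pair, whereas the committed loops bound i by size() - 2 and j by size() - 1 and break after an entity's first hit:

#include <cstddef>
#include <memory>
#include <vector>

// Hypothetical stand-in for entities::CollisionEntity, only to show the pattern.
struct StubEntity
{
    bool IsColliding(const StubEntity& /*other*/) const { return false; }
    void OnCollide() {}
};

void CheckAllPairs(std::vector<std::shared_ptr<StubEntity>> entities)
{
    if (entities.size() < 2) { return; }
    for (std::size_t i = 0; i + 1 < entities.size(); i++)
    {
        for (std::size_t j = i + 1; j < entities.size(); j++)
        {
            if (entities[i]->IsColliding(*entities[j]))
            {
                // in the real handler a collision::Collision is built here
                entities[i]->OnCollide();
                entities[j]->OnCollide();
            }
        }
    }
}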

View File

@@ -1,53 +0,0 @@
#include "FaceDetector.h"
/*
Author: Pierfrancesco Soffritti https://github.com/PierfrancescoSoffritti
*/
namespace computervision
{
Rect getFaceRect(Mat input);
String faceClassifierFileName = "res/haarcascade_frontalface_alt.xml";
CascadeClassifier faceCascadeClassifier;
FaceDetector::FaceDetector(void) {
if (!faceCascadeClassifier.load(faceClassifierFileName))
throw runtime_error("can't load file " + faceClassifierFileName);
}
void FaceDetector::removeFaces(Mat input, Mat output) {
vector<Rect> faces;
Mat frameGray;
cvtColor(input, frameGray, CV_BGR2GRAY);
equalizeHist(frameGray, frameGray);
faceCascadeClassifier.detectMultiScale(frameGray, faces, 1.1, 2, 0 | 2, Size(120, 120)); // HAAR_SCALE_IMAGE is 2
for (size_t i = 0; i < faces.size(); i++) {
rectangle(
output,
Point(faces[i].x, faces[i].y),
Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height),
Scalar(0, 0, 0),
-1
);
}
}
Rect getFaceRect(Mat input) {
vector<Rect> faceRectangles;
Mat inputGray;
cvtColor(input, inputGray, CV_BGR2GRAY);
equalizeHist(inputGray, inputGray);
faceCascadeClassifier.detectMultiScale(inputGray, faceRectangles, 1.1, 2, 0 | 2, Size(120, 120)); // HAAR_SCALE_IMAGE is 2
if (faceRectangles.size() > 0)
return faceRectangles[0];
else
return Rect(0, 0, 1, 1);
}
}

View File

@@ -1,31 +0,0 @@
#pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/types_c.h>
#include <opencv2/objdetect.hpp>
#include <opencv2/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>
/*
Author: Pierfrancesco Soffritti https://github.com/PierfrancescoSoffritti
*/
using namespace cv;
using namespace std;
namespace computervision
{
class FaceDetector {
public:
/**
* @brief Constructor for the class FaceDetector, loads training data from a file
*
*/
FaceDetector(void);
/**
* @brief Detects faces on an image and blocks them with a black rectangle
*
* @param input Input image
* @param output Output image
*/
void removeFaces(Mat input, Mat output);
};
}

View File

@@ -14,6 +14,7 @@
namespace computervision
{
FingerCount::FingerCount(void) {
color_blue = Scalar(255, 0, 0);
color_green = Scalar(0, 255, 0);
@@ -35,9 +36,6 @@ namespace computervision
if (input_image.channels() != 1)
return contours_image;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(input_image, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
// we need at least one contour to work
@@ -45,7 +43,7 @@ namespace computervision
return contours_image;
// find the biggest contour (let's suppose it's our hand)
int biggest_contour_index = -1;
biggest_contour_index = -1;
double biggest_area = 0.0;
for (int i = 0; i < contours.size(); i++) {
@@ -156,6 +154,11 @@ namespace computervision
return contours_image;
}
void FingerCount::DrawHandContours(Mat& image)
{
drawContours(image, contours, biggest_contour_index, color_green, 2, 8, hierarchy);
}
int FingerCount::getAmountOfFingers()
{
return amount_of_fingers;

View File

@@ -31,7 +31,15 @@ namespace computervision
*/
int getAmountOfFingers();
void DrawHandContours(Mat& image);
private:
int biggest_contour_index;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
// colors to use
Scalar color_blue;
Scalar color_green;
@@ -115,5 +123,7 @@ namespace computervision
* @param with_numbers if the numbers should be drawn with the points
*/
void drawVectorPoints(Mat image, vector<Point> points, Scalar color, bool with_numbers);
};
}

View File

@@ -0,0 +1,105 @@
#include "HandDetectRegion.h"
namespace computervision
{
HandDetectRegion::HandDetectRegion(std::string id,int x_pos, int y_pos, int width, int height)
{
region_id = id;
start_x_pos = x_pos;
start_y_pos = y_pos;
region_width = width;
region_height = height;
hand_mask_generated = false;
hand_present = false;
}
void HandDetectRegion::DetectHand(cv::Mat& camera_frame)
{
Mat input_frame = GenerateHandMaskSquare(camera_frame);
frame_out = input_frame.clone();
// detect skin color
skin_detector.drawSkinColorSampler(camera_frame,start_x_pos,start_y_pos,region_width,region_height);
// remove background from image
foreground = background_remover.getForeground(input_frame);
// detect the hand contours
handMask = skin_detector.getSkinMask(foreground);
// draw the hand rectangle on the camera input, and draw text showing if the hand is open or closed.
DrawHandMask(&camera_frame);
//imshow("output" + region_id, frame_out);
//imshow("foreground" + region_id, foreground);
//imshow("handMask" + region_id, handMask);
/*imshow("handDetection", fingerCountDebug);*/
hand_present = hand_calibrator.CheckIfHandPresent(handMask,handcalibration::HandDetectionType::GAME);
//std::string text = (hand_present ? "hand" : "no");
//cv::putText(camera_frame, text, cv::Point(start_x_pos, start_y_pos), cv::FONT_HERSHEY_COMPLEX, 2.0, cv::Scalar(0, 255, 255), 2);
hand_calibrator.SetHandPresent(hand_present);
//draw black rectangle behind calibration information text
cv::rectangle(camera_frame, cv::Rect(0, camera_frame.rows - 55, 450, camera_frame.cols), cv::Scalar(0, 0, 0), -1);
hand_calibrator.DrawBackgroundSkinCalibrated(camera_frame);
}
cv::Mat HandDetectRegion::GenerateHandMaskSquare(cv::Mat img)
{
cv::Mat mask = cv::Mat::zeros(img.size(), img.type());
cv::Mat distance_img = cv::Mat::zeros(img.size(), img.type());
cv::rectangle(mask, cv::Rect(start_x_pos, start_y_pos, region_width, region_height), cv::Scalar(255, 255, 255), -1);
img.copyTo(distance_img, mask);
hand_mask_generated = true;
return distance_img;
}
bool HandDetectRegion::DrawHandMask(cv::Mat* input)
{
if (!hand_mask_generated) return false;
rectangle(*input, Rect(start_x_pos, start_y_pos, region_width, region_height), (hand_present ? Scalar(0, 255, 0) : Scalar(0,0,255)),2);
return true;
}
bool HandDetectRegion::IsHandPresent()
{
return hand_present;
}
void HandDetectRegion::CalibrateBackground()
{
std::cout << "calibrating background " << region_id << std::endl;
background_remover.calibrate(frame_out);
hand_calibrator.SetBackGroundCalibrated(true);
}
void HandDetectRegion::CalibrateSkin()
{
skin_detector.calibrate(frame_out);
hand_calibrator.SetSkinCalibration(true);
}
std::vector<int> HandDetectRegion::CalculateSkinTresholds()
{
std::cout << "calibrating skin " << region_id << std::endl;
hand_calibrator.SetSkinCalibration(true);
return skin_detector.calibrateAndReturn(frame_out);
}
void HandDetectRegion::setSkinTresholds(std::vector<int>& tresholds)
{
std::cout << "setting skin " << region_id << std::endl;
skin_detector.setTresholds(tresholds);
hand_calibrator.SetSkinCalibration(true);
}
}

View File

@@ -0,0 +1,56 @@
#pragma once
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "async/StaticCameraInstance.h"
#include "calibration/HandCalibrator.h"
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FingerCount.h"
namespace computervision
{
class HandDetectRegion
{
public:
HandDetectRegion(std::string id,int x_pos, int y_pos, int width, int height);
void SetXPos(int x) { start_x_pos = x; }
void SetYPos(int y) { start_y_pos = y; }
int GetXPos() { return start_x_pos; }
int GetYPos() { return start_y_pos; }
void SetWidth(int width) { region_width = width; }
void SetHeigth(int height) { region_height = height; }
int GetWidth() { return region_width; }
int GetHeight() { return region_height; }
cv::Mat GenerateHandMaskSquare(cv::Mat img);
void DetectHand(cv::Mat& camera_frame);
bool IsHandPresent();
void CalibrateBackground();
void CalibrateSkin();
std::vector<int> CalculateSkinTresholds();
void setSkinTresholds(std::vector<int>& tresholds);
private:
int start_x_pos;
int start_y_pos;
int region_height;
int region_width;
bool hand_mask_generated;
bool hand_present;
cv::Mat frame, frame_out, handMask, foreground, fingerCountDebug;
BackgroundRemover background_remover;
SkinDetector skin_detector;
handcalibration::HandCalibrator hand_calibrator;
std::string region_id;
bool DrawHandMask(cv::Mat* input);
};
}
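A hedged usage sketch for the region API declared above; the surrounding loop, window name, and key bindings are illustrative (the 'b'/'s' keys mirror ObjectDetection later in this commit), not code from this repository:

#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>
#include "HandDetectRegion.h"

// Illustrative loop: feed camera frames to one detection region and query it.
void RunRegionDemo(cv::VideoCapture& cap)
{
    computervision::HandDetectRegion region("left", 10, 100, 150, 150);
    cv::Mat frame;
    while (cap.read(frame))
    {
        region.DetectHand(frame);              // draws the region rectangle and status text on frame
        bool present = region.IsHandPresent(); // true once a big enough skin contour is found
        (void)present;                         // e.g. map the flag to a game input
        cv::imshow("camera", frame);
        int key = cv::waitKey(1);
        if (key == 'b') { region.CalibrateBackground(); }
        if (key == 's') { region.CalibrateSkin(); }
        if (key == 27) { break; }              // Esc quits
    }
}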

View File

@@ -6,117 +6,140 @@
#include "ObjectDetection.h"
#include "BackgroundRemover.h"
#include "SkinDetector.h"
#include "FaceDetector.h"
#include "FingerCount.h"
#include "async/StaticCameraInstance.h"
#include "calibration/HandCalibrator.h"
namespace computervision
{
cv::VideoCapture cap(0);
cv::Mat img, imgGray, img2, img2Gray, img3, img4;
cv::Mat img, img_gray, img2, img2_gray, img3, img4;
int handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight;
bool handMaskGenerated = false;
int hand_mask_start_x_pos, hand_mask_start_y_pos, hand_mask_width, hand_mask_height;
bool hand_mask_generated = false;
Mat frame, frameOut, handMask, foreground, fingerCountDebug;
BackgroundRemover backgroundRemover;
SkinDetector skinDetector;
FaceDetector faceDetector;
FingerCount fingerCount;
Mat frame, frame_out, handMask, foreground, fingerCountDebug;
BackgroundRemover background_remover;
SkinDetector skin_detector;
FingerCount finger_count;
handcalibration::HandCalibrator hand_calibrator;
cv::VideoCapture cap = static_camera::getCap();
ObjectDetection::ObjectDetection()
{
}
cv::Mat ObjectDetection::readCamera() {
cv::Mat ObjectDetection::ReadCamera() {
cap.read(img);
return img;
}
bool ObjectDetection::detectHand(Mat cameraFrame)
cv::VideoCapture ObjectDetection::GetCap()
{
Mat inputFrame = generateHandMaskSquare(cameraFrame);
frameOut = inputFrame.clone();
return cap;
}
bool ObjectDetection::DetectHand(Mat camera_frame, bool& hand_present)
{
Mat input_frame = GenerateHandMaskSquare(camera_frame);
frame_out = input_frame.clone();
// detect skin color
skinDetector.drawSkinColorSampler(frameOut);
skin_detector.drawSkinColorSampler(camera_frame);
// remove background from image
foreground = backgroundRemover.getForeground(inputFrame);
foreground = background_remover.getForeground(input_frame);
// detect the hand contours
handMask = skinDetector.getSkinMask(foreground);
handMask = skin_detector.getSkinMask(foreground);
// count the amount of fingers and put the info on the matrix
fingerCountDebug = fingerCount.findFingersCount(handMask, frameOut);
fingerCountDebug = finger_count.findFingersCount(handMask, frame_out);
// get the amount of fingers
int fingers_amount = fingerCount.getAmountOfFingers();
int fingers_amount = finger_count.getAmountOfFingers();
// draw the hand rectangle on the camera input, and draw text showing if the hand is open or closed.
drawHandMaskRect(&cameraFrame);
string hand_text = fingers_amount > 0 ? "open" : "closed";
putText(cameraFrame,hand_text, Point(10, 75), FONT_HERSHEY_PLAIN, 2.0, Scalar(255, 0, 255),3);
imshow("camera", cameraFrame);
DrawHandMask(&camera_frame);
hand_calibrator.SetAmountOfFingers(fingers_amount);
finger_count.DrawHandContours(camera_frame);
hand_calibrator.DrawHandCalibrationText(camera_frame);
imshow("camera", camera_frame);
/*imshow("output", frame_out);
imshow("foreground", foreground);
imshow("handMask", handMask);
imshow("handDetection", fingerCountDebug);*/
hand_present = hand_calibrator.CheckIfHandPresent(handMask,handcalibration::HandDetectionType::MENU);
hand_calibrator.SetHandPresent(hand_present);
//imshow("output", frameOut);
//imshow("foreground", foreground);
//imshow("handMask", handMask);
//imshow("handDetection", fingerCountDebug);
int key = waitKey(1);
if (key == 98) // b, calibrate the background
backgroundRemover.calibrate(inputFrame);
{
background_remover.calibrate(input_frame);
hand_calibrator.SetBackGroundCalibrated(true);
}
else if (key == 115) // s, calibrate the skin color
skinDetector.calibrate(inputFrame);
{
skin_detector.calibrate(input_frame);
hand_calibrator.SetSkinCalibration(true);
}
return fingers_amount > 0;
}
void ObjectDetection::calculateDifference()
void ObjectDetection::CalculateDifference()
{
cap.read(img);
cap.read(img2);
cv::cvtColor(img, imgGray, cv::COLOR_RGBA2GRAY);
cv::cvtColor(img2, img2Gray, cv::COLOR_RGBA2GRAY);
cv::cvtColor(img, img_gray, cv::COLOR_RGBA2GRAY);
cv::cvtColor(img2, img2_gray, cv::COLOR_RGBA2GRAY);
cv::absdiff(imgGray, img2Gray, img3);
cv::absdiff(img_gray, img2_gray, img3);
cv::threshold(img3, img4, 50, 170, cv::THRESH_BINARY);
imshow("threshold", img4);
}
cv::Mat ObjectDetection::generateHandMaskSquare(cv::Mat img)
cv::Mat ObjectDetection::GenerateHandMaskSquare(cv::Mat img)
{
handMaskStartXPos = 20;
handMaskStartYPos = img.rows / 5;
handMaskWidth = img.cols / 3;
handMaskHeight = img.cols / 3;
hand_mask_start_x_pos = 20;
hand_mask_start_y_pos = img.rows / 5;
hand_mask_width = img.cols / 3;
hand_mask_height = img.cols / 3;
cv::Mat mask = cv::Mat::zeros(img.size(), img.type());
cv::Mat dstImg = cv::Mat::zeros(img.size(), img.type());
cv::Mat distance_img = cv::Mat::zeros(img.size(), img.type());
cv::rectangle(mask, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255), -1);
cv::rectangle(mask, Rect(hand_mask_start_x_pos, hand_mask_start_y_pos, hand_mask_width, hand_mask_height), Scalar(255, 255, 255), -1);
img.copyTo(dstImg, mask);
img.copyTo(distance_img, mask);
handMaskGenerated = true;
return dstImg;
hand_mask_generated = true;
return distance_img;
}
bool ObjectDetection::drawHandMaskRect(cv::Mat* input)
bool ObjectDetection::DrawHandMask(cv::Mat* input)
{
if (!handMaskGenerated) return false;
rectangle(*input, Rect(handMaskStartXPos, handMaskStartYPos, handMaskWidth, handMaskHeight), Scalar(255, 255, 255));
if (!hand_mask_generated) return false;
rectangle(*input, Rect(hand_mask_start_x_pos, hand_mask_start_y_pos, hand_mask_width, hand_mask_height), Scalar(255, 255, 255));
return true;
}
void ObjectDetection::showWebcam()
void ObjectDetection::ShowWebcam()
{
imshow("Webcam image", img);
}

View File

@@ -27,13 +27,13 @@ namespace computervision
* @brief Displays an image of the current webcam-footage
*
*/
void showWebcam();
void ShowWebcam();
/**
* @brief Calculates the difference between two images
* and outputs an image that only shows the difference
*
*/
void calculateDifference();
void CalculateDifference();
/**
* @brief generates the square that will hold the mask in which the hand will be detected.
@@ -41,29 +41,51 @@ namespace computervision
* @param img the current camera frame
* @return a matrix containing the mask
*/
cv::Mat generateHandMaskSquare(cv::Mat img);
cv::Mat GenerateHandMaskSquare(cv::Mat img);
/**
* @brief reads a frame from the camera and returns it as a matrix.
*
* @return the camera frame in a matrix
*/
cv::Mat readCamera();
cv::Mat ReadCamera();
/**
* @brief detects a hand based on the given hand mask input frame.
*
* @param inputFrame the input frame from the camera
* @param hand_present boolean that will hold true if the hand is detected, false if not.
* @return true if hand is open, false if hand is closed
*/
bool detectHand(cv::Mat cameraFrame);
bool DetectHand(cv::Mat camera_frame, bool& hand_present);
/**
* @brief draws the hand mask rectangle on the given input matrix.
*
* @param input the input matrix to draw the rectangle on
*/
bool drawHandMaskRect(cv::Mat *input);
bool DrawHandMask(cv::Mat *input);
/**
* @brief checks if the hand of the user is open.
*
* @return true if the hand is open, false if not.
*/
bool IsHandOpen();
/**
* @brief checks whether the hand is held within the detection square.
*
* @return true if the hand is in the detection square, false if not.
*/
bool IsHandPresent();
cv::VideoCapture GetCap();
private:
bool is_hand_open;
bool is_hand_present;
};
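A hedged call-site sketch for the reworked DetectHand above, which now reports hand presence through an out-parameter and returns whether the hand is open; the wrapper function name is illustrative:

#include <opencv2/core.hpp>
#include "ObjectDetection.h"

// Illustrative call site: read one frame and collect both outputs of DetectHand.
void PollHand(computervision::ObjectDetection& detection)
{
    cv::Mat frame = detection.ReadCamera();
    bool hand_present = false;
    bool hand_open = detection.DetectHand(frame, hand_present);
    // hand_present: a large enough skin contour was found inside the mask square
    // hand_open:    at least one finger was counted on that contour
    (void)hand_open;
}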

View File

@@ -0,0 +1,108 @@
#include "OpenPoseVideo.h"
using namespace std;
using namespace cv;
using namespace cv::dnn;
namespace computervision
{
#define MPI
#ifdef MPI
const int POSE_PAIRS[7][2] =
{
{0,1}, {1,2}, {2,3},
{3,4}, {1,5}, {5,6},
{6,7}
};
string protoFile = "res/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt";
string weightsFile = "res/pose/mpi/pose_iter_160000.caffemodel";
int nPoints = 8;
#endif
#ifdef COCO
const int POSE_PAIRS[17][2] =
{
{1,2}, {1,5}, {2,3},
{3,4}, {5,6}, {6,7},
{1,8}, {8,9}, {9,10},
{1,11}, {11,12}, {12,13},
{1,0}, {0,14},
{14,16}, {0,15}, {15,17}
};
string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
string weightsFile = "pose/coco/pose_iter_440000.caffemodel";
int nPoints = 18;
#endif
Net net;
void OpenPoseVideo::setup() {
net = readNetFromCaffe(protoFile, weightsFile);
net.setPreferableBackend(DNN_TARGET_CPU);
}
void OpenPoseVideo::movementSkeleton(Mat& inputImage, std::function<void(std::vector<Point>&, cv::Mat& poinst_on_image)> f) {
std::cout << "movement skeleton start" << std::endl;
int inWidth = 368;
int inHeight = 368;
float thresh = 0.01;
Mat frame;
int frameWidth = inputImage.size().width;
int frameHeight = inputImage.size().height;
double t = (double)cv::getTickCount();
std::cout << "reading input image and blob" << std::endl;
frame = inputImage;
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
std::cout << "done reading image and blob" << std::endl;
net.setInput(inpBlob);
std::cout << "done setting input to net" << std::endl;
Mat output = net.forward();
std::cout << "time took to set input and forward: " << t << std::endl;
int H = output.size[2];
int W = output.size[3];
std::cout << "about to find position of boxy parts" << std::endl;
// find the position of the body parts
vector<Point> points(nPoints);
for (int n = 0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
Mat probMap(H, W, CV_32F, output.ptr(0, n));
Point2f p(-1, -1);
Point maxLoc;
double prob;
minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
p = maxLoc;
p.x *= (float)frameWidth / W;
p.y *= (float)frameHeight / H;
circle(frame, cv::Point((int)p.x, (int)p.y), 8, Scalar(0, 255, 255), -1);
cv::putText(frame, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1.1, cv::Scalar(0, 0, 255), 2);
}
points[n] = p;
}
cv::putText(frame, cv::format("time taken = %.2f sec", t), cv::Point(50, 50), cv::FONT_HERSHEY_COMPLEX, .8, cv::Scalar(255, 50, 0), 2);
std::cout << "time taken: " << t << std::endl;
//imshow("Output-Keypoints", frame);
//imshow("Output-Skeleton", frame);
std::cout << "about to call points receiving method" << std::endl;
f(points,frame);
}
}

View File

@@ -0,0 +1,19 @@
#pragma once
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
namespace computervision
{
class OpenPoseVideo{
private:
public:
void movementSkeleton(Mat& inputImage, std::function<void(std::vector<Point>&, cv::Mat& poinst_on_image)> f);
void setup();
};
}
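The callback parameter of movementSkeleton receives the detected keypoints plus the frame they were drawn on; a hedged sketch of wiring a lambda to it (the wrapper name is illustrative, and setup() is assumed to have been called so the Caffe model is loaded):

#include <functional>
#include <vector>
#include <opencv2/core.hpp>
#include "OpenPoseVideo.h"

// Illustrative wiring: run pose estimation on one frame and consume the result
// in a lambda. points[n] is (-1, -1) when keypoint n fell below the threshold.
void RunPoseOnFrame(computervision::OpenPoseVideo& pose, cv::Mat& frame)
{
    pose.movementSkeleton(frame,
        [](std::vector<cv::Point>& points, cv::Mat& points_on_image)
        {
            // e.g. hand the annotated frame to the renderer, or read arm keypoints
            (void)points;
            (void)points_on_image;
        });
}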

View File

@@ -1,4 +1,5 @@
#include "SkinDetector.h"
#include <iostream>
/*
Author: Pierfrancesco Soffritti https://github.com/PierfrancescoSoffritti
@@ -23,7 +24,7 @@ namespace computervision
int frameWidth = input.size().width, frameHeight = input.size().height;
int rectangleSize = 25;
Scalar rectangleColor = Scalar(255, 0, 255);
Scalar rectangleColor = Scalar(0, 255, 255);
skinColorSamplerRectangle1 = Rect(frameWidth / 5, frameHeight / 2, rectangleSize, rectangleSize);
skinColorSamplerRectangle2 = Rect(frameWidth / 5, frameHeight / 3, rectangleSize, rectangleSize);
@@ -41,6 +42,29 @@ namespace computervision
);
}
void SkinDetector::drawSkinColorSampler(Mat input,int x, int y,int width, int height) {
int frameWidth = width, frameHeight = height;
int rectangleSize = 25;
Scalar rectangleColor = Scalar(0, 255, 255);
skinColorSamplerRectangle1 = Rect(frameWidth / 5 + x, frameHeight / 2 + y, rectangleSize, rectangleSize);
skinColorSamplerRectangle2 = Rect(frameWidth / 5 + x, frameHeight / 3 + y, rectangleSize, rectangleSize);
rectangle(
input,
skinColorSamplerRectangle1,
rectangleColor
);
rectangle(
input,
skinColorSamplerRectangle2,
rectangleColor
);
}
void SkinDetector::calibrate(Mat input) {
Mat hsvInput;
@@ -54,6 +78,19 @@ namespace computervision
calibrated = true;
}
std::vector<int> SkinDetector::calibrateAndReturn(Mat input)
{
Mat hsvInput;
cvtColor(input, hsvInput, CV_BGR2HSV);
Mat sample1 = Mat(hsvInput, skinColorSamplerRectangle1);
Mat sample2 = Mat(hsvInput, skinColorSamplerRectangle2);
calibrated = true;
return calculateAndReturnTresholds(sample1, sample2);
}
void SkinDetector::calculateThresholds(Mat sample1, Mat sample2) {
int offsetLowThreshold = 80;
int offsetHighThreshold = 30;
@@ -75,6 +112,39 @@ namespace computervision
//vHighThreshold = 255;
}
std::vector<int> SkinDetector::calculateAndReturnTresholds(Mat sample1, Mat sample2)
{
calculateThresholds(sample1, sample2);
std::vector<int> res;
res.push_back(hLowThreshold);
res.push_back(hHighThreshold);
res.push_back(sLowThreshold);
res.push_back(sHighThreshold);
res.push_back(vLowThreshold);
res.push_back(vHighThreshold);
return res;
}
void SkinDetector::setTresholds(std::vector<int>& tresholds)
{
if (tresholds.size() != 6)
{
std::cout << "tresholds array not the right size!" << std::endl;
return;
}
hLowThreshold = tresholds[0];
hHighThreshold = tresholds[1];
sLowThreshold = tresholds[2];
sHighThreshold = tresholds[3];
vLowThreshold = tresholds[4];
vHighThreshold = tresholds[5];
calibrated = true;
}
Mat SkinDetector::getSkinMask(Mat input) {
Mat skinMask;

View File

@@ -24,6 +24,9 @@ namespace computervision
*/
void drawSkinColorSampler(Mat input);
void drawSkinColorSampler(Mat input, int x, int y, int width, int heigth);
/*
* @brief calibrates the skin color detector with the given input frame
*
@@ -31,6 +34,10 @@ namespace computervision
*/
void calibrate(Mat input);
std::vector<int> calibrateAndReturn(Mat input);
void setTresholds(std::vector<int>& tresholds);
/*
* @brief gets the mask for the hand
*
@@ -63,6 +70,8 @@ namespace computervision
*/
void calculateThresholds(Mat sample1, Mat sample2);
std::vector<int> calculateAndReturnTresholds(Mat sample1, Mat sample2);
/**
* @brief the opening. it generates the structuring element and performs the morphological transformations required to detect the hand.
* This needs to be done to get the skin mask.

View File

@@ -0,0 +1,12 @@
#pragma once
#include <opencv2/videoio.hpp>
namespace static_camera
{
static cv::VideoCapture getCap()
{
static cv::VideoCapture cap(0);
return cap;
}
};

View File

@@ -0,0 +1,46 @@
#include <iostream>
#include "async_arm_detection.h"
#include "../OpenPoseVideo.h"
#include <thread>
#include "StaticCameraInstance.h"
namespace computervision
{
AsyncArmDetection::AsyncArmDetection()
{
}
void AsyncArmDetection::run_arm_detection(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op)
{
VideoCapture cap = static_camera::getCap();
std::cout << "STARTING THREAD LAMBDA" << std::endl;
/*cv::VideoCapture cap = static_camera::GetCap();*/
if (!cap.isOpened())
{
std::cout << "capture was closed, opening..." << std::endl;
cap.open(0);
}
while (true)
{
Mat img;
cap.read(img);
op.movementSkeleton(img, points_ready_func);
}
}
void AsyncArmDetection::start(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op)
{
std::cout << "starting function" << std::endl;
std::thread async_arm_detect_thread(&AsyncArmDetection::run_arm_detection,this, points_ready_func, op);
async_arm_detect_thread.detach(); // makes sure the thread is detached from the variable.
}
}

View File

@@ -0,0 +1,23 @@
#pragma once
#include <vector>
#include <opencv2/core/types.hpp>
#include <opencv2/videoio.hpp>
#include <functional>
#include "../OpenPoseVideo.h"
#include "StaticCameraInstance.h"
namespace computervision
{
class AsyncArmDetection
{
public:
AsyncArmDetection(void);
void start(std::function<void(std::vector<cv::Point>, cv::Mat poinst_on_image)>, computervision::OpenPoseVideo op);
private:
void run_arm_detection(std::function<void(std::vector<Point>, cv::Mat poinst_on_image)> points_ready_func, OpenPoseVideo op);
};
}
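start() above detaches a worker thread that keeps reading camera frames and pushing them through OpenPoseVideo; a hedged launch sketch (the function name and callback body are illustrative, and the callback runs on the detached thread, so whatever it touches must be thread-safe):

#include <vector>
#include <opencv2/core.hpp>
#include "async_arm_detection.h"

// Illustrative launch: load the pose network, then hand a callback and the
// detector to the async runner, which copies both onto its own thread.
void StartArmDetection()
{
    computervision::OpenPoseVideo pose;
    pose.setup();  // loads the Caffe model once, before the worker starts

    computervision::AsyncArmDetection arm_detection;
    arm_detection.start([](std::vector<cv::Point> points, cv::Mat points_on_image)
    {
        // runs on the worker thread for every processed frame
        (void)points;
        (void)points_on_image;
    }, pose);
}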

View File

@@ -0,0 +1,92 @@
#include "HandCalibrator.h"
#include <iostream>
#define MIN_MENU_HAND_SIZE 10000
#define MIN_GAME_HAND_SIZE 3000 // todo change
namespace computervision
{
namespace handcalibration
{
HandCalibrator::HandCalibrator()
{
}
void HandCalibrator::DrawHandCalibrationText(cv::Mat& output_frame)
{
cv::rectangle(output_frame, cv::Rect(0, 0, output_frame.cols, 40), cv::Scalar(0, 0, 0), -1);
cv::putText(output_frame, "Hand calibration", cv::Point(output_frame.cols / 2 - 100, 25), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(18, 219, 65), 2);
cv::putText(output_frame, "press 'b' to calibrate background,then press 's' to calibrate skin tone", cv::Point(5, 35), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(18, 219, 65), 1);
cv::rectangle(output_frame, cv::Rect(0, output_frame.rows - 80, 450, output_frame.cols), cv::Scalar(0, 0, 0), -1);
cv::putText(output_frame, "hand in frame:", cv::Point(5, output_frame.rows - 50), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 255, 0), 1);
cv::rectangle(output_frame, cv::Rect(420, output_frame.rows - 67, 15, 15), hand_present ? cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255), -1);
DrawBackgroundSkinCalibrated(output_frame);
if (hand_present)
{
std::string hand_text = fingers_amount > 0 ? "open" : "closed";
cv::putText(output_frame, hand_text, cv::Point(10, 75), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 0, 255), 3);
}
}
void HandCalibrator::DrawBackgroundSkinCalibrated(cv::Mat& output_frame)
{
cv::putText(output_frame, "background calibrated:", cv::Point(5, output_frame.rows - 30), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 255, 0), 1);
cv::rectangle(output_frame, cv::Rect(420, output_frame.rows - 47, 15, 15), background_calibrated ? cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255), -1);
cv::putText(output_frame, "skin color calibrated:", cv::Point(5, output_frame.rows - 10), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 255, 0), 1);
cv::rectangle(output_frame, cv::Rect(420, output_frame.rows - 27, 15, 15), skintone_calibrated ? cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255), -1);
}
void HandCalibrator::SetSkinCalibration(bool val)
{
skintone_calibrated = val;
}
void HandCalibrator::SetBackGroundCalibrated(bool val)
{
background_calibrated = val;
}
void HandCalibrator::SetHandPresent(bool val)
{
hand_present = val;
}
void HandCalibrator::SetAmountOfFingers(int amount)
{
fingers_amount = amount;
}
bool HandCalibrator::CheckIfHandPresent(cv::Mat input_image, HandDetectionType type)
{
std::vector<std::vector<cv::Point>> points;
cv::findContours(input_image, points, cv::RetrievalModes::RETR_LIST, cv::ContourApproximationModes::CHAIN_APPROX_SIMPLE);
if (points.size() == 0) return false;
for (int p = 0; p < points.size(); p++)
{
int area = cv::contourArea(points[p]);
if (type == handcalibration::HandDetectionType::MENU)
if (area > MIN_MENU_HAND_SIZE) return true;
if (type == handcalibration::HandDetectionType::GAME)
if (area > MIN_GAME_HAND_SIZE) return true;
}
return false;
}
}
}

View File

@@ -0,0 +1,76 @@
#pragma once
#include <opencv2/core/base.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
namespace computervision
{
namespace handcalibration
{
enum class HandDetectionType
{
MENU,
GAME
};
class HandCalibrator
{
public:
HandCalibrator();
/**
* @brief draws the text to show the status of the calibration on the image
*
* @param output_frame the frame to draw on.
*/
void DrawHandCalibrationText(cv::Mat& output_frame);
/**
* @brief sets the skin calibration variable.
*
* @param val the value to set
*/
void SetSkinCalibration(bool val);
/**
* @brief sets the background calibration variable.
*
* @param val the value to set
*/
void SetBackGroundCalibrated(bool val);
/**
* @brief sets the value for if the hand is present.
*
* @param val the value to set.
*/
void SetHandPresent(bool val);
/**
* @brief checks if the hand is present in the given image
*
* @param input_image the input image to check.
*/
bool CheckIfHandPresent(cv::Mat input_image, HandDetectionType type);
/**
* @brief sets the amount of fingers that are currently detected.
*
* @param amount the amount of fingers.
*/
void SetAmountOfFingers(int amount);
void DrawBackgroundSkinCalibrated(cv::Mat& output_frame);
private:
bool background_calibrated;
bool skintone_calibrated;
bool hand_present;
int fingers_amount;
};
}
}

View File

@@ -1,4 +1,6 @@
#include "camera.h"
#include <iostream>
#include "../toolbox/toolbox.h"
namespace entities
{
@@ -7,9 +9,20 @@ namespace entities
rotation(rotation)
{}
void Camera::Follow(glm::vec3 follow_position) {
//set position to follow in front of the camera
follow_position.z += 100;
//set position to follow a bit lower
follow_position.y += 50;
//move position from original position to given position with smoothing
position = toolbox::Lerp(position, follow_position, 0.1);
}
void Camera::Move(GLFWwindow* window)
{
float movement_speed = 0;
float up_down_speed = 0;
float side_speed = 0;
if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
{
@@ -23,28 +36,25 @@ namespace entities
if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS)
{
rotation.y += ROT_SPEED;
side_speed += SPEED;
}
if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS)
{
rotation.y -= ROT_SPEED;
side_speed -= SPEED;
}
if (glfwGetKey(window, GLFW_KEY_SPACE) == GLFW_PRESS)
{
rotation.x -= ROT_SPEED;
up_down_speed += UP_SPEED;
}
if (glfwGetKey(window, GLFW_KEY_LEFT_SHIFT) == GLFW_PRESS)
{
rotation.x += ROT_SPEED;
up_down_speed -= UP_SPEED;
}
float dx = glm::cos(glm::radians(rotation.y + 90)) * movement_speed;
float dz = glm::sin(glm::radians(rotation.y + 90)) * movement_speed;
position.x += dx;
position.z += dz;
position.x += side_speed;
position.z += movement_speed;
position.y += up_down_speed;
}
}
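Follow above eases the camera toward an offset of the target using toolbox::Lerp with a factor of 0.1; the same interpolation expressed with glm::mix, as a standalone sketch (toolbox::Lerp itself is assumed to be a plain linear interpolation):

#include <glm/glm.hpp>

// Standalone equivalent of the smoothing in Camera::Follow: each frame the
// camera moves 10% of the remaining distance toward the offset follow point.
glm::vec3 FollowStep(glm::vec3 camera_pos, glm::vec3 follow_position)
{
    follow_position.z += 100; // keep the follow point in front of the camera
    follow_position.y += 50;  // and offset it vertically, as in Camera::Follow
    return glm::mix(camera_pos, follow_position, 0.1f); // lerp(a, b, t) = a + (b - a) * t
}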

View File

@@ -13,13 +13,14 @@ namespace entities
{
private:
// The movement speed of the camera
const float SPEED = 0.52f;
const float ROT_SPEED = 1.0f;
const float SPEED = 0.5f;
const float UP_SPEED = 1.0f;
glm::vec3 position;
glm::vec3 rotation;
public:
Camera(const ::glm::vec3& position, const ::glm::vec3& rotation);
/*
@@ -28,6 +29,13 @@ namespace entities
* @param window: The OpenGL window
*/
void Move(GLFWwindow* window);
/*
* @brief follows the given position with smoothing
*
* @param follow_position the position of the object the camera has to follow
*/
void Follow(glm::vec3 follow_position);
inline glm::vec3 GetPosition() const{ return position; }
inline glm::vec3 GetRotation() const{ return rotation; }

View File

@@ -14,7 +14,9 @@ namespace entities
{
if (on_collide != nullptr)
{
on_collide(collision);
on_collide(collision);
}
}

View File

@@ -2,6 +2,8 @@
#include "entity.h"
#include "../collision/collision.h"
#include <memory>
#include <functional>
namespace entities
{
@@ -16,7 +18,8 @@ namespace entities
glm::vec3 min_xyz;
glm::vec3 max_xyz;
void (*on_collide)(const collision::Collision& collision);
//void (*on_collide)(const collision::Collision& collision);
std::function<void(const collision::Collision&)> on_collide;
public:
CollisionEntity(const models::TexturedModel& model, const glm::vec3& position, const glm::vec3& rotation,
@@ -52,7 +55,7 @@ namespace entities
*
* @param function: A function pointer to a function with the collision behaviour
*/
void SetCollisionBehaviour(void (*function)(const collision::Collision& collision))
void SetCollisionBehaviour(std::function<void(const collision::Collision&)> function)
{ if (function != nullptr) { on_collide = function; } }
protected:

View File

@@ -0,0 +1,258 @@
#include "house_generator.h"
#include <functional>
#include <iostream>
#include "../renderEngine/obj_loader.h"
#include "../renderEngine/Loader.h"
#include "../toolbox/toolbox.h"
#include "collision_entity.h"
namespace entities
{
HouseGenerator::HouseGenerator()
{
models::RawModel raw_model = render_engine::LoadObjModel("res/HouseNew.obj");
default_texture = { render_engine::loader::LoadTexture("res/Texture.png") };
default_texture.shine_damper = 10;
house_model = { raw_model, default_texture };
GenerateFurnitureModels();
}
std::deque<std::shared_ptr<Entity>> HouseGenerator::GenerateHouse(const glm::vec3& position, float y_rotation)
{
std::deque<std::shared_ptr<Entity>> furniture;
// Add house
furniture.push_front(std::make_shared<Entity>(house_model, position, glm::vec3(0, y_rotation, 0), HOUSE_SIZE));
for(int i = 0; i<toolbox::Random(1,4);i++)
{
FurnitureType type = FurnitureType(toolbox::Random(0, furniture_models.size() - 1));
models::TexturedModel model = GetFurnitureModel(type);
glm::vec3 model_pos = glm::vec3(position.x, position.y, position.z);
collision::Box model_box = { model_pos, model.raw_model.model_size };
model_box.SetRotation(-90);
furniture.push_back(std::make_shared<CollisionEntity>(model, model_pos, glm::vec3(0, -90, 0), HOUSE_SIZE * 2, model_box));
}
/*
// Add furniture
models::TexturedModel couch = GetFurnitureModel(FurnitureType::COUCH);
glm::vec3 couch_pos = glm::vec3(position.x + 200, position.y, position.z + 10);
collision::Box couch_box = { couch_pos, couch.raw_model.model_size };
couch_box.SetRotation(-90);
furniture.push_back(std::make_shared<CollisionEntity>(couch, couch_pos, glm::vec3(0, -90, 0), HOUSE_SIZE * 2, couch_box));
models::TexturedModel table = GetFurnitureModel(FurnitureType::TABLE);
glm::vec3 table_pos = glm::vec3(position.x - 30, position.y, position.z);
collision::Box table_box = { table_pos, table.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(table, table_pos, glm::vec3(0, 0, 0), HOUSE_SIZE * 1.3, table_box));
models::TexturedModel chair = GetFurnitureModel(FurnitureType::CHAIR);
glm::vec3 chair_pos = glm::vec3(position.x - 50, position.y, position.z + 220);
collision::Box chair_box = { chair_pos, chair.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(chair, chair_pos, glm::vec3(0, 0, 0), HOUSE_SIZE, chair_box));
models::TexturedModel plant = GetFurnitureModel(FurnitureType::PLANT);
glm::vec3 plant_pos = glm::vec3(position.x - 50, position.y, position.z + 220);
collision::Box plant_box = { plant_pos, plant.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(plant, plant_pos, glm::vec3(0, 0, 0), HOUSE_SIZE, plant_box));
models::TexturedModel guitar = GetFurnitureModel(FurnitureType::GUITAR);
glm::vec3 guitar_pos = glm::vec3(position.x - 50, position.y, position.z + 220);
collision::Box guitar_box = { guitar_pos, guitar.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(guitar, guitar_pos, glm::vec3(0, 0, 0), HOUSE_SIZE, guitar_box));
models::TexturedModel bookshelf = GetFurnitureModel(FurnitureType::BOOKSHELF);
glm::vec3 bookshelf_pos = glm::vec3(position.x - 50, position.y, position.z + 220);
collision::Box bookshelf_box = { bookshelf_pos, bookshelf.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(bookshelf, bookshelf_pos, glm::vec3(0, 0, 0), HOUSE_SIZE, bookshelf_box));
models::TexturedModel lamp = GetFurnitureModel(FurnitureType::LAMP);
glm::vec3 lamp_pos = glm::vec3(position.x - 50, position.y, position.z + 220);
collision::Box lamp_box = { lamp_pos, lamp.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(lamp, lamp_pos, glm::vec3(0, 0, 0), HOUSE_SIZE, lamp_box));
models::TexturedModel ceiling_object = GetFurnitureModel(FurnitureType::CEILING_OBJECTS);
glm::vec3 ceiling_object_pos = glm::vec3(position.x - 50, position.y, position.z + 220);
collision::Box ceiling_object_box = { ceiling_object_pos, ceiling_object.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(ceiling_object, ceiling_object_pos, glm::vec3(0, 0, 0), HOUSE_SIZE, ceiling_object_box));
models::TexturedModel misc = GetFurnitureModel(FurnitureType::MISC);
glm::vec3 misc_pos = glm::vec3(position.x - 50, position.y, position.z + 220);
collision::Box misc_box = { misc_pos, misc.raw_model.model_size };
furniture.push_back(std::make_shared<CollisionEntity>(misc, misc_pos, glm::vec3(0, 0, 0), HOUSE_SIZE, misc_box));
*/
return furniture;
}
models::TexturedModel HouseGenerator::GetFurnitureModel(FurnitureType furniture)
{
const auto found = furniture_models.find(furniture);
if (found == furniture_models.end())
{
std::cerr << "OH NEEEEEEEEEEEEEEE";
}
auto models = found->second;
const int modelNumber = toolbox::Random(0, models.size() - 1);
return models[modelNumber];
}
void HouseGenerator::GenerateFurnitureModels()
{
// Couches
std::deque<models::TexturedModel> couches;
models::RawModel couch_inside_model = render_engine::LoadObjModel("res/couchThree.obj");
models::TexturedModel couch_inside = { couch_inside_model, default_texture };
couches.push_back(couch_inside);
models::RawModel couch_inside_model2 = render_engine::LoadObjModel("res/Coach.obj");
models::TexturedModel couch_inside2 = { couch_inside_model2, default_texture };
couches.push_back(couch_inside2);
models::RawModel couch_inside_model3 = render_engine::LoadObjModel("res/lawnBenchOne.obj");
models::TexturedModel couch_inside3 = { couch_inside_model3, default_texture };
couches.push_back(couch_inside3);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::COUCH, couches));
// Tables
std::deque<models::TexturedModel> tables;
models::RawModel table_model1 = render_engine::LoadObjModel("res/tableOne.obj");
models::TexturedModel table1 = { table_model1, default_texture };
tables.push_back(table1);
models::RawModel table_model2 = render_engine::LoadObjModel("res/tableTwo.obj");
models::TexturedModel table2 = { table_model2, default_texture };
tables.push_back(table2);
models::RawModel table_model3 = render_engine::LoadObjModel("res/bureauOne.obj");
models::TexturedModel table3 = { table_model3, default_texture };
tables.push_back(table3);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::TABLE, tables));
// Chairs
std::deque<models::TexturedModel> chairs;
models::RawModel chair_model1 = render_engine::LoadObjModel("res/launchchair.obj");
models::TexturedModel chair1 = { chair_model1, default_texture };
chairs.push_back(chair1);
models::RawModel chair_model2 = render_engine::LoadObjModel("res/lawnChairOne.obj");
models::TexturedModel chair2 = { chair_model2, default_texture };
chairs.push_back(chair2);
models::RawModel chair_model3 = render_engine::LoadObjModel("res/ugly_chair.obj");
models::TexturedModel chair3 = { chair_model3, default_texture };
chairs.push_back(chair3);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::CHAIR, chairs));
// Plants
std::deque<models::TexturedModel> plants;
models::RawModel plant_model1 = render_engine::LoadObjModel("res/plantOne.obj");
models::TexturedModel plant1 = { plant_model1, default_texture };
plants.push_back(plant1);
models::RawModel plant_model2 = render_engine::LoadObjModel("res/plantTwo.obj");
models::TexturedModel plant2 = { plant_model2, default_texture };
plants.push_back(plant2);
models::RawModel plant_model3 = render_engine::LoadObjModel("res/plantThree.obj");
models::TexturedModel plant3 = { plant_model3, default_texture };
plants.push_back(plant3);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::PLANT, plants));
// Guitars
std::deque<models::TexturedModel> guitars;
models::RawModel guitar_model1 = render_engine::LoadObjModel("res/guitarOne.obj");
models::TexturedModel guitar1 = { guitar_model1, default_texture };
guitars.push_back(guitar1);
models::RawModel guitar_model2 = render_engine::LoadObjModel("res/guitarTwo.obj");
models::TexturedModel guitar2 = { guitar_model2, default_texture };
guitars.push_back(guitar2);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::GUITAR, guitars));
// Bookshelves
std::deque<models::TexturedModel> bookshelves;
models::RawModel bookshelf_model1 = render_engine::LoadObjModel("res/bookShelfOne.obj");
models::TexturedModel bookshelf1 = { bookshelf_model1, default_texture };
bookshelves.push_back(bookshelf1);
models::RawModel bookshelf_model2 = render_engine::LoadObjModel("res/bookShelfTwo.obj");
models::TexturedModel bookshelf2 = { bookshelf_model2, default_texture };
bookshelves.push_back(bookshelf2);
models::RawModel bookshelf_model3 = render_engine::LoadObjModel("res/bookShelfThree.obj");
models::TexturedModel bookshelf3 = { bookshelf_model3, default_texture };
bookshelves.push_back(bookshelf3);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::BOOKSHELF, bookshelves));
// Lamps
std::deque<models::TexturedModel>lamps;
models::RawModel lamp_model1 = render_engine::LoadObjModel("res/lampOne.obj");
models::TexturedModel lamp1 = { lamp_model1, default_texture };
lamps.push_back(lamp1);
models::RawModel lamp_model2 = render_engine::LoadObjModel("res/lampTwo.obj");
models::TexturedModel lamp2 = { lamp_model2, default_texture };
lamps.push_back(lamp2);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::LAMP, lamps));
// Ceiling objects
std::deque<models::TexturedModel>ceiling_Objects;
models::RawModel ceiling_Obj_model1 = render_engine::LoadObjModel("res/ceilingFan.obj");
models::TexturedModel ceiling_Obj1 = { ceiling_Obj_model1, default_texture };
ceiling_Objects.push_back(ceiling_Obj1);
models::RawModel ceiling_Obj_model2 = render_engine::LoadObjModel("res/ceilingFanTwo.obj");
models::TexturedModel ceiling_Obj2 = { ceiling_Obj_model2, default_texture };
ceiling_Objects.push_back(ceiling_Obj2);
models::RawModel ceiling_Obj_model3 = render_engine::LoadObjModel("res/ceilingLampOne.obj");
models::TexturedModel ceiling_Obj3 = { ceiling_Obj_model3, default_texture };
ceiling_Objects.push_back(ceiling_Obj3);
models::RawModel ceiling_Obj_model4 = render_engine::LoadObjModel("res/ceilingLampTwo.obj");
models::TexturedModel ceiling_Obj4 = { ceiling_Obj_model4, default_texture };
ceiling_Objects.push_back(ceiling_Obj4);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::CEILING_OBJECTS, ceiling_Objects));
// Miscs
std::deque<models::TexturedModel> miscs;
models::RawModel misc_model1 = render_engine::LoadObjModel("res/tv.obj");
models::TexturedModel misc1 = { misc_model1, default_texture };
miscs.push_back(misc1);
models::RawModel misc_model2 = render_engine::LoadObjModel("res/radio.obj");
models::TexturedModel misc2 = { misc_model2, default_texture };
miscs.push_back(misc2);
models::RawModel misc_model3 = render_engine::LoadObjModel("res/Flowerpot.obj");
models::TexturedModel misc3 = { misc_model3, default_texture };
miscs.push_back(misc3);
furniture_models.insert(std::pair<FurnitureType, std::deque<models::TexturedModel>>(FurnitureType::MISC, miscs));
}
}

View File

@@ -0,0 +1,67 @@
#pragma once
#include <deque>
#include <memory>
#include <map>
#include "../models/Model.h"
#include "../collision/collision.h"
namespace entities
{
enum class FurnitureType
{
COUCH,
TABLE,
CHAIR,
PLANT,
GUITAR,
BOOKSHELF,
LAMP,
CEILING_OBJECTS,
MISC
};
class HouseGenerator
{
private:
const float HOUSE_SIZE = 30;
models::TexturedModel house_model;
models::ModelTexture default_texture;
std::map<FurnitureType, std::deque<models::TexturedModel>> furniture_models;
public:
HouseGenerator();
/*
* @brief: This function generates a house with furniture at the given position and rotation
*
* @param position: The position of the house to render
* @param y_rotation: The y rotation the house needs to be rendered with
*
* @return: A list with all the entities of the generated house (the furniture)
*/
std::deque<std::shared_ptr<Entity>> GenerateHouse(const glm::vec3& position, float y_rotation);
/*
* @brief: Returns the depth of the house (chunk)
*/
float GetHouseDepth() const { return house_model.raw_model.model_size.x * HOUSE_SIZE; }
private:
/*
* @brief: This function loads all the 3D furniture models
*/
void GenerateFurnitureModels();
/*
* @brief: This function chooses and returns a random piece of furniture of the given furniture type
*
* @param furniture: The furniture you want to get
*
* @return: The model of the random furniture of the chosen furniture type
*/
models::TexturedModel GetFurnitureModel(FurnitureType furniture);
};
}
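A hedged sketch of consuming GenerateHouse, mirroring how load_chunk uses it later in this commit; the wrapper function and queue names here are illustrative:

#include <deque>
#include <memory>
#include <glm/glm.hpp>
#include "house_generator.h"

// Illustrative consumer: build one house chunk at a given depth and append the
// returned entities (house plus random furniture) to a render queue.
void AppendHouseChunk(entities::HouseGenerator& generator,
                      std::deque<std::shared_ptr<entities::Entity>>& render_queue,
                      int chunk_index)
{
    float z_offset = chunk_index * generator.GetHouseDepth(); // push the chunk further down the z axis
    std::deque<std::shared_ptr<entities::Entity>> chunk =
        generator.GenerateHouse(glm::vec3(0, -75, -50 - z_offset), 90);
    render_queue.insert(render_queue.end(), chunk.begin(), chunk.end());
}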

View File

@@ -0,0 +1,100 @@
#include "main_character.h"
#include "../models/Model.h"
#include <iostream>
#include "entity.h"
#include"../renderEngine/Renderer.h"
#include"../renderEngine/obj_loader.h"
#include"../renderEngine/loader.h"
namespace entities
{
float movement_speed;
float down_speed;
float side_speed;
bool is_playing;
MainCharacter::MainCharacter(const models::TexturedModel& model, const glm::vec3& position,
const glm::vec3& rotation, float scale, const collision::Box& bounding_box)
: CollisionEntity(model, position, rotation, scale, bounding_box)
{
is_playing = true;
}
void MainCharacter::Move(GLFWwindow* window)
{
if (is_playing) {
movement_speed = -0.5f; //Forward speed adjustment, the bee moves forward at a constant rate
down_speed = -1.0f; //Down speed adjustment, the net fall speed is the difference between down_speed and UP_SPEED
side_speed = 0; //Side speed adjustment
//For gameplay with use of keyboard keys: W, A, S, D
//W: Go forward
//A: Go left
//S: Go backwards
//D: Go right
//TODO Implement CV actions
SetRotation(glm::vec3(0, 90, 0));
if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
{
movement_speed -= SIDE_SPEED;
}
if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)
{
movement_speed += SIDE_SPEED;
}
//top right
if (glfwGetKey(window, GLFW_KEY_E) == GLFW_PRESS)
{
side_speed += SIDE_SPEED;
down_speed += UP_SPEED;
}
//right
if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS)
{
side_speed += SIDE_SPEED;
}
//top left
if (glfwGetKey(window, GLFW_KEY_Q) == GLFW_PRESS)
{
down_speed += UP_SPEED;
side_speed -= SIDE_SPEED;
}
//left
if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS)
{
side_speed -= SIDE_SPEED;
}
if (glfwGetKey(window, GLFW_KEY_SPACE) == GLFW_PRESS)
{
down_speed += UP_SPEED;
SetRotation(glm::vec3(10, 90, 0));
}
if (glfwGetKey(window, GLFW_KEY_LEFT_SHIFT) == GLFW_PRESS)
{
down_speed -= UP_SPEED;
}
}
IncreasePosition(glm::vec3(side_speed, down_speed, movement_speed));
//Use only for binding bee to house, such that it doesn't go outside of the room.
//TODO delete when boundingbox is implemented!
if (position.x > 190) position.x = 190;
else if (position.x < -190) position.x = -190;
if (position.y > 350) position.y = 350;
else if (position.y < -40) position.y = -40;
//Move player bounding box according to the position on screen
MoveCollisionBox();
if (glfwGetKey(window, GLFW_KEY_Z) == GLFW_PRESS)
{
is_playing = true;
}
}
void MainCharacter::OnCollide(const collision::Collision& collision) {
down_speed = -2.0f;
movement_speed = 0.0f;
is_playing = false;
std::cout << "collision" << std::endl;
}
}

View File

@@ -0,0 +1,38 @@
#pragma once
#include "collision_entity.h"
#include "../shaders/entity_shader.h"
namespace entities
{
/*
* This class contains the information about the player model
*/
class MainCharacter : public CollisionEntity {
const float SIDE_SPEED = 0.8f; //Standard movement speed for left/right movement
const float UP_SPEED = 2.0f; //Standard movement speed for up movement
public:
/*
* @brief: Constructor for the main character model
*
* @param model: Model to load in as the player model
* @param position: Position of the model inside the game window
* @param rotation: Rotation of the model inside the game window
* @param scale: Size of the model
* @param bounding_box: Collision box around the player model
*/
MainCharacter(const models::TexturedModel& model, const glm::vec3& position,
const glm::vec3& rotation, float scale, const collision::Box& bounding_box);
/*
* @brief: A function to move the character inside the window
*
* @param window: The game window
*
* Adjusts the position using the computed side_speed, down_speed, and movement_speed
*/
void Move(GLFWwindow* window);
void OnCollide(const collision::Collision& collision) override;
};
}

View File

@@ -1,6 +1,8 @@
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/gtc/matrix_transform.hpp>
#include <functional>
#include <vector>
#define STB_IMAGE_IMPLEMENTATION
#include <iostream>
#include <map>
@@ -14,6 +16,7 @@
#include <opencv2/videoio.hpp>
#include <opencv2/video.hpp>
#include "collision/collision.h"
#include "gui/gui_interactable.h"
#include "models/model.h"
#include "renderEngine/loader.h"
@@ -25,6 +28,12 @@
#include "scenes/in_Game_Scene.h"
#include "scenes/startup_Scene.h"
#include "computervision/ObjectDetection.h"
//#include "computervision/OpenPoseImage.h"
#include "computervision/OpenPoseVideo.h"
#include "computervision/async/async_arm_detection.h"
#pragma comment(lib, "glfw3.lib")
#pragma comment(lib, "glew32s.lib")
#pragma comment(lib, "opengl32.lib")
@@ -35,6 +44,19 @@ static GLFWwindow* window;
scene::Scene* current_scene;
static GLFWwindow* window;
bool points_img_available = false;
cv::Mat points_img;
void retrieve_points(std::vector<Point> arm_points, cv::Mat points_on_image)
{
std::cout << "got points!!" << std::endl;
std::cout << "points: " << arm_points << std::endl;
points_img = points_on_image;
points_img_available = true;
}
int main(void)
{
#pragma region OPENGL_SETTINGS

View File

@@ -1,6 +1,7 @@
#pragma once
#include <GL/glew.h>
#include <glm/gtc/matrix_transform.hpp>
namespace models
{

View File

@@ -4,7 +4,6 @@
#include "loader.h"
#include "../toolbox/toolbox.h"
#include "renderer.h"
#include <iostream>
namespace render_engine
@@ -51,12 +50,12 @@ namespace render_engine
/*
This function will Render a Model on the screen.
*/
void Render(entities::Entity& entity, shaders::EntityShader& shader)
void Render(std::shared_ptr<entities::Entity> entity, shaders::EntityShader& shader)
{
const models::TexturedModel model = entity.GetModel();
const models::TexturedModel model = entity.get()->GetModel();
const models::RawModel raw_model = model.raw_model;
const models::ModelTexture texture = model.texture;
// Enable the model (VAO)
glBindVertexArray(raw_model.vao_id);
@@ -66,7 +65,7 @@ namespace render_engine
glEnableVertexAttribArray(2);
// Load the transformation of the model into the shader
const glm::mat4 modelMatrix = toolbox::CreateModelMatrix(entity.GetPosition(), entity.GetRotation(), entity.GetScale());
const glm::mat4 modelMatrix = toolbox::CreateModelMatrix(entity.get()->GetPosition(), entity.get()->GetRotation(), entity.get()->GetScale());
shader.LoadModelMatrix(modelMatrix);
shader.LoadShineVariables(texture.shine_damper, texture.reflectivity);

View File

@@ -1,5 +1,6 @@
#pragma once
#include <memory>
#include "../gui/gui_element.h"
#include "../entities/entity.h"
#include "../shaders/entity_shader.h"
@@ -30,7 +31,7 @@ namespace render_engine
@param entity: The entity which needs to be rendered
@param shader: The shader the entity needs to be rendered with
*/
void Render(entities::Entity& entity, shaders::EntityShader& shader);
void Render(std::shared_ptr<entities::Entity> entity, shaders::EntityShader& shader);
/*
@brief: Call this function to render gui_textures on the screen

View File

@@ -1,8 +1,11 @@
#include <iostream>
#include <memory>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "in_Game_Scene.h"
#include "startup_Scene.h"
#include "../entities/main_character.h"
#include "../collision/collision_handler.h"
#include "../gui/gui_interactable.h"
#include "../models/model.h"
#include "../renderEngine/loader.h"
@@ -10,10 +13,41 @@
#include "../renderEngine/renderer.h"
#include "../shaders/entity_shader.h"
#include "../toolbox/toolbox.h"
#include "../entities/house_generator.h"
#include <deque>
#include <functional>
#include <memory>
#include <queue>
#include <opencv2/core/base.hpp>
#include "../computervision/HandDetectRegion.h"
#include "../computervision/ObjectDetection.h"
#define MAX_MODEL_DEQUE_SIZE 6 // max number of models to load at the same time
#define UPCOMING_MODEL_AMOUNT 4 // how many models should be loaded in front of us
namespace scene
{
std::shared_ptr<entities::MainCharacter>main_character;
std::deque<entities::Light> lights;
std::vector<std::shared_ptr<entities::CollisionEntity>> collision_entities;
entities::HouseGenerator* house_generator;
std::deque<std::shared_ptr<entities::Entity>> house_models;
models::RawModel raw_model, raw_model_char;
models::ModelTexture texture;
shaders::EntityShader* shader;
shaders::GuiShader* gui_shader;
entities::Camera camera(glm::vec3(0, -50, 0), glm::vec3(0, 0, 0));
std::vector<gui::GuiTexture*> guis;
int furniture_count_old;
int score;
std::vector<computervision::HandDetectRegion> regions;
computervision::HandDetectRegion reg_left("left", 0, 0, 150, 150), reg_right("right", 0, 0, 150, 150), reg_up("up", 0, 0, 150, 150);
/**
* sets up the first things when the objects has been made
*/
@@ -22,11 +56,24 @@ namespace scene
camera = new entities::Camera(glm::vec3(0, 0, 0), glm::vec3(0, 0, 0));
shader = new shaders::EntityShader;
shader->Init();
shader->Init();
render_engine::renderer::Init(*shader);
gui_shader = new shaders::GuiShader();
gui_shader->Init();
score = 0;
}
/**
* temporary!!!!
* just to make some bounding boxes
*/
collision::Box create_bounding_box(glm::vec3 size, glm::vec3 pos, int scale) {
collision::Box box = collision::Box();
box.size.x = size.z* scale;
box.size.y = size.y* scale;
box.size.z = size.x* scale;
box.center_pos = pos;
return box;
}
/**
* deletes certain variables when the object is deleted, prevents memory leaks
@@ -36,27 +83,70 @@ namespace scene
delete camera;
delete shader;
delete gui_shader;
delete house_generator;
}
/**
* @brief loads a new chunk in front of the camera, and deletes the chunk behind the camera.
*
* @param model_pos the number of models the camera has passed already. This is the rounded result of (z position of camera) / (size of model)
*
*/
void load_chunk(int model_pos)
{
static unsigned int furniture_count = 0;
// set up squares according to size of camera input
cv::Mat camera_frame;
static_camera::getCap().read(camera_frame); // get camera frame to know the width and height
reg_left.SetXPos(10);
reg_left.SetYPos(camera_frame.rows / 2 - reg_left.GetHeight()/2);
reg_right.SetXPos(camera_frame.cols - 10 - reg_right.GetWidth());
reg_right.SetYPos(camera_frame.rows / 2 - reg_right.GetHeight()/2);
reg_up.SetXPos(camera_frame.cols / 2 - reg_up.GetWidth() / 2);
reg_up.SetYPos(10);
std::cout << "loading model chunk" << std::endl;
if (house_models.size() >= MAX_MODEL_DEQUE_SIZE * furniture_count)
{
for (int i = 0; i < furniture_count; i++)
{
house_models.pop_front();
}
}
int z_offset = model_pos * (house_generator->GetHouseDepth()); // how much "in the distance" we should load the model
std::deque<std::shared_ptr<entities::Entity>> furniture = house_generator->GenerateHouse(glm::vec3(0, -75, -50 - z_offset), 90);
furniture_count = furniture.size();
house_models.insert(house_models.end(), furniture.begin(), furniture.end());
std::cout << "funriture_count in load chunk (house included): " << furniture_count << std::endl;
furniture_count_old = furniture_count -1;
}
/**
* starts the game scene, calls the render and update methods in a while loop
*/
scene::Scenes scene::In_Game_Scene::start(GLFWwindow* window)
{
raw_model = render_engine::LoadObjModel("res/House.obj");
texture = { render_engine::loader::LoadTexture("res/Texture.png") };
texture.shine_damper = 10;
texture.reflectivity = 0;
models::TexturedModel model = { raw_model, texture };
int z = 0;
for (int i = 0; i < 5; ++i)
raw_model_char = render_engine::LoadObjModel("res/beeTwo.obj");
models::TexturedModel model_char = { raw_model_char, texture };
collision::Box char_box = create_bounding_box(raw_model_char.model_size, glm::vec3(0, 0, 0), 1);
main_character = std::make_shared<entities::MainCharacter>(model_char, glm::vec3(0, -50, -100), glm::vec3(0, 90, 0), 5, char_box);
collision_entities.push_back(main_character);
house_generator = new entities::HouseGenerator();
// load the first few house models
for (int i = 0; i <= UPCOMING_MODEL_AMOUNT; i++)
{
entities_to_render.push_back(entities::Entity(model, glm::vec3(0, -50, -50 - z), glm::vec3(0, 90, 0), 20));
z += (raw_model.model_size.x * 20);
load_chunk(i);
}
lights.push_back(entities::Light(glm::vec3(0, 1000, -7000), glm::vec3(5, 5, 5)));
lights.push_back(entities::Light(glm::vec3(0, 1000, 7000), glm::vec3(5, 5, 5))); // sun
lights.push_back(entities::Light(glm::vec3(0, 0, -30), glm::vec3(2, 0, 2), glm::vec3(0.0001f, 0.0001f, 0.0001f)));
lights.push_back(entities::Light(glm::vec3(0, 0, -200), glm::vec3(0, 2, 0), glm::vec3(0.0001f, 0.0001f, 0.0001f)));
@@ -65,11 +155,11 @@ namespace scene
button.SetHoverTexture(render_engine::loader::LoadTexture("res/Texture.png"));
button.SetClickedTexture(render_engine::loader::LoadTexture("res/Mayo.png"));
button.SetOnClickAction([]()
{
std::cout << "I got clicked on!" << std::endl;
});
guis.push_back(&button);
//guis for the pause menu
gui::GuiTexture background(render_engine::loader::LoadTexture("res/background_grey.png"), glm::vec2(0, 0), glm::vec2(1, 1));
@@ -139,14 +229,15 @@ namespace scene
//starts the shader and begins to render
shader->Start();
shader->LoadSkyColor(render_engine::renderer::SKY_COLOR);
shader->LoadLights(lights);
shader->LoadViewMatrix(*camera);
shader->LoadLightsDeque(lights);
shader->LoadViewMatrix(camera);
// Renders each entity in the entities list
for (entities::Entity& entity : entities_to_render)
for (std::shared_ptr<entities::Entity> model_entity : house_models)
{
render_engine::renderer::Render(entity, *shader);
render_engine::renderer::Render(model_entity, *shader);
}
render_engine::renderer::Render(*main_character, *shader);
// Render GUI items
//render_engine::renderer::Render(guis, *gui_shader);
@@ -158,20 +249,38 @@ namespace scene
//updates the scene state every frame (movement, chunk loading, collisions, hand detection)
void scene::In_Game_Scene::update(GLFWwindow* window)
{
camera->Move(window);
}
//camera.Move(window);
main_character->Move(window);
//std::cout << "x get: " << movement.x << "\ny get: " << movement.y << "\nz get: " << movement.z << "\n";
camera.Follow(main_character->GetPosition());
//renders the models for the pause menu
void In_Game_Scene::render_pause_menu()
{
render_engine::renderer::Render(pause_guis, *gui_shader);
// calculate where the next house model should be loaded
static int last_model_pos = 0;
int model_pos = -round(camera.GetPosition().z / (house_generator->GetHouseDepth())); // how many models we have passed; negated because we move along the negative z axis
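// worked example with hypothetical numbers: if GetHouseDepth() is 400 and the
// camera sits at z = -850, model_pos = -round(-850 / 400) = 2, i.e. two houses
// have been passed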
// if we have passed a model, load a new one and delete the one behind us
if (last_model_pos != model_pos)
{
load_chunk(model_pos + UPCOMING_MODEL_AMOUNT);
score += furniture_count_old;
std::cout << "Score: " << score << std::endl;
std::cout << "Funriture_count_old in model (house excluded): " << furniture_count_old << std::endl;
}
// remember the position at which the new model was added
last_model_pos = model_pos;
collision::CheckCollisions(collision_entities);
update_hand_detection();
}
//manages the key input in the game scene
void scene::In_Game_Scene::onKey(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (glfwGetKey(window, GLFW_KEY_SPACE) == GLFW_PRESS)
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
{
cv::destroyWindow("camera");
return_value = scene::Scenes::STOP;
}
if (glfwGetKey(window, GLFW_KEY_P) == GLFW_PRESS)
@@ -182,5 +291,30 @@ namespace scene
{
game_state = scene::Game_State::RUNNING;
}
if (glfwGetKey(window, GLFW_KEY_B) == GLFW_PRESS)
{
reg_left.CalibrateBackground();
reg_right.CalibrateBackground();
reg_up.CalibrateBackground();
}
if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)
{
std::vector<int> tresholds = reg_left.CalculateSkinTresholds();
reg_right.setSkinTresholds(tresholds);
reg_up.setSkinTresholds(tresholds);
}
}
void scene::In_Game_Scene::update_hand_detection()
{
cv::Mat camera_frame;
static_camera::getCap().read(camera_frame);
reg_left.DetectHand(camera_frame);
reg_right.DetectHand(camera_frame);
reg_up.DetectHand(camera_frame);
cv::imshow("camera", camera_frame);
}
}

View File

@@ -59,7 +59,7 @@ namespace scene
* @return void
*/
void render_pause_menu();
void update_hand_detection();
public:
In_Game_Scene();

View File

@@ -15,6 +15,7 @@
#include "../toolbox/toolbox.h"
#include "../computervision/MenuTest.h"
#include "../computervision/ObjectDetection.h"
#include "../computervision/HandDetectRegion.h"
@@ -23,6 +24,7 @@ namespace scene
{
shaders::GuiShader* gui_shader1;
std::vector<gui::GuiTexture*> guis1;
computervision::ObjectDetection objDetect;
float item_number = 0;
@@ -186,6 +188,8 @@ namespace scene
if (new_button != NULL)
new_button->Update(window);
}
bool hand_present;
objDetect.DetectHand(objDetect.ReadCamera(), hand_present);
}
/**
@@ -196,6 +200,7 @@ namespace scene
if (glfwGetKey(window, GLFW_KEY_SPACE) == GLFW_PRESS)
{
return_value = scene::Scenes::INGAME;
cv::destroyWindow("camera");
}
else if (glfwGetKey(window, GLFW_KEY_BACKSPACE) == GLFW_PRESS) {
hand_mode = !hand_mode;

View File

@@ -1,5 +1,6 @@
#include "entity_shader.h"
#include "../toolbox/toolbox.h"
#include <deque>
namespace shaders
{
@@ -28,7 +29,7 @@ namespace shaders
uniform vec3 light_position[4];
const float density = 0.0017;
const float gradient = 4;
const float gradient = 3;
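// fog parameters: density controls how quickly the fog builds up with distance and
// gradient controls how sharp the transition is (assuming the usual
// exp(-(distance * density) ^ gradient) visibility curve); a lower gradient gives a softer falloff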
void main(void)
{
@@ -160,6 +161,25 @@ namespace shaders
}
}
void EntityShader::LoadLightsDeque(std::deque<entities::Light>& lights) const
{
for (int i = 0; i < MAX_LIGHTS; ++i)
{
if (i < lights.size())
{
LoadVector(location_light_position[i], lights[i].GetPosition());
LoadVector(location_light_color[i], lights[i].GetColor());
LoadVector(location_light_attenuation[i], lights[i].GetAttenuation());
}
else
{
LoadVector(location_light_position[i], glm::vec3(0, 0, 0));
LoadVector(location_light_color[i], glm::vec3(0, 0, 0));
LoadVector(location_light_attenuation[i], glm::vec3(1, 0, 0));
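// unused light slots get zero color, and a constant attenuation of 1 presumably
// keeps the shader's attenuation factor from dividing by zero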
}
}
}
void EntityShader::LoadShineVariables(float shine_damper, float reflectivity) const
{
LoadFloat(location_shine_damper, shine_damper);

View File

@@ -2,6 +2,7 @@
#include <glm/gtc/matrix_transform.hpp>
#include <vector>
#include <deque>
#include "shader_program.h"
#include "../entities/camera.h"
#include "../entities/light.h"
@@ -58,6 +59,13 @@ namespace shaders
*/
void LoadLights(std::vector<entities::Light>& lights) const;
/**
* @brief loads the lights contained in a deque into the shader's light uniforms.
*
* @param lights the deque containing the lights to load
*/
void LoadLightsDeque(std::deque<entities::Light>& lights) const;
/*
* @brief: A method to load the shine variables from a model into the shader
*

View File

@@ -1,3 +1,4 @@
#include <ctime>
#include "toolbox.h"
namespace toolbox
@@ -31,4 +32,28 @@ namespace toolbox
matrix = glm::translate(matrix, negative_cam_pos);
return matrix;
}
float Lerp(float from, float to, float amount)
{
return from + amount * (to - from);
}
glm::vec3 Lerp(glm::vec3 from, glm::vec3 to, float amount)
{
glm::vec3 final;
final.x = Lerp(from.x, to.x, amount);
final.y = Lerp(from.y, to.y, amount);
final.z = Lerp(from.z, to.z, amount);
return final;
}
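// illustrative (hypothetical) call site: easing a camera toward a target each frame,
// e.g. glm::vec3 eased = toolbox::Lerp(current_pos, target_pos, 0.1f); // closes 10% of the remaining gap per update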
int Random(const int min, const int max)
{
static bool first = true;
if (first)
{
srand(time(0));
first = false;
}
return min + rand() % ((max + 1) - min);
}
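// e.g. Random(1, 6) returns an integer in [1, 6]; the range is inclusive on both
// ends because of the ((max + 1) - min) modulus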
}

View File

@@ -46,4 +46,36 @@ namespace toolbox
* @return: The view matrix
*/
glm::mat4 CreateViewMatrix(entities::Camera& camera);
/*
* @brief interpolates one coordinate toward another with smoothing
*
* @param from the coordinate to start from
* @param to the coordinate to move towards
* @param amount the amount of smoothing (lower is smoother)
*
* @return the interpolated coordinate
*/
float Lerp(float from, float to, float amount);
/*
* @brief interpolates one position toward another with smoothing
*
* @param from the position to start from
* @param to the position to move towards
* @param amount the amount of smoothing (lower is smoother)
*
* @return the interpolated position
*/
glm::vec3 Lerp(glm::vec3 from, glm::vec3 to, float amount);
/*
* @brief: Returns a random integer between min and max (both inclusive)
*
* @param min: The min value
* @param max: The max value
*
* @return: The random number
*/
int Random(const int min, const int max);
}