//-*-c++-*-
// OpenCV Face Detection Application: FaceTracking.h
#ifndef INCLUDED_FaceTracking_h_
#define INCLUDED_FaceTracking_h_

#include <stdio.h> 
#include <stdlib.h>
#include <cstring>
#include <assert.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <time.h>
#include <ctype.h>
#include <iostream>
#include <sstream>

#include "cv.h"
#include "cxcore.h"
#include "highgui.h"

// Include tekkotsu files
#include "DualCoding/DualCoding.h"
#include "Behaviors/StateMachine.h"
#include "Events/EventRouter.h"

#include "Motion/MotionPtr.h"
#include "Motion/MMAccessor.h"
#include "Motion/PIDMC.h"
#include "Motion/HeadPointerMC.h"
#include "Motion/Kinematics.h"
#include "Motion/PostureMC.h" 
#include "IPC/SharedObject.h"
#include "Shared/Config.h"
#include "Shared/ERS7Info.h"
#include "Shared/fmat.h"
#include "SiftTekkotsu.h"

// Include files for drawing into the camera frame
#include "Shared/ProjectInterface.h"
#include "Vision/Graphics.h"
#include "Events/FilterBankEvent.h"
#include "Vision/RawCameraGenerator.h"
#include "Behaviors/Mon/RawCamBehavior.h"
#include "Wireless/LGmixin.h"

using namespace DualCoding;

// Predefined Tekkotsu "Raw Cam" image width and height
#define imgHeight 240
#define imgWidth 320

// Create memory storage for Haar features used in calculations
// Scratch memory pools used by the OpenCV Haar detectors; allocated in Init.
// NOTE(review): these are non-static/static globals defined in a header --
// fine for a single-include application header, but would violate the ODR
// if this header were included from more than one translation unit.
static CvMemStorage* storage = 0;
static CvMemStorage* leftEyeStorage = 0;
static CvMemStorage* rightEyeStorage = 0;

// Global Haar classifiers, the more the merrier.  'cascade' always points
// at whichever of cascade1..cascade4 FindFaces is currently using.
static CvHaarClassifierCascade* cascade = 0;
static CvHaarClassifierCascade* cascade1 = 0;
static CvHaarClassifierCascade* cascade2 = 0;
static CvHaarClassifierCascade* cascade3 = 0;
static CvHaarClassifierCascade* cascade4 = 0;

// Haar classifiers specialized for the left and right eyes
static CvHaarClassifierCascade* leftEyeCascade = 0;
static CvHaarClassifierCascade* rightEyeCascade = 0;

/* File paths for the Haar cascades describing faces.  Note these XML files
 * must be present at these paths on the Chiara for the program to work. */
const char* cascade_name = 
  "/home/student/project/haarcascade_frontalface_alt.xml";
const char* cascade_name2 = 
  "/home/student/project/haarcascade_frontalface_default.xml";
const char* cascade_name3 = 
  "/home/student/project/haarcascade_frontalface_alt2.xml";
const char* cascade_name4 = 
  "/home/student/project/haarcascade_profileface.xml";

// File paths for the Haar cascades describing the left and right eyes
const char* leftEye = "/home/student/project/ojoL.xml";
const char* rightEye = "/home/student/project/ojoR.xml";

// Sequence of faces found by the most recent detection pass (owned by 'storage');
// shared between FindFaces (producer) and FollowFace/LGDemo (consumers)
static CvSeq * faces = 0;

// Sequences of candidate left/right eyes found within the face region
static CvSeq * leftEyeSeq = 0;
static CvSeq * rightEyeSeq = 0;

/* Counter (not a constant) of how many pictures have been uploaded to the
 * Looking Glass software; used by LGDemo to generate unique image names. */
static int picturesUploaded = 0;

/* Used for converting object into string, eg int to string */
/* Convert any streamable value to its string representation, e.g. int to
 * string.  Qualified with std:: explicitly rather than relying on a
 * transitive using-directive from the Tekkotsu headers. */
template <class T>
inline std::string to_string (const T& t)
{
  // ostringstream suffices since we only ever insert, never extract
  std::ostringstream ss;
  ss << t;
  return ss.str();
}

/* Program for face detection, recognition, tracking and Human-computer interaction */
class FaceTracking : public StateNode, public VRmixin {
 public:
  enum MethodState { notFound, load, error };

  /* Internal class representation of a face. Includes the HSV
   * image (which is used to create the histogram) and the histogram
   * as well as other global constants such as the previous face 
   * position denoted by a rectangle. */
  /* Internal class representation of a face: the HSV working images and the
   * hue histogram used by CamShift, plus the previous face position denoted
   * by a rectangle.  Owns its IplImage/CvHistogram resources (released in
   * the destructor), and is therefore non-copyable -- a shallow copy would
   * double-free in ~Face(). */
  class Face {
  public:
    int nHistBins;          // number of hue histogram bins
    int vmin;               // minimum brightness ("value") accepted by the mask
    int vmax;               // maximum brightness accepted by the mask
    int smin;               // minimum saturation accepted by the mask
    int nFrames;            // frame counter (not read by the tracker currently)
    float scaleFaceYFactor; // height/width proportion of the detected face
    float ranges[2];        // hue range for the histogram: [0, 180)
    float * pRanges;        // cvCreateHist expects float**; points at ranges
    IplImage * pHSVImg;     // full-frame HSV conversion buffer
    IplImage * pHueImg;     // extracted hue channel
    IplImage * pMask;       // mask of pixels whose (s,v) fall in range
    IplImage * pProbImg;    // histogram back-projection probability image
    CvHistogram * pHist;    // hue histogram of the face region
    CvRect prevFaceRect;    // face location from the previous frame

    // Constructor: set thresholds and allocate all working images at the
    // fixed camera resolution (imgWidth x imgHeight).
    Face(){

      // Initialize local constants
      nHistBins = 30;
      vmin = 65;
      vmax = 256;
      smin = 55;
      nFrames = 0;
      ranges[0] = 0;
      ranges[1] = 180;
      pRanges = ranges;

      // Set the proportion constants for face width and height
      scaleFaceYFactor = 0;

      // Create IplImages in memory to store data we will work with
      pHSVImg = cvCreateImage( cvSize(imgWidth, imgHeight), 8, 3 );
      pHueImg = cvCreateImage( cvSize(imgWidth, imgHeight), 8, 1 );
      pMask = cvCreateImage( cvSize(imgWidth, imgHeight), 8, 1 );
      pProbImg = cvCreateImage( cvSize(imgWidth, imgHeight), 8, 1 );
      pHist = cvCreateHist( 1, &nHistBins, CV_HIST_ARRAY, &pRanges, 1 );
    }

    // Destructor for a face, called when the object gets deleted
    ~Face(){
      // Release dynamically allocated data using openCV functions
      cvReleaseImage( &pHSVImg );
      cvReleaseImage( &pHueImg );
      cvReleaseImage( &pMask );
      cvReleaseImage( &pProbImg );
      cvReleaseHist( &pHist );
    }

  private:
    /* Non-copyable (declared but not implemented, pre-C++11 idiom): the
     * raw OpenCV pointers above are owning, so copying would lead to a
     * double release.  Only default construction (new Face[n]) is used. */
    Face(const Face&);
    Face& operator=(const Face&);
  };
  
  /**
   * Class that initializes the Haar classifier prior to face detection.
   * Note I used several cascades for better face detection.  We only need to
   * detect the face once so it's actually better to use several different cascades that
   * are specialized for particular views of the face than any one cascade.
   */
  class Init : public StateNode, public VRmixin {
  public:
  Init() : StateNode("Init"), 
	   VRmixin() {}
    virtual void DoStart(){
      // Load the HaarClassifierCascade
      cascade1 = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
      cascade2 = (CvHaarClassifierCascade*)cvLoad( cascade_name2, 0, 0, 0 );
      cascade3 = (CvHaarClassifierCascade*)cvLoad( cascade_name3, 0, 0, 0 );
      cascade4 = (CvHaarClassifierCascade*)cvLoad( cascade_name4, 0, 0, 0 );
      // Load the cascades for the left and right eyes
      leftEyeCascade  = 
	(CvHaarClassifierCascade*)cvLoad( leftEye, 0, 0, 0 );
      rightEyeCascade =
	(CvHaarClassifierCascade*)cvLoad( rightEye, 0, 0, 0 );
      // Set global face cascade to one of the cascades
      cascade = cascade1;

      // Check whether the cascade has loaded successfully.
      if( !cascade1 || !cascade2 || !cascade3 
	  || !cascade4 || !leftEyeCascade || !rightEyeCascade) {
	fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
	postStateSignal<unsigned int>(error);
      } else {
	// Allocate the memory storage
	storage = cvCreateMemStorage(0);
	// Allocate memory storage for left and right eyes
	leftEyeStorage = cvCreateMemStorage(0);
	rightEyeStorage = cvCreateMemStorage(0);
	postStateSignal<unsigned int>(load);
      }
    } 
  };
  
  /**
   * Class for detecting faces and putting a bounding rectangle around them
   * for tracking purposes.
   */
  class FindFaces : public StateNode, public VRmixin {
  public:
    /* Keep iterations variable to find out how many times tried to detect face.    *
     * Note we cannot just try to detect the face once since then the face detector *
     * will not work very well.                                                     */
    int iters;
    FindFaces() : StateNode("FindFaces"), 
		  VRmixin(), 
		  iters(0) {}

    // Detects faces in image
    int FaceDetect(const EventBase& e){
      IplImage *image = cvCreateImage(cvSize(imgWidth, imgHeight), IPL_DEPTH_8U, 3);
      
      // Clear the memory storage which was used before
      cvClearMemStorage( storage );
      
      // Create the YUV image from camera frame
      Sketch<yuv> yuvImg(sketchFromYUV());
      
      // Convert YUV image to IplImage
      memcpy(image->imageData, yuvImg->getRawPixels(), imgHeight * imgWidth * 3);
      
      // Check whether the cascade is loaded, should not need this!
      if( cascade && leftEyeCascade && rightEyeCascade){
	// Detect the faces and store them in the sequence
	faces = cvHaarDetectObjects( image, cascade, storage,
				     1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
				     cvSize(50, 50) );
	if(faces->total == 0){
	  cout << "Did not find any faces.\n" << endl;
	  cvReleaseImage(&image);
	  return 0;
	}else {
	  // Loop the number of faces found, draw square for each face.
	  for(int i = 0; i < (faces ? faces->total : 0); i++ ) {
	    cout << "Found a face in the image." << endl;
	    // For each image draw a rectangle
	    CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
	    // Draw the image to the frame
	    const FilterBankEvent& fbe = dynamic_cast<const FilterBankEvent&>(e);
	    unsigned chan=RawCameraGenerator::CHAN_Y;
	    unsigned int layer=RawCamBehavior::getSourceLayer(chan,fbe.getNumLayers());
	    Graphics g(*fbe.getSource(), layer, chan);
	    g.setColor(255);
	    g.drawRect(r->x,r->y,r->width,r->height);
	  }
	  // Release image memory
	  cvReleaseImage(&image);
	  return 1;
	}
      } else {
      // Release image memory
	cvReleaseImage(&image);
	return -1;
      }
    }

    virtual void DoStart() {
      // Set iterations to 0 upon each entry to state node, not just in class constructor
      iters = 0;
      // Load the HaarClassifierCascade
      cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
      
      // Check whether the cascade has loaded successfully. Else report an error and post completion.
      if( !cascade ) {
	fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
	postStateSignal<unsigned int >(error);
      } else {
	// Allocate the memory storage
	storage = cvCreateMemStorage(0);
	
	// Listen to visRawCameraEGID to find out when new image is posted for processing
	erouter->addListener(this,EventBase::visRawCameraEGID,
			     ProjectInterface::visRawCameraSID,EventBase::statusETID); 
      }
    }
    
    void processEvent(const EventBase& e){
      if(e.getGeneratorID() == EventBase::visRawCameraEGID){
	// Go through 30 iterations in order to find the face
	if(iters < 30){
	  // Change cascade files upon every iteration to detect face faster
	  if(iters % 4 == 0)
	    cascade = cascade1;
	  else if(iters % 4 == 1)
	    cascade = cascade2;
	  else if(iters % 4 == 2)
	    cascade = cascade3;
	  else if(iters %4 == 3)
	    cascade = cascade4;
	  int ret = FaceDetect(e);
	  // First check if the face detector found a face or had error
	  if(ret){
	    // If it found a face go to next state
	    if(ret > 0){
	      // Post state signal to recognize/follow face
	      postStateSignal<unsigned int >(load);
	    } else {
	      // Post state signal for error in finding cascade
	      postStateSignal<unsigned int >(error);
	    } 
	  }
	  iters++;
	} else {
	  // After going through all iterations can be sure face is not in the image
	  // Post state signal since no faces found
	  postStateSignal<unsigned int >(notFound);
	}
      }
    }
  };
  
  /**
   *  Class for moving the head to position moveX, moveY, moveZ.
   *  Note this is only used for looking around since the positions
   *  must be known before the state is initialized.
   */
  /**
   *  Class for moving the head to position moveX, moveY, moveZ.
   *  Note this is only used for looking around since the positions
   *  must be known before the state is initialized.
   */
  class MoveHead : public HeadPointerNode {
  public:
    // Target point the camera should look at (set once at construction)
    int moveX, moveY, moveZ;
    MoveHead(int mX, int mY, int mZ) : HeadPointerNode("MoveHead"), 
				       moveX(mX), 
				       moveY(mY), 
				       moveZ(mZ), 
				       headPointer() {}
    // Aim the head at the stored target when the node activates.
    // NOTE(review): unlike FollowFace, this node never calls
    // addMotion(headPointer) -- confirm the HeadPointerNode base registers
    // its own motion command, otherwise this lookAtPoint may never reach
    // the motion manager.
    virtual void DoStart() {
      headPointer->lookAtPoint(moveX, moveY, moveZ);
    }
  protected:
    // Head motion command owned by this node
    MotionPtr<HeadPointerMC> headPointer;
  };

  /**
   *  Class for recognizing the face of the person.  The function
   *  Recognizes the face if it is in the current database.
   *  At present this function is VERY SLOW, although it works.
   *  It would be best to send the image to an external server
   *  that would recognize the face.
   *  Note I did not rescale the image, hence the class
   *  only works properly if the face is very large.  In the
   *  future the face of the person will have to be rescaled. 
   */
  /*class RecognizeFace : public StateNode, public VRmixin {
  protected:
    SiftTekkotsu mySiftTekkotsu;
    // Path to database on Chiara
    string path;
  public:
    RecognizeFace() : StateNode("RecognizeFace"), 
		 VRmixin(), 
		 path("/home/student/project/SIFT") {}
    
    virtual void DoStart() {
      cout << "In sift match" << endl;
      bool matchFound = false;

      // Create gray image buffer and get gray bits from camera YUV image
      Sketch<unsigned char> grayImg(sketchFromRawY());
      ImageBuffer grayBuf;
      grayBuf.width = imgWidth;
      grayBuf.height = imgHeight;
      grayBuf.byteArray = new unsigned char[76800];
      memcpy(grayBuf.byteArray, grayImg->getRawPixels(), 76800);
      if(picturesUploaded == 0){
	cout << "Creating new database..." << endl;
	mySiftTekkotsu.setParameter("probOfMatch", 0.9);
	mySiftTekkotsu.train_addNewObject(grayBuf);
	// Create database: ************VERY SLOW*************
	mySiftTekkotsu.saveToFile((path + "Faces.kb").c_str(), true);
      } else {
	mySiftTekkotsu.loadFile((path + "Faces.kb").c_str());
	cout << "Checking matches..." << endl;
	vector<SiftMatch*> matches;
	mySiftTekkotsu.findAllObjectsInImage(grayBuf, matches);
	if(matches.size() == 0) {
	  cout << "No matches found.  Creating new object" << endl;
	  mySiftTekkotsu.train_addNewObject(grayBuf);
	  mySiftTekkotsu.saveToFile((path + "Faces.kb").c_str(), true);
	} else {
	  matchFound = true;
	  cout << "Match found!  Adding instance to database." << endl;
	  mySiftTekkotsu.train_addToObject(matches[0]->objectID,
					   grayBuf);
	  for(int i = 0; i < matches.size(); i++)
	    delete matches[i];
	}
      }
      // Free memory
      delete [] grayBuf.byteArray;
      if(matchFound)
	postStateSignal<unsigned int >(load);
      else
	postStateSignal<unsigned int >(notFound);
    }
    }; */
  
  /* This is a demo of how we can interface face tracking and face recognition
   * with tekkotsu, especially the Looking Glass software. My demo creates
   * a shape and a sketch of the current image (after the face was recognized)
   * and it uploads the current and all past images to the server. */
  /* This is a demo of how we can interface face tracking and face recognition
   * with tekkotsu, especially the Looking Glass software. The demo creates
   * a shape and a sketch of the current image (after the face was recognized)
   * and it uploads the current and all past images to the server, then posts
   * completion so the state machine can move on to FollowFace. */
  class LGDemo : public StateNode, public VRmixin, public LGmixin {
  public:
    LGDemo() : StateNode("LGDemo"), 
	       VRmixin(), 
	       LGmixin() {}

    /* Fixed: this was declared as DoStartEvent(const EventBase&), which is
     * not the entry point used anywhere else in this file -- every other
     * node overrides DoStart() -- so the listener was never registered and
     * the node never posted completion, stalling recogFace =C=> followFace. */
    virtual void DoStart() {
      /* Listen to visRawCameraEGID to find out when new image is posted for processing
       * We do this so that when we draw the rectangle on the picture we post it does not
       * conflict with the rectangle drawn by face detect (to avoid getting the same camera frame) */
      erouter->addListener(this,EventBase::visRawCameraEGID,ProjectInterface::visRawCameraSID,EventBase::statusETID); 
    }

    /* On the next camera frame: build a shape/sketch for each detected face,
     * upload the camera image under a unique name, regenerate and display
     * the HTML gallery of all uploads, then post completion. */
    void processEvent(const EventBase& e){
      if(e.getGeneratorID() == EventBase::visRawCameraEGID){
	for(int i = 0; i < (faces ? faces->total : 0); i++ ) {
	  CvRect * pFaceRect = (CvRect*)cvGetSeqElem( faces, i );
	  /* Set the x, y coordinates and rescale the width and height to include
	   * a larger portion of the person's face (truncated back to int pixels) */
	  int x1 = pFaceRect->x;
	  int x2 = pFaceRect->x+pFaceRect->width*1.05f;
	  int y1 = pFaceRect->y;
	  int y2 = pFaceRect->y+pFaceRect->height*1.05f;
	  
	  Point topLeft(x1, y1, 0, camcentric);
	  Point topRight(x2, y1, 0, camcentric);
	  Point bottomLeft(x1, y2, 0, camcentric);
	  Point bottomRight(x2, y2, 0, camcentric);
	  camSkS.clear();
	  camShS.clear();
	  NEW_SHAPE(faceBox, BlobData, new BlobData(camShS, topLeft, topRight, bottomLeft, bottomRight, camcentric));
	  NEW_SKETCH(face, yuv, sketchFromYUV());
	}
	// Upload the current camera image with a unique per-upload name
	string faceNumber = to_string(picturesUploaded);
	uploadCameraImage("face" + faceNumber + ".jpg");

	picturesUploaded++;
	cout << "Pictures uploaded: " << picturesUploaded << endl;

	/* Generate the html code that will be displayed in the demo */
	string htmlCode = "<html><body><p>Hello!  Welcome to tekkotsu!"
	  "This is a Looking Glass demo of what the face"
	  "tracker can do.&#10;&#13;</p>";
	
	// Embed every picture uploaded so far, in upload order
	for(int i = 0; i < picturesUploaded; i++){
	  string index = to_string(i);
	  htmlCode += "<img src=\"face" + index + ".jpg\">";
	}
	string total = to_string(picturesUploaded);
	htmlCode += "<p>&#10;&#13;There are: " + total + " pictures uploaded</p></body></html>";

	// Now display the current picture and all past uploaded pictures
	displayHtmlText(htmlCode);
	postStateCompletion();
      }
    }
  };
 
  /* The Chiara greets the human upon first contact */
  /* The Chiara greets the human upon first contact: speaks a greeting and
   * waves its arm, completing once the wave-back motion has been issued. */
  class Greeting : public ArmNode {
  public:
    Greeting() : ArmNode("Greeting") {}
    // Speak, swing the arm out, and subscribe to the motion command's
    // status events so we know when to wave back.
    virtual void DoStart() {
      cout << "Greeting human!" << endl;
      sndman->speak("Hello human.  Good day to you.");
      // Slow the first joint for a smoother wave
      getMC()->setMaxSpeed(0, getMC()->getMaxSpeed(0)/1.2f);
      getMC()->setJoints(1.1695f, -0.0230f, -1.4459f);
      erouter->addListener(this, EventBase::motmanEGID, getMC()->getID(), EventBase::statusETID);
    }
    // NOTE(review): motman status events can fire on every update, not only
    // when the first motion finishes, so the wave-back and completion may
    // trigger before the arm reaches its first pose -- verify timing.
    void processEvent(const EventBase& e){
      if(e.getGeneratorID() == EventBase::motmanEGID){
	// Make the arm "wave" a hello to the human by sending the arm back to other side
	// NOTE(review): -1.4449f here vs -1.4459f above -- confirm the small
	// difference in the last joint angle is intentional, not a typo.
	getMC()->setMaxSpeed(0, getMC()->getMaxSpeed(0)/1.2f);
	getMC()->setJoints(0.0f, 0.0f, -1.4449f);
	postStateCompletion();
      }
    }
  };
  
  
  /**
   * Here we use CvCamShift to follow the face as it is moving in space.
   * Note that as the face moves in space we follow the largest face
   * via the head of the Chiara.  After some amount of time the method
   * must terminate so that the Chiara can run its face detector again, or
   * if no faces are present in the image.  I also added an eye detector,
   * which is relatively fast since we are looking within a small, bounded
   * region of the image.
   */
  class FollowFace : public StateNode, public VRmixin {
  public:
    int iters;
    CvRect *pFaceRect;
    Face *Faces;
      
    /* Detect eyes within the given boundary and 
     * draw rectangles around them */
    int EyeDetect(Graphics *g, CvRect *face, IplImage *image){
      
      // Clear the memory storage which was used before
      cvClearMemStorage( leftEyeStorage );
      cvClearMemStorage( rightEyeStorage );
      
      // Check to see if cascades are loaded, should not need this!
      if(leftEyeCascade && rightEyeCascade){
	
	// Use the values of the rectangle to estimate the relative position of eyes.
	// Note that I used the width of the face since that is the most accurate value.
	int ypos = face->y+(face->width*0.15f);
	int yheight = face->width*0.72f;
	CvRect eyeWindow = cvRect(face->x, ypos, face->width, yheight);
	
	// First resize the given image to just include the eyes
	cvSetImageROI(image, eyeWindow);
	
	// Search through the image for the left eye, notice limit is small (5x5)
	leftEyeSeq = cvHaarDetectObjects( image, leftEyeCascade, leftEyeStorage,
					  1.1, 2, 
					  CV_HAAR_DO_CANNY_PRUNING,
					  cvSize(5, 5) );

	// Search through the image for the right eye, notice limit is small (5x5)
	rightEyeSeq = cvHaarDetectObjects( image, rightEyeCascade, rightEyeStorage,
					   1.1, 2, 
					   CV_HAAR_DO_CANNY_PRUNING /*| CV_HAAR_FIND_BIGGEST_OBJECT*/,
					   cvSize(5, 5) );
	cvResetImageROI(image);

	// Display the results to the frame
	g->setColor(180);
	
	/* If cannot find any eyes, print error message.  If can find
	 * one of the eyes, draw it.  If can find both of the eyes, choose
	 * the eyes to display properly so that they are not the same eye */
	if(!leftEyeSeq->total && !rightEyeSeq->total) {

	  cout << "Could not find the right and left eyes" << endl;

	} else if(!leftEyeSeq->total && rightEyeSeq->total){

	  CvRect* rightEyeRect = (CvRect*)cvGetSeqElem( rightEyeSeq, 0 );
	  g->drawRect(eyeWindow.x+rightEyeRect->x, eyeWindow.y+rightEyeRect->y, 
		      rightEyeRect->width, rightEyeRect->height);

	} else if(leftEyeSeq->total && !rightEyeSeq->total){

	  CvRect* leftEyeRect = (CvRect*)cvGetSeqElem( leftEyeSeq, 0 );
	  g->drawRect(eyeWindow.x+leftEyeRect->x, eyeWindow.y+leftEyeRect->y, 
		      leftEyeRect->width, leftEyeRect->height);

	} else {

	  CvRect* finalLeftEye;
	  CvRect* finalRightEye;
	  // Make sure both eyes do not intersect!
	  for(int i = 0; i < leftEyeSeq->total; i++){
	    CvRect* leftEyeRect = (CvRect*)cvGetSeqElem( leftEyeSeq, i );
	    for(int j = 0; j < rightEyeSeq->total; j++){
	      CvRect* rightEyeRect = (CvRect*)cvGetSeqElem( rightEyeSeq, j );
	      /* Check to see if the eyes intersect.  If they do, choose the one that
	       * is on the correct side of the face.  Note height does not matter in this case. */
	      int r1 = rightEyeRect->x, r2 = rightEyeRect->x+rightEyeRect->width;
	      int l1 = leftEyeRect->x, l2 = leftEyeRect->x+leftEyeRect->width;
	      // Case 1: Eyes do not intersect, best and most common case.  Just draw and return.
	      if((r1 > l1 && r1 > l2 && r2 > l2 && r2 > l1) 
		 || (r1 < l1 && r1 < l2 && r2 < l2 && r2 < l1)){
		//Draw right eye
		g->drawRect(eyeWindow.x+rightEyeRect->x, eyeWindow.y+rightEyeRect->y, 
			    rightEyeRect->width, rightEyeRect->height);
		//Draw left eye
		g->drawRect(eyeWindow.x+leftEyeRect->x, eyeWindow.y+leftEyeRect->y, 
			    leftEyeRect->width, leftEyeRect->height);
		//Exit
		return 0;
	      }
	      // Case 2: Eyes intersect, set the incorrect one to NULL based on region of face we are in
	      else if(l1 < r1 && l1 < r2 && r1 < l2 && l2 < r2){
		// Left eye is to the left of the right eye, choose left eye
		finalLeftEye = leftEyeRect;
		finalRightEye = NULL;
	      } else if(l1 > r1 && l1 > r2 && r1 > l2 && l2 > r2){
		// Left eye is to the right of right eye, choose right eye
		finalLeftEye = NULL;
		finalRightEye = rightEyeRect;
	      }
	      // Case 3: One eye is inside the other.  Just choose one.
	      else{
		int diam2 = eyeWindow.width/2.0f;
		if(r1 < diam2){
		  finalRightEye = NULL;
		  finalLeftEye = leftEyeRect;
		}
		else {
		  finalRightEye = rightEyeRect;
		  finalLeftEye = NULL;
		}
	      }
	    }
	  }
	  // If both eyes not found, just use the last case (make things simple).
	  if(finalRightEye != NULL){
	    g->drawRect(eyeWindow.x+finalRightEye->x, eyeWindow.y+finalRightEye->y, 
			finalRightEye->width, finalRightEye->height);
	  }
	  if(finalLeftEye != NULL){
	    g->drawRect(eyeWindow.x+finalLeftEye->x, eyeWindow.y+finalLeftEye->y, 
			finalLeftEye->width, finalLeftEye->height);
	  }
	}
	return 0;
      } else {
	cout << "Error: No eye cascade present!" << endl;
	return -1;
      }
    }
    
    FollowFace() : StateNode("FollowFace"),
		   VRmixin(),	 
		   iters(0),
		   headMover() {}

    virtual void DoStart(){
      addMotion(headMover);
      iters = 0; // Reinit iters to 0 not only at node init but DoStart as well

      cout << "Starting Tracker...\n" << endl;

      /* Step 1: Get Image */
      /* Image stored for processing.  The openCV image    *
       * corresponds to camera's width and height.  Image  *
       * must be released after being processed.           */
      IplImage *image = cvCreateImage(cvSize(imgWidth, imgHeight), IPL_DEPTH_8U, 3);
      IplImage *temp = cvCreateImage(cvSize(imgWidth, imgHeight), IPL_DEPTH_8U, 3);

      // Create the YUV image from camera frame
      Sketch<yuv> yuvImg(sketchFromYUV());

      // Copy data
      memcpy(temp->imageData, yuvImg->getRawPixels(), imgHeight * imgWidth * 3);

      /* Convert YUV image to IplImage. Note exact pixel values do not 
       * matter only format does, so even though YCrCb is not exactly YUV
       * this still works (there is no openCV YUV conversion) */
      cvCvtColor(temp, image, CV_YCrCb2BGR);

      // Initialize array of faces for processing
      if(faces){
	Faces = new Face[faces->total];
      }else { // This is impossible... but just in case!
	cout << "No faces found" << endl;
	postStateCompletion();
      }

      cout << "Initialized array, going through each face..." << endl;

      // Go through each of the given rectangles and create a face
      for(int i = 0; i < (faces ? faces->total : 0); i++ ) {

	// Get the rectangle from Face Detect method
	pFaceRect = (CvRect*)cvGetSeqElem( faces, i );
	float maxVal = 0.f;
	
	// Set the proportion constants for face width and height
	Faces[i].scaleFaceYFactor = pFaceRect->height/pFaceRect->width;

	cout << "Creating hue image..." << endl;

	/* Step 2: Create hue image */
	// Convert to HSV color model
	cvCvtColor( image, Faces[i].pHSVImg, CV_BGR2HSV );
	
	// Mask out-of-range values
	cvInRangeS( Faces[i].pHSVImg, 
		    cvScalar(0, Faces[i].smin, MIN(Faces[i].vmin, Faces[i].vmax), 0),
		    cvScalar(180, 256, MAX(Faces[i].vmin, Faces[i].vmax) ,0), Faces[i].pMask );
	
	// Extract the hue channel
	cvSplit( Faces[i].pHSVImg, Faces[i].pHueImg, 0, 0, 0 );
	
	// Create a histogram representation for the face
	cvSetImageROI( Faces[i].pHueImg, *pFaceRect );
	cvSetImageROI( Faces[i].pMask,   *pFaceRect );
	cvCalcHist( &Faces[i].pHueImg, Faces[i].pHist, 0, Faces[i].pMask );
	cvGetMinMaxHistValue( Faces[i].pHist, 0, &maxVal, 0, 0 );
	cvConvertScale( Faces[i].pHist->bins, Faces[i].pHist->bins, maxVal? 255.0/maxVal : 0, 0 );
	cvResetImageROI( Faces[i].pHueImg );
	cvResetImageROI( Faces[i].pMask );
	
	// Store the previous face location
	Faces[i].prevFaceRect = *pFaceRect;
      }

      cout << "Releasing images" << endl;

      cvReleaseImage(&image);
      cvReleaseImage(&temp);
      
      // Listen to visRawCameraEGID to draw a box on the camera delimiting area of face
      erouter->addListener(this, EventBase::visRawCameraEGID, 
			   ProjectInterface::visRawCameraSID, EventBase::statusETID); 
      
    }

    virtual void processEvent(const EventBase &e){
      if(e.getGeneratorID() == EventBase::visRawCameraEGID){
	cout << "In process event" << endl;

	IplImage *image = cvCreateImage(cvSize(imgWidth, imgHeight), IPL_DEPTH_8U, 3);
		
	// Create the YUV image from camera frame
	Sketch<yuv> yuvImg(sketchFromYUV());
	
	IplImage *temp = cvCreateImage(cvSize(imgWidth, imgHeight), IPL_DEPTH_8U, 3);
	
	// Convert YUV image to IplImage
	memcpy(temp->imageData, yuvImg->getRawPixels(), imgHeight * imgWidth * 3);
	
	// Note we don't care whether the values are the same for YUV and YCrCb, only format!
	cvCvtColor(temp, image, CV_YCrCb2BGR);

	// Largest rectangle in the image; the camera tracks this rectangle
	CvRect *largest;
	// Largest rectangle chosen based on width, since height is not always accurate
	int largestWidth = 0;
	
	/***** TRACKING ******/
	// For each of the rectangles...
	for(int i = 0; i < (faces ? faces->total : 0); i++ ) {
	  CvConnectedComp components;
	
	  /* Step 2: Create hue image */
	  // Convert to HSV color model
	  cvCvtColor( image, Faces[i].pHSVImg, CV_BGR2HSV );
	  
	  // Mask out-of-range values
	  cvInRangeS( Faces[i].pHSVImg, 
		      cvScalar(0, Faces[i].smin, MIN(Faces[i].vmin,Faces[i].vmax), 0),
		      cvScalar(180, 256, MAX(Faces[i].vmin, Faces[i].vmax) ,0), Faces[i].pMask );
	  
	  // Extract the hue channel
	  cvSplit( Faces[i].pHSVImg, Faces[i].pHueImg, 0, 0, 0 );
	
	  // Create a probability image based on the face histogram
	  cvCalcBackProject( &Faces[i].pHueImg, Faces[i].pProbImg, Faces[i].pHist );
	  cvAnd( Faces[i].pProbImg, Faces[i].pMask, Faces[i].pProbImg, 0 );
	
	  // Use CamShift to find the center of the new face probability
	  cvCamShift( Faces[i].pProbImg, Faces[i].prevFaceRect,
		      cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
		      &components, 0);
	  
	  // Update face location
	  Faces[i].prevFaceRect = components.rect;
	  
	  CvRect* r = &Faces[i].prevFaceRect;
	  cout << "iter: " << iters << " ry: " << r->y << " rx: " << r->x << endl;

	  if(largestWidth < r->width){
	    largestWidth = r->width;
	    largest = r;
	  }

	  // Draw box around face
	  const FilterBankEvent& fbe = dynamic_cast<const FilterBankEvent&>(e);
	  unsigned chan=RawCameraGenerator::CHAN_Y;
	  unsigned int layer=RawCamBehavior::getSourceLayer(chan,fbe.getNumLayers());
	  Graphics g(*fbe.getSource(), layer, chan);
	  g.setColor(255);

	  // The drawn rectangle height must be proportional to the width of the head
	  g.drawRect(r->x,r->y,(int)(r->width*1.1f),
		     (int)((r->width*1.1f)*Faces[i].scaleFaceYFactor*1.1f));
	  
	  int error = EyeDetect(&g, r, image);
	  if(error)
	    postStateSignal<unsigned int >(error);

	}
	/* Similar to StareAtBallBehavior.cc, we stare at the upper left part of the face.
	 * Note that we always stare at the largest rectangle in the image */
	if(!headMover->isDirty()){
	  double vert = (largest->y - 120.0f)/120.0f;
	  double horiz = (largest->x - 160.0f)/160.0f;

	  // Get current pan and tilt values
	  const unsigned int panIdx = 
	    capabilities.findOutputOffset(ERS7Info::outputNames[ERS7Info::HeadOffset
								+ERS7Info::PanOffset]);
	  const unsigned int tiltIdx = 
	    capabilities.findOutputOffset(ERS7Info::outputNames[ERS7Info::HeadOffset
								+ERS7Info::TiltOffset]);

	  // Move the head a small distance in the direction of the target
	  double tilt=state->outputs[tiltIdx]-vert*CameraFOV/7;
	  double pan=state->outputs[panIdx]-horiz*CameraFOV/6;
	  
	  /* Set the joints directly */
	  if(NumHeadJoints>2)
	    tilt/=2; // we're going to replicate the tilt parameter in the next call, so divide by 2
	  headMover->setJoints(tilt,pan,tilt);
	}

	/* Release images stored for openCV */
	cvReleaseImage(&image);
	cvReleaseImage(&temp);

	/* After 50 iterations run through the Face Detection algorithm again */
	if(iters > 50){
	  postStateSignal<unsigned int >(load);
	  cout << "Detecting face again" << endl;
	}
	iters++;
      }
    }
    
    virtual void DoStop(){
      // Release all tracker resources
      cout << "Destroying Faces" << endl;
      delete [] Faces;
    }
  protected:
    MotionPtr<HeadPointerMC> headMover;
  };
  
  /* Main method that sets up the state machine for the face tracker */
  /* Main method that sets up the state machine for the face tracker */
  FaceTracking() : StateNode("FaceTracking"), VRmixin() {}
  /* Builds the state machine.  The text between #statemachine and
   * #endstatemachine is Tekkotsu state-machine DSL, expanded by the
   * stateparser preprocessor, not ordinary C++. */
  virtual void setup() {
    /* Initialize the values for looking around ahead of time.
     * Each (moveX,moveY,moveZ) triple is a point the head looks at
     * while scanning for a face. */
    int moveX1 = 1000, moveY1 = 0, moveZ1 = 400;
    int moveX2 = 350, moveY2 = 300, moveZ2 = 600;
    int moveX3 = 220, moveY3 = -80, moveZ3 = 600;
    int moveX4 = 350, moveY4 = -300, moveZ4 = 300;
    
#statemachine
// ******STATES*******

init: Init()

findFaces1: FindFaces()

findFaces2: FindFaces()
      
findFaces3: FindFaces()

findFaces4: FindFaces()

findFaces5: FindFaces()

// Wait states between transitions

waitFinal: StateNode

wait1: StateNode

wait2: StateNode

wait3: StateNode

wait4: StateNode

// If error occurs, end program

errorNode: StateNode

// Greeting for when the Chiara sees a person

greetingNode: Greeting()

// Four states for moving the camera to find the head

moveHead1: HeadPointerNode[getMC()->lookAtPoint(moveX1, moveY1, moveZ1);]

moveHead2: HeadPointerNode[getMC()->lookAtPoint(moveX2, moveY2, moveZ2);]

moveHead3: HeadPointerNode[getMC()->lookAtPoint(moveX3, moveY3, moveZ3);]

moveHead4: HeadPointerNode[getMC()->lookAtPoint(moveX4, moveY4, moveZ4);]

// One state for following the face

followFace: FollowFace()

// One state for recognizing the face, in this version I just use the demo

//recogFace: RecognizeFace()

recogFace: LGDemo()

// *******TRANSITIONS*******

// Entry point: immediately load the cascades

startnode: StateNode =N=> init

init =S<unsigned int >(load)=> findFaces1

init =S<unsigned int >(error)=> errorNode

// If found face, recognize it

findFaces1 =S<unsigned int >(load)=> {recogFace, greetingNode}

findFaces2 =S<unsigned int >(load)=> {recogFace, greetingNode}

findFaces3 =S<unsigned int >(load)=> {recogFace, greetingNode}

findFaces4 =S<unsigned int >(load)=> {recogFace, greetingNode}

findFaces5 =S<unsigned int >(load)=> {recogFace, greetingNode}

// After recognizing the face, follow it

recogFace =C=> followFace

  //recogFace =S<unsigned int >(notFound)=> lookingGlassDemo
  //recogFace =S<unsigned int >(load)=> followFace

// FollowFace posts 'load' after 50 frames to re-run detection

followFace =S<unsigned int >(load)=> findFaces1

// Otherwise, move the head

findFaces1 =S<unsigned int >(notFound)=> moveHead1

findFaces2 =S<unsigned int >(notFound)=> moveHead2

findFaces3 =S<unsigned int >(notFound)=> moveHead3

findFaces4 =S<unsigned int >(notFound)=> moveHead4

findFaces5 =S<unsigned int >(notFound)=> waitFinal

// If error, end statemachine

findFaces1 =S<unsigned int >(error)=> errorNode

findFaces2 =S<unsigned int >(error)=> errorNode

findFaces3 =S<unsigned int >(error)=> errorNode

findFaces4 =S<unsigned int >(error)=> errorNode

findFaces5 =S<unsigned int >(error)=> errorNode

// After moving the head, try to find face again

moveHead1 =C=> wait1

moveHead2 =C=> wait2

moveHead3 =C=> wait3

moveHead4 =C=> wait4

//After waiting four seconds try to find the face again

wait1 =T(4000)=> findFaces2
		
wait2 =T(4000)=> findFaces3

wait3 =T(4000)=> findFaces4

wait4 =T(4000)=> findFaces5

// If none found, wait 10 seconds before trying to find a face again

waitFinal =T(10000)=> init

#endstatemachine
      
      }
};

#endif
