Skip to content
Snippets Groups Projects
Commit ee803b16 authored by shreyasb's avatar shreyasb
Browse files

added map_and_send

parent 97531870
Branches
No related tags found
No related merge requests found
......@@ -23,8 +23,6 @@ def main():
print("Calibration failed!")
exit(1)
tobiiglasses.start_streaming()
eye_data = []
AOIblinks = [0] * 4
......@@ -74,7 +72,7 @@ def main():
print("Succesfully connected to webcam...")
else:
print("Could not connect to camera...")
tobiiglasses.stop_streaming()
# tobiiglasses.stop_streaming()
sys.exit(1)
background = cv2.imread('background36h11.jpg')
......@@ -86,7 +84,7 @@ def main():
if len(back_detections) < num_apriltags:
print("Could not find all tags in background image, only found:", len(back_detections))
tobiiglasses.stop_streaming()
# tobiiglasses.stop_streaming()
sys.exit(1)
fix = {}
......@@ -127,6 +125,7 @@ def main():
P2 = np.dot(P2, config.Rigid_body)
start = time.time()
tobiiglasses.start_streaming()
while True:
comp_start_time = time.time()
......
from pupil_apriltags import Detector
from cv2 import cv2
import sys
import numpy as np
from tobiiglassesctrl import TobiiGlassesController
import time
import math
import config
import socket
def main():
    """Stream gaze data from Tobii Pro Glasses, map each 3D gaze point onto
    a tag-annotated background image, classify which screen the wearer is
    looking at, and print (optionally send) iMotions-formatted messages.

    Requires the glasses and a webcam to be reachable, plus the reference
    image 'background36h11.jpg' in the working directory. Runs until
    interrupted (Ctrl-C); releases the camera and stops streaming on exit.
    """
    # iMotions connection settings (network send is currently disabled)
    iMotions_ip = '192.168.71.60'
    port = 8125
    # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # s.connect((iMotions_ip, port))
    tobii_ip = '192.168.71.50'
    tobiiglasses = TobiiGlassesController(tobii_ip, video_scene=True)
    project_id = tobiiglasses.create_project("Test live_scene_and_gaze.py")
    participant_id = tobiiglasses.create_participant(project_id, "participant_test")
    calibration_id = tobiiglasses.create_calibration(project_id, participant_id)
    print(tobiiglasses.get_battery_status())
    input("Put the calibration marker in front of the user, then press enter to calibrate")
    tobiiglasses.start_calibration(calibration_id)
    res = tobiiglasses.wait_until_calibration_is_done(calibration_id)
    if res is False:
        print("Calibration failed!")
        sys.exit(1)  # was exit(1); sys.exit is what every other exit path uses
    camera_rate = 30    # target loop frequency (Hz); also requested from the webcam
    num_apriltags = 15  # tags expected in the background reference image
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
    cap.set(cv2.CAP_PROP_FPS, camera_rate)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)  # set the resolution
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    if cap.isOpened():
        print("Successfully connected to webcam...")
    else:
        print("Could not connect to camera...")
        # tobiiglasses.stop_streaming()
        sys.exit(1)
    background = cv2.imread('background36h11.jpg')
    background = cv2.resize(background, (640, 360))
    # cv2.imread returns BGR, so BGR2GRAY is the correct conversion here
    # (was RGB2GRAY, which swaps the red/blue channel weights and disagrees
    # with the live-frame conversion inside the loop below).
    gray_back = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
    detector = Detector(families='tag36h11', nthreads=1, quad_decimate=1.0)
    back_detections = detector.detect(gray_back)
    if len(back_detections) < num_apriltags:
        print("Could not find all tags in background image, only found:", len(back_detections))
        # tobiiglasses.stop_streaming()
        cap.release()  # free the webcam before bailing out
        sys.exit(1)
    fix = {}  # tag_id -> [4 corners + center] in background-image coordinates
    # Screen bounding boxes (pixels in the 640x360 background image).
    # Initialized to None so a missing corner tag is reported below instead
    # of raising NameError at the first comparison inside the main loop.
    s1_min_x = s1_min_y = s1_max_x = s1_max_y = None
    s2_min_x = s2_min_y = s2_max_x = s2_max_y = None
    s3_min_x = s3_min_y = s3_max_x = s3_max_y = None
    #######IDS NEED TO CHANGE#############
    for detection in back_detections:
        id_ = detection.tag_id
        center = detection.center
        if id_ == 8:
            s1_min_x = center[0]
        elif id_ == 11:
            s1_min_y = center[1]
        elif id_ == 14:
            s1_max_y = center[1]
        elif id_ == 12:
            s1_max_x = center[0]
        elif id_ == 0:
            s2_min_x = center[0]
            s2_max_y = center[1]
        elif id_ == 1:
            s2_min_y = center[1]
        elif id_ == 3:
            s2_max_x = center[0]
        elif id_ == 4:
            s3_min_x = center[0]
        elif id_ == 5:
            s3_min_y = center[1]
        elif id_ == 7:
            s3_max_x = center[0]
            s3_max_y = center[1]
        corners = detection.corners
        point_list = list(corners) + [center]
        fix[id_] = point_list
    bounds = (s1_min_x, s1_min_y, s1_max_x, s1_max_y,
              s2_min_x, s2_min_y, s2_max_x, s2_max_y,
              s3_min_x, s3_min_y, s3_max_x, s3_max_y)
    if any(b is None for b in bounds):
        # num_apriltags detections do not guarantee the specific corner IDs
        # were among them; fail loudly instead of NameError-ing mid-loop.
        print("Background image is missing one of the screen-corner tags")
        cap.release()
        sys.exit(1)
    H = np.eye(3)  # frame -> background homography (re-estimated every frame)
    Ex = np.eye(4)
    # Scene-camera projection: intrinsics K2 composed with the glasses'
    # rigid-body transform (both supplied by config).
    P2 = np.dot(config.K2, Ex)
    P2 = np.dot(P2, config.Rigid_body)
    # start = time.time()
    tobiiglasses.start_streaming()
    try:
        while True:
            comp_start_time = time.time()
            data = tobiiglasses.get_data()
            # 3d gaze coordinate data
            gaze_data = data['gp3']
            # timestamp
            ts = gaze_data['ts']
            if 'gp3' in gaze_data:
                # 3d gaze coordinate
                gaze3 = gaze_data['gp3']
            else:
                gaze3 = [-1] * 3  # sentinel: no gaze sample this tick
            if len(gaze3) < 4:  # Homogenizing
                gaze3.append(1)
            # pupil diameter (-1 when that eye was not tracked)
            if 'pd' in data['left_eye']['pd']:
                pd_left = data['left_eye']['pd']['pd']
            else:
                pd_left = -1
            if 'pd' in data['right_eye']['pd']:
                pd_right = data['right_eye']['pd']['pd']
            else:
                pd_right = -1
            # Right eye pupil position
            Pc_right = data['right_eye']['pc']
            if 'pc' in Pc_right:
                pc_right = Pc_right['pc']
            else:
                pc_right = [-1] * 3
            # Left eye pupil position
            Pc_left = data['left_eye']['pc']
            if 'pc' in Pc_left:
                pc_left = Pc_left['pc']
            else:
                pc_left = [-1] * 3
            _, frame = cap.read()
            gray_frame = cv2.resize(frame, (640, 360))
            gray_frame = cv2.cvtColor(gray_frame, cv2.COLOR_BGR2GRAY)
            result = detector.detect(gray_frame)
            # Only map/report when we have a gaze point, at least one pupil
            # centre and at least one pupil diameter.
            if not ((-1 in gaze3) or (-1 in pc_left and -1 in pc_right) or (pd_left == -1 and pd_right == -1)):
                if pd_right != -1 and pd_left != -1:
                    pd = 0.5 * (pd_right + pd_left)  # average of both eyes
                elif pd_left == -1:
                    pd = pd_right
                elif pd_right == -1:
                    pd = pd_left
                # Flip x/y sign to match the scene-camera axis convention
                # assumed by P2 -- NOTE(review): confirm against calibration.
                gaze3[0] = -gaze3[0]
                gaze3[1] = -gaze3[1]
                gaze_screen = -1
                gaze_mapped = np.dot(P2, gaze3)  ## 3X1 projection to scene-camera pixels
                gaze_mapped = gaze_mapped / gaze_mapped[2]  # dehomogenize
                gaze_mapped[0] = gaze_mapped[0] / 1920 * 640  ## TODO: Need to check 1920 -> 640
                gaze_mapped[1] = gaze_mapped[1] / 1080 * 360
                H = get_hom(result, fix)
                if H is not None:
                    # Warp the gaze point into background-image coordinates
                    gaze_mapped = np.dot(H, gaze_mapped)
                    gaze_mapped = gaze_mapped / gaze_mapped[2]
                # cv2.circle(bg_copy, (int(gaze_mapped_back[0]), int(gaze_mapped_back[1])), 2, (0,0,255), 40)
                if s1_min_x < gaze_mapped[0] < s1_max_x and s1_min_y < gaze_mapped[1] < s1_max_y:
                    ## LOOKING AT SCREEN1######
                    print("Looking at screen 1", end='\r')
                    gaze_screen = 1
                elif s2_min_x < gaze_mapped[0] < s2_max_x and s2_min_y < gaze_mapped[1] < s2_max_y:
                    # LOOKING AT SCREEN2 ######
                    print("Looking at screen 2", end='\r')
                    gaze_screen = 2
                elif s3_min_x < gaze_mapped[0] < s3_max_x and s3_min_y < gaze_mapped[1] < s3_max_y:
                    # LOOKING AT SCREEN 3 ###
                    print("Looking at screen 3", end='\r')
                    gaze_screen = 3
                else:
                    print("Looking at screen 0", end='\r')
                    gaze_screen = -1
                msg = generate_message(ts, gaze3, gaze_mapped, pd, gaze_screen, pc_left, pc_right)
                print(msg)
                # s.send(msg)
            # Sleep out the remainder of the frame period to hold ~camera_rate Hz
            elapsed_one_iter = time.time() - comp_start_time
            if elapsed_one_iter < 1 / camera_rate:
                time.sleep(1 / camera_rate - elapsed_one_iter)
    finally:
        # Release hardware resources even on Ctrl-C or an unexpected error;
        # previously the camera was never released and streaming never stopped.
        cap.release()
        tobiiglasses.stop_streaming()
def generate_message(ts, gaze3, gaze_mapped, pd, gaze_screen, pc_left, pc_right):
    """Build one iMotions 'EyeTrackerSample' event line.

    Fields after the fixed header, semicolon-separated: timestamp, 3D gaze
    point (x, y, z), mapped 2D gaze (x, y), pupil diameter, screen index,
    left pupil centre (x, y, z), right pupil centre (x, y, z). The line is
    terminated with CRLF.

    Returns the message as a str.
    """
    fields = [ts]
    fields.extend(gaze3[:3])        # only x, y, z (gaze3 may carry a homogeneous 1)
    fields.extend(gaze_mapped[:2])  # mapped pixel coordinates
    fields.append(pd)
    fields.append(gaze_screen)
    fields.extend(pc_left[:3])
    fields.extend(pc_right[:3])
    # Single join instead of repeated += (quadratic string building).
    header = 'E;1;EyeTrack;;;;EyeTrackerSample;'
    return header + ';'.join(str(f) for f in fields) + '\r\n'
def get_hom(result, fix):
    """Estimate the homography mapping live-frame tag points onto the
    background reference points.

    result: AprilTag detections from the current camera frame.
    fix:    dict of tag_id -> [4 corners + center] in background coordinates.

    Returns the 3x3 homography matrix, or None when no known tag was
    detected in the frame (or RANSAC failed to find one).
    """
    src_list = []  # tag points seen in the current frame
    dst_list = []  # matching reference points from the background image
    for detection in result:
        reference = fix.get(detection.tag_id)
        if reference is None:
            continue  # tag not present in the background image
        src_list.extend(list(detection.corners) + [detection.center])
        dst_list.extend(reference)
    if not src_list:
        return None
    # Convert once at the end instead of np.append per tag, which copies
    # the whole array on every call (quadratic in the number of points).
    src_pts = np.asarray(src_list, dtype=np.float64)
    dst_pts = np.asarray(dst_list, dtype=np.float64)
    H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    return H
# Run the live gaze-mapping pipeline only when executed as a script.
if __name__ == "__main__":
    main()
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment