Commit 637c104f authored by shreyasb

laptop changes, added code for durations

parent 83a8afc9
from pupil_apriltags import Detector
import cv2
import sys
import numpy as np
from tobiiglassesctrl import TobiiGlassesController
import time
import os
import math
import config
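# This script maps Tobii Pro Glasses gaze data onto three screen areas of interest (AOIs)
# located with AprilTag markers, and accumulates per-AOI fixation, saccade, blink and
# fixation-duration counts using a velocity-threshold (I-VT-style) classifier.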
def main():
ip = '192.168.71.50'
tobiiglasses = TobiiGlassesController(ip, video_scene=True)
project_id = tobiiglasses.create_project("Test live_scene_and_gaze.py")
participant_id = tobiiglasses.create_participant(project_id, "participant_test")
calibration_id = tobiiglasses.create_calibration(project_id, participant_id)
print(tobiiglasses.get_battery_status())
input("Put the calibration marker in front of the user, then press enter to calibrate")
tobiiglasses.start_calibration(calibration_id)
res = tobiiglasses.wait_until_calibration_is_done(calibration_id)
if res is False:
print("Calibration failed!")
exit(1)
tobiiglasses.start_streaming()
eye_data = []
max_len = 1000
min_len = 100
total_fixations = 0
total_saccades = 0
num_fixations_in_window = 0 #number of fixations in the last window
AOIfixations = [0] * 4 #screen1, screen2, screen3, screen4
AOIsaccades = [0] * 4
num_saccades_in_window = 0
AOIblinks = [0] * 4
AOIfixationDurations = [0] * 4
vel_threshold = 100 # degrees per second
# discard_short_fixations
# merge_close_fixations
test_time = 30
camera_rate = 30
num_apriltags = 15
cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
cap.set(cv2.CAP_PROP_FPS, camera_rate)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920) # set the resolution
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
if cap.isOpened():
print("Succesfully connected to webcam...")
else:
print("Could not connect to camera...")
tobiiglasses.stop_streaming()
sys.exit(1)
background = cv2.imread('background36h11.jpg')
background = cv2.resize(background, (640, 360))
gray_back = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)  # imread loads images as BGR
# detector = Detector(families='tag25h9', nthreads=1, quad_decimate=1.0)
detector = Detector(families='tag36h11', nthreads=1, quad_decimate=1.0)
back_detections = detector.detect(gray_back)
if len(back_detections) < num_apriltags:
print("Could not find all tags in background image, only found:", len(back_detections))
tobiiglasses.stop_streaming()
sys.exit(1)
fix = {}
#######IDS NEED TO CHANGE#############
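# The reference-image detections serve two purposes: selected tag centers define the
# screen AOI bounds, and every tag's corners + center are stored in fix as the target
# points for the frame-to-background homography.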
for detection in back_detections:
id_ = detection.tag_id
center = detection.center
if id_ == 8:
s1_min_x = center[0]
elif id_ == 11:
s1_min_y = center[1]
elif id_ == 14:
s1_max_y = center[1]
elif id_ == 12:
s1_max_x = center[0]
elif id_ == 0:
s2_min_x = center[0]
s2_max_y = center[1]
elif id_ == 1:
s2_min_y = center[1]
elif id_ == 3:
s2_max_x = center[0]
elif id_ == 4:
s3_min_x = center[0]
elif id_ == 5:
s3_min_y = center[1]
elif id_ == 7:
s3_max_x = center[0]
s3_max_y = center[1]
corners = detection.corners
point_list = list(corners) + [center]
fix[id_] = point_list
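# P2 projects the 3D gaze point into scene-camera pixels. config.K2 is presumably the
# scene-camera intrinsics and config.Rigid_body the transform between the eye-tracker
# and scene-camera frames; their exact conventions live in config and are assumed here.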
H = np.eye(3)
Ex = np.eye(4)
P2 = np.dot(config.K2, Ex)
P2 = np.dot(P2, config.Rigid_body)
last_ts = 0
start = time.time()
while True:
comp_start_time = time.time()
data = tobiiglasses.get_data()
if len(eye_data) > max_len:
eye_data.pop(0)
# 3d gaze coordinate data
gaze_data = data['gp3']
# timestamp
ts = gaze_data['ts']
if 'gp3' in gaze_data.keys():
# 3d gaze coordinate
gaze3 = gaze_data['gp3']
else:
gaze3 = [-1] * 3
if len(gaze3) < 4: # Homogenizing
gaze3.append(1)
# print("gaze3 in main", gaze3)
# gaze3 = np.array(gaze3)
# pupil diameter
if 'pd' in data['left_eye']['pd'].keys():
pd_left = data['left_eye']['pd']['pd']
else:
pd_left = -1
if 'pd' in data['right_eye']['pd'].keys():
pd_right = data['right_eye']['pd']['pd']
else:
pd_right = -1
# Right eye pupil postion
Pc_right = data['right_eye']['pc']
if 'pc' in Pc_right.keys():
pc_right = Pc_right['pc']
else:
pc_right = [-1] * 3
_, frame = cap.read()
gray_frame = cv2.resize(frame, (640, 360))
gray_frame = cv2.cvtColor(gray_frame, cv2.COLOR_BGR2GRAY)
result = detector.detect(gray_frame)
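# Gaze mapping: project the 3D gaze point with P2, rescale from the 1920x1080 capture to
# the 640x360 working resolution, then warp into the background-image frame with the
# per-frame homography before testing the result against the screen AOI bounds.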
if not ((gaze3[0] == -1 and gaze3[1] == -1 and gaze3[2] == -1) or pd_left == -1 or pd_right == -1):
gaze3[0] = -gaze3[0]
gaze3[1] = -gaze3[1]
gaze_screen = -1
gaze_mapped = np.dot(P2, gaze3) ## 3X1
gaze_mapped = gaze_mapped/gaze_mapped[2]
gaze_mapped[0] = gaze_mapped[0] / 1920*640 ## TODO: Need to check 1920 -> 640
gaze_mapped[1] = gaze_mapped[1] / 1080*360
H = get_hom(result, fix)
if H is not None:
gaze_mapped = np.dot(H, gaze_mapped)
gaze_mapped = gaze_mapped/gaze_mapped[2]
# cv2.circle(bg_copy, (int(gaze_mapped_back[0]), int(gaze_mapped_back[1])), 2, (0,0,255), 40)
if gaze_mapped[0] > s1_min_x and gaze_mapped[1] > s1_min_y and gaze_mapped[0] < s1_max_x and gaze_mapped[1] < s1_max_y:
## LOOKING AT SCREEN1######
print("Looking at screen 1", end='\r')
gaze_screen = 1
elif gaze_mapped[0] > s2_min_x and gaze_mapped[1] > s2_min_y and gaze_mapped[0] < s2_max_x and gaze_mapped[1] < s2_max_y: # and gaze_mapped[1] > 180:
# LOOKING AT SCREEN2 ######
print("Looking at screen 2", end='\r')
gaze_screen = 2
elif gaze_mapped[0] > s3_min_x and gaze_mapped[1] > s3_min_y and gaze_mapped[0] < s3_max_x and gaze_mapped[1] < s3_max_y:
# LOOKING AT SCREEN 3 ###
print("Looking at screen 3", end='\r')
gaze_screen = 3
else:
print("Looking at screen 0", end='\r')
gaze_screen = -1
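# Each stored sample is [ts, gaze3, pc_right, gaze_mapped, gaze_screen].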
if len(eye_data) == 0 or eye_data[-1][0] != ts:  # only keep samples with a new timestamp
eye_data.append([ts, gaze3, pc_right, gaze_mapped, gaze_screen])
if len(eye_data) < min_len:
continue
ret_data = computeFixations(eye_data,
vel_threshold,
total_fixations,
total_saccades,
num_fixations_in_window,
AOIfixations,
AOIsaccades,
num_saccades_in_window,
AOIfixationDurations)
AOIfixations = ret_data[0]
AOIsaccades = ret_data[1]
total_fixations = ret_data[2]
total_saccades = ret_data[3]
num_fixations_in_window = ret_data[4]
num_saccades_in_window = ret_data[5]
AOIfixationDurations = ret_data[6]
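# Blink heuristic: a repeated gaze timestamp while the gaze is on a screen is counted as
# a blink for that AOI.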
if last_ts == ts and gaze_screen > 0:
AOIblinks[gaze_screen - 1] += 1
last_ts = ts
elapsed_one_iter = time.time() - comp_start_time
if elapsed_one_iter < camera_rate * 1e-3:
time.sleep(camera_rate * 1e-3 - elapsed_one_iter)
# else:
# print("Took %2.2f s longer for computations"%(elapsed_one_iter - camera_rate * 1e-3))
elapsed = time.time() - start
if elapsed > test_time:
print()
print()
break
tobiiglasses.stop_streaming()
return total_fixations, total_saccades, AOIfixations, AOIsaccades, AOIblinks, AOIfixationDurations
def get_hom(result, fix):
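# Estimate a RANSAC homography from the tag points detected in the current frame
# (src_pts) to the corresponding reference-image points stored in fix (dst_pts).
# Returns None when none of the known tags are visible in the frame.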
src_pts = np.empty((0, 2))
dst_pts = np.empty((0, 2))
H = None
for detection in result:
if detection.tag_id in fix:
center = detection.center
corners = detection.corners
point_list = list(corners) + [center]
src_pts = np.append(src_pts, point_list, axis=0)
dst_pts = np.append(dst_pts, np.array(fix[detection.tag_id]), axis=0)
if src_pts.shape[0] > 0:
H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
return H
def increment_fixations(data, AOIfixations, AOIfixationDurations):
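# Walk the labelled samples fixation by fixation, assign each fixation to the AOI of its
# last sample, and add its duration (last timestamp minus first, in seconds) to that
# AOI's running total.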
fix_num = data[1][0]
i = 0
length = len(data[1])
while i < length:
screen = -1
single_fix_data = []
while i < length and data[1][i] == fix_num:
single_fix_data.append(data[0][i])
i += 1
if len(single_fix_data) != 0:
# print(single_fix_data[-1])
screen = single_fix_data[-1][-1]
duration = (single_fix_data[-1][0] - single_fix_data[0][0]) * 1e-6  # last ts minus first ts, microseconds to seconds
# print("screen", screen)
# print("Screen, fix_num", screen, fix_num)
fix_num += 1
i += 1
if screen > 0:
AOIfixations[screen - 1] += 1
AOIfixationDurations[screen - 1] += duration
# print("AOI fixations", AOIfixations)
return AOIfixations, AOIfixationDurations
def cross(v1, v2):
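# Magnitude of the cross product of the 3-vectors v1 and v2.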
p1 = v1[1] * v2[2] - v1[2] * v2[1]
p2 = v1[2] * v2[0] - v1[0] * v2[2]
p3 = v1[0] * v2[1] - v1[1] * v2[0]
return math.sqrt(p1**2 + p2**2 + p3**2)
def dot(v1, v2):
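# Dot product of the 3-vectors v1 and v2.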
dot_prod = 0
for i in range(3):
dot_prod += v1[i] * v2[i]
return dot_prod
def calculateVelocity(ts1, gp3_1, ts2, gp3_2, pc_right_1, pc_right_2):
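# Angular gaze velocity in degrees per second between two samples: the angle between the
# gaze vectors (gaze point minus right-eye pupil centre) divided by the timestamp
# difference, with timestamps in microseconds.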
# print("gp3_1", gp3_1)
# print("gp3_2", gp3_2)
# print("ts1", ts1)
# print("ts2", ts2)
v1 = np.array(gp3_1) - np.array(pc_right_1)
v2 = np.array(gp3_2) - np.array(pc_right_2)
cross_prod = cross(v1, v2)
dot_prod = dot(v1, v2)
angle = math.atan2(cross_prod, dot_prod) * 180 / math.pi
vel = angle / math.fabs(ts1 - ts2) * 1e6
# print("vel", vel)
return vel
def increment_saccades(data, AOIsaccades):
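# Walk the labelled samples saccade by saccade and assign each saccade to the AOI of its
# last sample.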
sac_num = data[1][0]
i = 0
length = len(data[1])
while i < length:
screen = -1
single_sac_data = []
while i < length and data[1][i] == sac_num:
single_sac_data.append(data[0][i])
i += 1
if len(single_sac_data) != 0:
# print(single_fix_data[-1])
screen = single_sac_data[-1][-1]
# print("screen", screen)
# print("Screen, fix_num", screen, fix_num)
sac_num += 1
i += 1
if screen > 0:
AOIsaccades[screen - 1] += 1
# print("AOI fixations", AOIfixations)
return AOIsaccades
def computeFixations(eye_data, vel_threshold, total_fixations, total_saccades, num_fixations_in_window, AOIfixations, AOIsaccades, num_saccades_in_window, AOIfixationDurations):
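# Velocity-threshold classification over the current sample window: consecutive samples
# below vel_threshold form fixations, samples at or above it form saccades. Only
# fixations/saccades numbered beyond those already processed in the previous window are
# added to the AOI totals, so repeated calls on the sliding window avoid double-counting.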
fix_num = 1
sac_num = 1
fix_num_changed = True
sac_num_changed = True
fixations = [0] * len(eye_data)
saccades = [0] * len(eye_data)
for i in range(1, len(eye_data)):
ts1 = eye_data[i - 1][0]
gp3_1 = eye_data[i - 1][1][:3]
pc_right_1 = eye_data[i - 1][2]
ts2 = eye_data[i][0]
gp3_2 = eye_data[i][1][:3]
pc_right_2 = eye_data[i][2]
invalid_data_1 = pc_right_1 == [-1, -1, -1]
invalid_data_2 = pc_right_2 == [-1, -1, -1]
if ts1 == ts2 or invalid_data_1 or invalid_data_2:
continue
vel = calculateVelocity(ts1, gp3_1, ts2, gp3_2, pc_right_1, pc_right_2)
if vel < vel_threshold:
fix_num_changed = False
fixations[i - 1] = fix_num
fixations[i] = fix_num
elif not fix_num_changed:
fix_num += 1
fix_num_changed = True
if vel >= vel_threshold:
sac_num_changed = False
saccades[i - 1] = sac_num
saccades[i] = sac_num
elif not sac_num_changed:
sac_num += 1
sac_num_changed = True
# if num_fixations_in_window == fix_num:
# do nothing
if num_fixations_in_window < max(fixations):
# map all the gaze coordinates above the num_fix_in_window
# update num_fix_in_window (to return)
# increment the fixation numbers appropriately
# increment total_fixations by the difference fix_num - num_fixations_in_window
# try:
idx = fixations.index(num_fixations_in_window + 1)
mapped_data = [eye_data[idx:], fixations[idx:]]
AOIfixations, AOIfixationDurations = increment_fixations(mapped_data, AOIfixations, AOIfixationDurations)
total_fixations = sum(AOIfixations)
num_fixations_in_window = max(fixations)
# except:
# pass
if num_saccades_in_window < max(saccades):
idx = saccades.index(num_saccades_in_window + 1)
mapped_data = [eye_data[idx:], saccades[idx:]]
AOIsaccades = increment_saccades(mapped_data, AOIsaccades)
total_saccades = sum(AOIsaccades)
num_saccades_in_window = max(saccades)
return AOIfixations, AOIsaccades, total_fixations, total_saccades, num_fixations_in_window, num_saccades_in_window, AOIfixationDurations
if __name__ == "__main__":
ret_data = main()
total_fixations = ret_data[0]
total_saccades = ret_data[1]
AOIfixations = ret_data[2]
AOIsaccades = ret_data[3]
AOIblinks = ret_data[4]
AOIfixationDurations = ret_data[5]
print()
print()
print("Total number of fixations", total_fixations)
print("Fixations in each AOI", AOIfixations)
print("Total number of saccades", total_saccades)
print("Saccades in each AOI", AOIsaccades)
print("Blinks in each AOI", AOIblinks)
@@ -30,18 +30,22 @@ def main():
max_len = 1000
min_len = 100
total_fixations = 0
total_saccades = 0
num_fixations_in_window = 0 #number of fixations in the last window
AOIfixations = [0] * 4 #screen1, screen2, screen3, screen4
AOIsaccades = [0] * 4
num_saccades_in_window = 0
AOIblinks = [0] * 4
vel_threshold = 45 # degrees per second
vel_threshold = 30 # degrees per second
# discard_short_fixations
# merge_close_fixations
test_time = 60
test_time = 30
camera_rate = 30
num_apriltags = 15
cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
cap.set(cv2.CAP_PROP_FPS, camera_rate)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920) # set the resolution
@@ -103,6 +107,8 @@ def main():
P2 = np.dot(config.K2, Ex)
P2 = np.dot(P2, config.Rigid_body)
last_ts = 0
start = time.time()
while True:
@@ -193,10 +199,24 @@ def main():
if len(eye_data) < min_len:
continue
AOIfixations, total_fixations, num_fixations_in_window = computeFixations(eye_data, vel_threshold, total_fixations, num_fixations_in_window, AOIfixations)
# last_total_fixations = total_fixations
ret_data = computeFixations(eye_data,
vel_threshold,
total_fixations,
total_saccades,
num_fixations_in_window,
AOIfixations,
AOIsaccades,
num_saccades_in_window)
AOIfixations = ret_data[0]
AOIsaccades = ret_data[1]
total_fixations = ret_data[2]
total_saccades = ret_data[3]
num_fixations_in_window = ret_data[4]
num_saccades_in_window = ret_data[5]
if last_ts == ts and gaze_screen > 0:
AOIblinks[gaze_screen - 1] += 1
last_ts = ts
elapsed_one_iter = time.time() - comp_start_time
if elapsed_one_iter < camera_rate * 1e-3:
time.sleep(camera_rate * 1e-3 - elapsed_one_iter)
@@ -205,12 +225,13 @@ def main():
elapsed = time.time() - start
if elapsed > test_time:
print()
print()
break
tobiiglasses.stop_streaming()
return total_fixations, AOIfixations
return total_fixations, total_saccades, AOIfixations, AOIsaccades, AOIblinks
def get_hom(result, fix):
src_pts = np.empty((0, 2))
@@ -288,11 +309,39 @@ def calculateVelocity(ts1, gp3_1, ts2, gp3_2, pc_right_1, pc_right_2):
return vel
def computeFixations(eye_data, vel_threshold, total_fixations, num_fixations_in_window, AOIfixations):
def increment_saccades(data, AOIsaccades):
sac_num = data[1][0]
i = 0
length = len(data[1])
while i < length:
screen = -1
single_sac_data = []
while i < length and data[1][i] == sac_num:
single_sac_data.append(data[0][i])
i += 1
if len(single_sac_data) != 0:
# print(single_fix_data[-1])
screen = single_sac_data[-1][-1]
# print("screen", screen)
# print("Screen, fix_num", screen, fix_num)
sac_num += 1
i += 1
if screen > 0:
AOIsaccades[screen - 1] += 1
# print("AOI fixations", AOIfixations)
return AOIsaccades
def computeFixations(eye_data, vel_threshold, total_fixations, total_saccades, num_fixations_in_window, AOIfixations, AOIsaccades, num_saccades_in_window):
fix_num = 1
sac_num = 1
fix_num_changed = True
sac_num_changed = True
fixations = [0] * len(eye_data)
saccades = [0] * len(eye_data)
for i in range(1, len(eye_data)):
ts1 = eye_data[i - 1][0]
@@ -313,11 +362,21 @@ def computeFixations(eye_data, vel_threshold, total_fixations, num_fixations_in_
fix_num_changed = False
fixations[i - 1] = fix_num
fixations[i] = fix_num
elif not fix_num_changed:
fix_num += 1
fix_num_changed = True
if vel >= vel_threshold:
sac_num_changed = False
saccades[i - 1] = sac_num
saccades[i] = sac_num
elif not sac_num_changed:
sac_num += 1
sac_num_changed = True
# if num_fixations_in_window == fix_num:
# do nothing
if num_fixations_in_window < max(fixations):
@@ -336,9 +395,28 @@ def computeFixations(eye_data, vel_threshold, total_fixations, num_fixations_in_
# except:
# pass
return AOIfixations, total_fixations, num_fixations_in_window
if num_saccades_in_window < max(saccades):
idx = saccades.index(num_saccades_in_window + 1)
mapped_data = [eye_data[idx:], saccades[idx:]]
AOIsaccades = increment_saccades(mapped_data, AOIsaccades)
total_saccades = sum(AOIsaccades)
num_saccades_in_window = max(saccades)
return AOIfixations, AOIsaccades, total_fixations, total_saccades, num_fixations_in_window, num_saccades_in_window
if __name__ == "__main__":
total_fixations, AOIfixations = main()
ret_data = main()
total_fixations = ret_data[0]
total_saccades = ret_data[1]
AOIfixations = ret_data[2]
AOIsaccades = ret_data[3]
AOIblinks = ret_data[4]
print()
print()
print("Total number of fixations", total_fixations)
print("Fixations in each AOI", AOIfixations)
print("Total number of saccades", total_saccades)
print("Saccades in each AOI", AOIsaccades)
print("Blinks in each AOI", AOIblinks)