diff --git a/leaderboard/scripts/run_evaluation.sh b/leaderboard/scripts/run_evaluation.sh
index ef0740fe656e2d1254bcc120f94a65cfde948d11..f3337f31330bc68e57c7b07aaccdd7d6162a8bae 100755
--- a/leaderboard/scripts/run_evaluation.sh
+++ b/leaderboard/scripts/run_evaluation.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-export CARLA_ROOT=<path_to_carla_directory>
+export CARLA_ROOT=carla
 export CARLA_SERVER=${CARLA_ROOT}/CarlaUE4.sh
 export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI
 export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla
@@ -16,11 +16,11 @@ export TM_PORT=8000 # port for traffic manager, required when spawning multiple
 export DEBUG_CHALLENGE=0
 export REPETITIONS=1 # multiple evaluation runs
 export ROUTES=leaderboard/data/evaluation_routes/routes_town05_long.xml
-export TEAM_AGENT=leaderboard/team_code/aim_agent.py # agent
-export TEAM_CONFIG=aim/log/aim_ckpt # model checkpoint
+export TEAM_AGENT=leaderboard/team_code/auto_pilot.py # expert agent
+export TEAM_CONFIG=aim/log/aim_ckpt # model checkpoint, not required for expert
 export CHECKPOINT_ENDPOINT=results/sample_result.json # results file
 export SCENARIOS=leaderboard/data/scenarios/no_scenarios.json
-export SAVE_PATH=visualization/aim # path for saving episodes while evaluating
+export SAVE_PATH=data/expert # path for saving episodes while evaluating
 export RESUME=True
 
 python3 ${LEADERBOARD_ROOT}/leaderboard/leaderboard_evaluator.py \
diff --git a/leaderboard/team_code/auto_pilot.py b/leaderboard/team_code/auto_pilot.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0f741fad95f30bc50c5934453db6393e66b3340
--- /dev/null
+++ b/leaderboard/team_code/auto_pilot.py
@@ -0,0 +1,414 @@
+import os
+import datetime
+import pathlib
+import json
+import random
+
+import numpy as np
+import carla
+from PIL import Image
+
+from team_code.map_agent import MapAgent
+from team_code.pid_controller import PIDController
+
+
+SAVE_PATH = os.environ.get('SAVE_PATH', None)
+
+
+WEATHERS = {
+        'ClearNoon': carla.WeatherParameters.ClearNoon,
+        'ClearSunset': carla.WeatherParameters.ClearSunset,
+
+        'CloudyNoon': carla.WeatherParameters.CloudyNoon,
+        'CloudySunset': carla.WeatherParameters.CloudySunset,
+
+        'WetNoon': carla.WeatherParameters.WetNoon,
+        'WetSunset': carla.WeatherParameters.WetSunset,
+
+        'MidRainyNoon': carla.WeatherParameters.MidRainyNoon,
+        'MidRainSunset': carla.WeatherParameters.MidRainSunset,
+
+        'WetCloudyNoon': carla.WeatherParameters.WetCloudyNoon,
+        'WetCloudySunset': carla.WeatherParameters.WetCloudySunset,
+
+        'HardRainNoon': carla.WeatherParameters.HardRainNoon,
+        'HardRainSunset': carla.WeatherParameters.HardRainSunset,
+
+        'SoftRainNoon': carla.WeatherParameters.SoftRainNoon,
+        'SoftRainSunset': carla.WeatherParameters.SoftRainSunset,
+}
+WEATHERS_IDS = list(WEATHERS)
+
+
+def get_entry_point():
+    return 'AutoPilot'
+
+
+def _numpy(carla_vector, normalize=False):
+    result = np.float32([carla_vector.x, carla_vector.y])
+
+    if normalize:
+        return result / (np.linalg.norm(result) + 1e-4)
+
+    return result
+
+
+def _location(x, y, z):
+    return carla.Location(x=float(x), y=float(y), z=float(z))
+
+
+def _orientation(yaw):
+    return np.float32([np.cos(np.radians(yaw)), np.sin(np.radians(yaw))])
+
+
+def get_collision(p1, v1, p2, v2):
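+    # Constant-velocity forecast: solve p1 + t1*v1 == p2 + t2*v2 for (t1, t2).
+    # A near-singular system means the two headings are (almost) parallel, so
+    # no crossing point is reported.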
+    A = np.stack([v1, -v2], 1)
+    b = p2 - p1
+
+    if abs(np.linalg.det(A)) < 1e-3:
+        return False, None
+
+    x = np.linalg.solve(A, b)  # x = (t1, t2): time parameters along each path until they cross
+    collides = all(x >= 0) and all(x <= 1)  # crossing lies within both forecast segments
+
+    return collides, p1 + x[0] * v1
+
+
+def check_episode_has_noise(lat_noise_percent, long_noise_percent):
+    # independently decide whether to inject lateral / longitudinal noise,
+    # each with the given percentage probability
+    lat_noise = random.randint(0, 99) < lat_noise_percent
+    long_noise = random.randint(0, 99) < long_noise_percent
+
+    return lat_noise, long_noise
+
+
+class AutoPilot(MapAgent):
+
+    # for stop signs
+    PROXIMITY_THRESHOLD = 30.0  # meters
+    SPEED_THRESHOLD = 0.1  # m/s; below this the vehicle counts as stopped
+    WAYPOINT_STEP = 1.0  # meters
+
+    def setup(self, path_to_conf_file):
+        super().setup(path_to_conf_file)
+        self.weather_id = None
+
+        if SAVE_PATH is not None:
+            now = datetime.datetime.now()
+            string = pathlib.Path(os.environ['ROUTES']).stem + '_'
+            string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))
+
+            print(string)
+
+            self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string
+            self.save_path.mkdir(parents=True, exist_ok=True)
+
+            for sensor in self.sensors():
+                if 'camera' in sensor['type'] or 'lidar' in sensor['type']:
+                    if 'map' not in sensor['id']:
+                        (self.save_path / sensor['id']).mkdir(parents=True, exist_ok=True)
+            (self.save_path / 'measurements').mkdir(parents=True, exist_ok=True)
+            (self.save_path / 'topdown').mkdir(parents=True, exist_ok=True)
+
+            
+    def _init(self):
+        super()._init()
+
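+        # Two independent PID loops: the turn controller maps normalized
+        # heading error to steering, the speed controller maps the speed error
+        # to throttle (n presumably sets the controller's error-history length;
+        # see team_code/pid_controller.py).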
+        self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
+        self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)
+
+        # for stop signs
+        self._target_stop_sign = None # the stop sign affecting the ego vehicle
+        self._stop_completed = False # if the ego vehicle has completed the stop sign
+        self._affected_by_stop = False # if the ego vehicle is influenced by a stop sign
+
+    def _get_angle_to(self, pos, theta, target):
+        R = np.array([
+            [np.cos(theta), -np.sin(theta)],
+            [np.sin(theta),  np.cos(theta)],
+            ])
+
+        aim = R.T.dot(target - pos)
+        angle = -np.degrees(np.arctan2(-aim[1], aim[0]))
+        angle = 0.0 if np.isnan(angle) else angle 
+
+        return angle
+
+    def _get_control(self, target, far_target, tick_data):
+        pos = self._get_position(tick_data)
+        theta = tick_data['compass']
+        speed = tick_data['speed']
+
+        # Steering.
+        angle_unnorm = self._get_angle_to(pos, theta, target)
+        angle = angle_unnorm / 90  # normalize heading error to roughly [-1, 1] for the PID
+
+        steer = self._turn_controller.step(angle)
+        steer = np.clip(steer, -1.0, 1.0)
+        steer = round(steer, 3)
+
+        # Acceleration.
+        angle_far_unnorm = self._get_angle_to(pos, theta, far_target)
+        should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0
+        target_speed = 4.0 if should_slow else 7.0  # m/s; slow down for turns
+        brake = self._should_brake()
+        target_speed = target_speed if not brake else 0.0
+
+        delta = np.clip(target_speed - speed, 0.0, 0.25)  # cap the error fed to the speed controller
+        throttle = self._speed_controller.step(delta)
+        throttle = np.clip(throttle, 0.0, 0.75)
+
+        if brake:
+            steer *= 0.5
+            throttle = 0.0
+
+        return steer, throttle, brake, target_speed
+
+    def run_step(self, input_data, timestamp):
+        if not self.initialized:
+            self._init()
+
+        # change weather for visual diversity
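+        # Weather is resampled on the same cadence as data saving (every 10th
+        # step), so each saved frame can come with a different weather preset.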
+        if self.step % 10 == 0:
+            self.weather_id = random.choice(WEATHERS_IDS)
+            weather = WEATHERS[self.weather_id]
+            print(self.weather_id, weather)
+            self._world.set_weather(weather)
+
+        data = self.tick(input_data)
+        gps = self._get_position(data)
+
+        near_node, near_command = self._waypoint_planner.run_step(gps)
+        far_node, far_command = self._command_planner.run_step(gps)
+
+        steer, throttle, brake, target_speed = self._get_control(near_node, far_node, data)
+
+        control = carla.VehicleControl()
+        control.steer = steer + 1e-2 * np.random.randn()  # small Gaussian noise so the recorded demonstrations are not perfectly deterministic
+        control.throttle = throttle
+        control.brake = float(brake)
+
+        if self.step % 10 == 0 and self.save_path is not None:
+            self.save(far_node, near_command, steer, throttle, brake, target_speed, data)
+
+        return control
+
+    def _should_brake(self):
+        actors = self._world.get_actors()
+
+        vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*'))
+        light = self._is_light_red(actors.filter('*traffic_light*'))
+        walker = self._is_walker_hazard(actors.filter('*walker*'))
+        stop_sign = self._is_stop_sign_hazard(actors.filter('*stop*'))
+
+        return any(x is not None for x in [vehicle, light, walker, stop_sign])
+
+    @staticmethod
+    def _point_inside_boundingbox(point, bb_center, bb_extent):
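+        # 2D point-in-rectangle test: project AM onto the edge vectors AB and
+        # AD; M is inside iff both projections fall strictly between 0 and the
+        # squared edge lengths.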
+        A = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y - bb_extent.y)
+        B = carla.Vector2D(bb_center.x + bb_extent.x, bb_center.y - bb_extent.y)
+        D = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y + bb_extent.y)
+        M = carla.Vector2D(point.x, point.y)
+
+        AB = B - A
+        AD = D - A
+        AM = M - A
+        am_ab = AM.x * AB.x + AM.y * AB.y
+        ab_ab = AB.x * AB.x + AB.y * AB.y
+        am_ad = AM.x * AD.x + AM.y * AD.y
+        ad_ad = AD.x * AD.x + AD.y * AD.y
+
+        return am_ab > 0 and am_ab < ab_ab and am_ad > 0 and am_ad < ad_ad
+
+    def _get_forward_speed(self, transform=None, velocity=None):
+        """ Convert the vehicle transform directly to forward speed """
+        if not velocity:
+            velocity = self._vehicle.get_velocity()
+        if not transform:
+            transform = self._vehicle.get_transform()
+
+        vel_np = np.array([velocity.x, velocity.y, velocity.z])
+        pitch = np.deg2rad(transform.rotation.pitch)
+        yaw = np.deg2rad(transform.rotation.yaw)
+        orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])
+        speed = np.dot(vel_np, orientation)
+        return speed
+
+    def _is_actor_affected_by_stop(self, actor, stop, multi_step=20):
+        """
+        Check if the given actor is affected by the stop
+        """
+        affected = False
+        # first we run a fast coarse test
+        current_location = actor.get_location()
+        stop_location = stop.get_transform().location
+        if stop_location.distance(current_location) > self.PROXIMITY_THRESHOLD:
+            return affected
+
+        stop_t = stop.get_transform()
+        transformed_tv = stop_t.transform(stop.trigger_volume.location)
+
+        # slower, more accurate test: walk waypoints ahead of the actor and
+        # check each against the stop sign's trigger volume
+        list_locations = [current_location]
+        waypoint = self._world.get_map().get_waypoint(current_location)
+        for _ in range(multi_step):
+            if not waypoint:
+                break
+            next_wps = waypoint.next(self.WAYPOINT_STEP)
+            if not next_wps:  # road ends before the horizon
+                break
+            waypoint = next_wps[0]
+            list_locations.append(waypoint.transform.location)
+
+        for actor_location in list_locations:
+            if self._point_inside_boundingbox(actor_location, transformed_tv, stop.trigger_volume.extent):
+                affected = True
+                break
+
+        return affected
+
+    def _is_stop_sign_hazard(self, stop_sign_list):
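+        # Small state machine: once a stop sign starts affecting the ego, keep
+        # returning it until the vehicle has actually come to a stop once
+        # (speed < SPEED_THRESHOLD); afterwards, release the state as soon as
+        # the ego leaves the sign's trigger volume.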
+        if self._affected_by_stop:
+            if not self._stop_completed:
+                current_speed = self._get_forward_speed()
+                if current_speed < self.SPEED_THRESHOLD:
+                    self._stop_completed = True
+                    return None
+                else:
+                    return self._target_stop_sign
+            else:
+                # reset if the ego vehicle is outside the influence of the current stop sign
+                if not self._is_actor_affected_by_stop(self._vehicle, self._target_stop_sign):
+                    self._affected_by_stop = False
+                    self._stop_completed = False
+                    self._target_stop_sign = None
+                return None
+
+        ve_tra = self._vehicle.get_transform()
+        ve_dir = ve_tra.get_forward_vector()
+
+        wp = self._world.get_map().get_waypoint(ve_tra.location)
+        wp_dir = wp.transform.get_forward_vector()
+
+        dot_ve_wp = ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y + ve_dir.z * wp_dir.z
+
+        if dot_ve_wp > 0:  # ignore stop signs when the ego is facing against the lane direction
+            for stop_sign in stop_sign_list:
+                if self._is_actor_affected_by_stop(self._vehicle, stop_sign):
+                    # this stop sign is affecting the vehicle
+                    self._affected_by_stop = True
+                    self._target_stop_sign = stop_sign
+                    return self._target_stop_sign
+
+        return None
+
+    def _is_light_red(self, lights_list):
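+        # Instead of scanning lights_list, ask the simulator which light is
+        # currently affecting the ego and report it only if it is one of the
+        # nearby lights cached by MapAgent.tick().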
+        if self._vehicle.get_traffic_light_state() != carla.TrafficLightState.Green:
+            affecting = self._vehicle.get_traffic_light()
+
+            for light in self._traffic_lights:
+                if light.id == affecting.id:
+                    return affecting
+
+        return None
+
+    def _is_walker_hazard(self, walkers_list):
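+        # Constant-velocity forecast: cast a 10 m ray along the ego heading and
+        # an 8 m ray along each walker's heading (started 3 m behind the walker
+        # to be conservative); near-stationary walkers get their ray scaled
+        # down by their actual speed.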
+        z = self._vehicle.get_location().z
+        p1 = _numpy(self._vehicle.get_location())
+        v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw)
+
+        for walker in walkers_list:
+            v2_hat = _orientation(walker.get_transform().rotation.yaw)
+            s2 = np.linalg.norm(_numpy(walker.get_velocity()))
+
+            if s2 < 0.05:
+                v2_hat *= s2
+
+            p2 = -3.0 * v2_hat + _numpy(walker.get_location())
+            v2 = 8.0 * v2_hat
+
+            collides, collision_point = get_collision(p1, v1, p2, v2)
+
+            if collides:
+                return walker
+
+        return None
+
+    def _is_vehicle_hazard(self, vehicle_list):
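+        # Look ahead along the ego heading by a speed-scaled distance s1 and
+        # report a vehicle if it is within that distance and roughly ahead
+        # (angle_to_car <= 30 deg); crossing traffic is ignored unless it is
+        # close and nearly dead ahead.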
+        z = self._vehicle.get_location().z
+
+        o1 = _orientation(self._vehicle.get_transform().rotation.yaw)
+        p1 = _numpy(self._vehicle.get_location())
+        s1 = max(10, 3.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity()))) # increases the threshold distance
+        v1_hat = o1
+        v1 = s1 * v1_hat
+
+        for target_vehicle in vehicle_list:
+            if target_vehicle.id == self._vehicle.id:
+                continue
+
+            o2 = _orientation(target_vehicle.get_transform().rotation.yaw)
+            p2 = _numpy(target_vehicle.get_location())
+            s2 = max(5.0, 2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity())))
+            v2_hat = o2
+            v2 = s2 * v2_hat
+
+            p2_p1 = p2 - p1
+            distance = np.linalg.norm(p2_p1)
+            p2_p1_hat = p2_p1 / (distance + 1e-4)
+
+            # clip dot products to [-1, 1]; floating-point error can otherwise make arccos return NaN
+            angle_to_car = np.degrees(np.arccos(np.clip(v1_hat.dot(p2_p1_hat), -1.0, 1.0)))
+            angle_between_heading = np.degrees(np.arccos(np.clip(o1.dot(o2), -1.0, 1.0)))
+
+            # fold reflex angles so both measures lie in [0, 180]
+            angle_to_car = min(angle_to_car, 360.0 - angle_to_car)
+            angle_between_heading = min(angle_between_heading, 360.0 - angle_between_heading)
+
+            if angle_between_heading > 60.0 and not (angle_to_car < 15 and distance < s1):
+                continue
+            elif angle_to_car > 30.0:
+                continue
+            elif distance > s1:
+                continue
+
+            return target_vehicle
+
+        return None
+
+    def save(self, far_node, near_command, steer, throttle, brake, target_speed, tick_data):
+        frame = self.step // 10  # save() runs every 10th step, so frames are numbered consecutively
+
+        pos = self._get_position(tick_data)
+        theta = tick_data['compass']
+        speed = tick_data['speed']
+
+        data = {
+                'x': pos[0],
+                'y': pos[1],
+                'theta': theta,
+                'speed': speed,
+                'target_speed': target_speed,
+                'x_command': far_node[0],
+                'y_command': far_node[1],
+                'command': near_command.value,
+                'steer': steer,
+                'throttle': throttle,
+                'brake': brake,
+                'weather': self.weather_id,
+                }
+
+        for sensor in self.sensors():
+            if 'camera' in sensor['type'] and 'map' not in sensor['id']:
+                Image.fromarray(tick_data[sensor['id']]).save(self.save_path / sensor['id'] / ('%04d.png' % frame))
+            elif 'lidar' in sensor['type']:
+                np.save(self.save_path / 'lidar' / ('%04d.npy' % frame), tick_data['lidar'], allow_pickle=True)
+
+        Image.fromarray(tick_data['topdown']).save(self.save_path / 'topdown' / ('%04d.png' % frame))
+        measurements_file = self.save_path / 'measurements' / ('%04d.json' % frame)
+        with open(measurements_file, 'w') as f:
+            json.dump(data, f, indent=4)
+
diff --git a/leaderboard/team_code/base_agent.py b/leaderboard/team_code/base_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6212a7f0657d97bbb02c6ec753c51d185edbb3a
--- /dev/null
+++ b/leaderboard/team_code/base_agent.py
@@ -0,0 +1,189 @@
+import time
+
+import cv2
+import carla
+
+from leaderboard.autoagents import autonomous_agent
+from team_code.planner import RoutePlanner
+
+
+# Full sensor setup: multi-view RGB, semantic segmentation and depth cameras,
+# lidar, IMU, GNSS and a speedometer
+class BaseAgent(autonomous_agent.AutonomousAgent):
+    def setup(self, path_to_conf_file):
+        self.track = autonomous_agent.Track.SENSORS
+        self.config_path = path_to_conf_file
+        self.step = -1
+        self.wall_start = time.time()
+        self.initialized = False
+
+    def _init(self):
+        self._command_planner = RoutePlanner(7.5, 25.0, 257)
+        self._command_planner.set_route(self._global_plan, True)
+
+        self.initialized = True
+
+    def _get_position(self, tick_data):
+        gps = tick_data['gps']
+        gps = (gps - self._command_planner.mean) * self._command_planner.scale
+
+        return gps
+
+    def sensors(self):
+        return [
+                {
+                    'type': 'sensor.camera.rgb',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'rgb_front'
+                    },
+                {
+                    'type': 'sensor.camera.semantic_segmentation',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'seg_front'
+                    },
+                {
+                    'type': 'sensor.camera.depth',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'depth_front'
+                    },
+                {
+                    'type': 'sensor.camera.rgb',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'rgb_left'
+                    },
+                {
+                    'type': 'sensor.camera.semantic_segmentation',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'seg_left'
+                    },
+                {
+                    'type': 'sensor.camera.depth',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'depth_left'
+                    },
+                {
+                    'type': 'sensor.camera.rgb',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': -180.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'rgb_rear'
+                    },
+                {
+                    'type': 'sensor.camera.semantic_segmentation',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': -180.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'seg_rear'
+                    },
+                {
+                    'type': 'sensor.camera.depth',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': -180.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'depth_rear'
+                    },
+                {
+                    'type': 'sensor.camera.rgb',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'rgb_right'
+                    },
+                {
+                    'type': 'sensor.camera.semantic_segmentation',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'seg_right'
+                    },
+                {
+                    'type': 'sensor.camera.depth',
+                    'x': 1.3, 'y': 0.0, 'z': 2.3,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
+                    'width': 400, 'height': 300, 'fov': 100,
+                    'id': 'depth_right'
+                    },
+                {   
+                    'type': 'sensor.lidar.ray_cast',
+                    'x': 1.3, 'y': 0.0, 'z': 2.5,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,
+                    'id': 'lidar'
+                    },
+                {
+                    'type': 'sensor.other.imu',
+                    'x': 0.0, 'y': 0.0, 'z': 0.0,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+                    'sensor_tick': 0.05,
+                    'id': 'imu'
+                    },
+                {
+                    'type': 'sensor.other.gnss',
+                    'x': 0.0, 'y': 0.0, 'z': 0.0,
+                    'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+                    'sensor_tick': 0.01,
+                    'id': 'gps'
+                    },
+                {
+                    'type': 'sensor.speedometer',
+                    'reading_frequency': 20,
+                    'id': 'speed'
+                    }
+                ]
+
+    def tick(self, input_data):
+        self.step += 1
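+        # Each input_data entry is a (frame, data) tuple; index [1] is the
+        # payload. Camera payloads arrive as BGRA arrays, so we drop the alpha
+        # channel and convert BGR -> RGB.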
+
+        rgb_front = cv2.cvtColor(input_data['rgb_front'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        rgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        
+        gps = input_data['gps'][1][:2]
+        speed = input_data['speed'][1]['speed']
+        compass = input_data['imu'][1][-1]
+
+        # segmentation
+        seg_front = cv2.cvtColor(input_data['seg_front'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        seg_left = cv2.cvtColor(input_data['seg_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        seg_rear = cv2.cvtColor(input_data['seg_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        seg_right = cv2.cvtColor(input_data['seg_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        
+        # depth
+        depth_front = cv2.cvtColor(input_data['depth_front'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        depth_left = cv2.cvtColor(input_data['depth_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        depth_rear = cv2.cvtColor(input_data['depth_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        depth_right = cv2.cvtColor(input_data['depth_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        
+        # lidar
+        lidar = input_data['lidar'][1]
+        
+
+        return {
+                'rgb_front': rgb_front,
+                'rgb_left': rgb_left,
+                'rgb_rear': rgb_rear,
+                'rgb_right': rgb_right,
+                'seg_front': seg_front,
+                'seg_left': seg_left,
+                'seg_rear': seg_rear,
+                'seg_right': seg_right,
+                'depth_front': depth_front,
+                'depth_left': depth_left,
+                'depth_rear': depth_rear,
+                'depth_right': depth_right,
+                'lidar': lidar,
+                'gps': gps,
+                'speed': speed,
+                'compass': compass
+                }
\ No newline at end of file
diff --git a/leaderboard/team_code/map_agent.py b/leaderboard/team_code/map_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8c0f3a068681ff157b299fe1911df1827c250eb
--- /dev/null
+++ b/leaderboard/team_code/map_agent.py
@@ -0,0 +1,179 @@
+import numpy as np
+from PIL import Image, ImageDraw
+
+from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
+
+from team_code.base_agent import BaseAgent
+from team_code.planner import RoutePlanner
+
+
+class MapAgent(BaseAgent):
+    def sensors(self):
+        result = super().sensors()
+        result.append({
+            'type': 'sensor.camera.semantic_segmentation',
+            'x': 0.0, 'y': 0.0, 'z': 100.0,
+            'roll': 0.0, 'pitch': -90.0, 'yaw': 0.0,
+            'width': 512, 'height': 512, 'fov': 5 * 10.0,  # 50 deg fov from z=100 m spans ~93 m, i.e. ~5.5 px/m
+            'id': 'map'
+            })
+
+        return result
+
+    def set_global_plan(self, global_plan_gps, global_plan_world_coord):
+        super().set_global_plan(global_plan_gps, global_plan_world_coord)
+
+        self._plan_HACK = global_plan_world_coord
+        self._plan_gps_HACK = global_plan_gps
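+        # Keep untouched copies of the route: the leaderboard may downsample
+        # self._global_plan, while _init() feeds this dense GPS plan to the
+        # fine-grained waypoint planner.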
+
+    def _init(self):
+        super()._init()
+
+        self._vehicle = CarlaDataProvider.get_hero_actor()
+        self._world = self._vehicle.get_world()
+
+        self._waypoint_planner = RoutePlanner(4.0, 50)
+        self._waypoint_planner.set_route(self._plan_gps_HACK, True)
+
+        self._traffic_lights = list()
+
+    def tick(self, input_data):
+        self._actors = self._world.get_actors()
+        self._traffic_lights = get_nearby_lights(self._vehicle, self._actors.filter('*traffic_light*'))
+        self._stop_signs = get_nearby_lights(self._vehicle, self._actors.filter('*stop*'))
+
+        topdown = input_data['map'][1][:, :, 2]  # CARLA packs the semantic tag into the red channel
+        topdown = draw_traffic_lights(topdown, self._vehicle, self._traffic_lights)
+        topdown = draw_stop_signs(topdown, self._vehicle, self._stop_signs)
+
+        result = super().tick(input_data)
+        result['topdown'] = topdown
+
+        return result
+
+
+def get_nearby_lights(vehicle, lights, pixels_per_meter=5.5, size=512, radius=5):
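+    # Keep only actors whose trigger volume both projects inside the top-down
+    # crop (size x size pixels around the ego, rotated so the ego heading
+    # points up, hence the 90 deg offset on theta) and lies within touching
+    # distance of the ego (sum of the two bounding-sphere radii).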
+    result = list()
+
+    transform = vehicle.get_transform()
+    pos = transform.location
+    theta = np.radians(90 + transform.rotation.yaw)
+    R = np.array([
+        [np.cos(theta), -np.sin(theta)],
+        [np.sin(theta),  np.cos(theta)],
+        ])
+
+    for light in lights:
+        delta = light.get_transform().location - pos
+
+        target = R.T.dot([delta.x, delta.y])
+        target *= pixels_per_meter
+        target += size // 2
+
+        if min(target) < 0 or max(target) >= size:
+            continue
+
+        trigger = light.trigger_volume
+        light.get_transform().transform(trigger.location)
+        dist = trigger.location.distance(vehicle.get_location())
+        a = np.sqrt(
+                trigger.extent.x ** 2 +
+                trigger.extent.y ** 2 +
+                trigger.extent.z ** 2)
+        b = np.sqrt(
+                vehicle.bounding_box.extent.x ** 2 +
+                vehicle.bounding_box.extent.y ** 2 +
+                vehicle.bounding_box.extent.z ** 2)
+
+        if dist > a + b:
+            continue
+
+        result.append(light)
+
+    return result
+
+
+def draw_traffic_lights(image, vehicle, lights, pixels_per_meter=5.5, size=512, radius=5):
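+    # Re-projects each nearby light into top-down pixel coordinates (same math
+    # as get_nearby_lights) and stamps a disc whose palette value encodes the
+    # light state on top of the semantic map.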
+    image = Image.fromarray(image)
+    draw = ImageDraw.Draw(image)
+
+    transform = vehicle.get_transform()
+    pos = transform.location
+    theta = np.radians(90 + transform.rotation.yaw)
+    R = np.array([
+        [np.cos(theta), -np.sin(theta)],
+        [np.sin(theta),  np.cos(theta)],
+        ])
+
+    for light in lights:
+        delta = light.get_transform().location - pos
+
+        target = R.T.dot([delta.x, delta.y])
+        target *= pixels_per_meter
+        target += size // 2
+
+        if min(target) < 0 or max(target) >= size:
+            continue
+
+        trigger = light.trigger_volume
+        light.get_transform().transform(trigger.location)
+        dist = trigger.location.distance(vehicle.get_location())
+        a = np.sqrt(
+                trigger.extent.x ** 2 +
+                trigger.extent.y ** 2 +
+                trigger.extent.z ** 2)
+        b = np.sqrt(
+                vehicle.bounding_box.extent.x ** 2 +
+                vehicle.bounding_box.extent.y ** 2 +
+                vehicle.bounding_box.extent.z ** 2)
+
+        if dist > a + b:
+            continue
+
+        x, y = target
+        draw.ellipse((x-radius, y-radius, x+radius, y+radius), 23 + light.state.real) # 13 changed to 23 for carla 0.9.10
+
+    return np.array(image)
+
+
+def draw_stop_signs(image, vehicle, lights, pixels_per_meter=5.5, size=512, radius=5):
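+    # Same projection as draw_traffic_lights, but stop signs are stamped with
+    # the fixed palette value 26.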
+    image = Image.fromarray(image)
+    draw = ImageDraw.Draw(image)
+
+    transform = vehicle.get_transform()
+    pos = transform.location
+    theta = np.radians(90 + transform.rotation.yaw)
+    R = np.array([
+        [np.cos(theta), -np.sin(theta)],
+        [np.sin(theta),  np.cos(theta)],
+        ])
+
+    for light in lights:
+        delta = light.get_transform().location - pos
+
+        target = R.T.dot([delta.x, delta.y])
+        target *= pixels_per_meter
+        target += size // 2
+
+        if min(target) < 0 or max(target) >= size:
+            continue
+
+        trigger = light.trigger_volume
+        light.get_transform().transform(trigger.location)
+        dist = trigger.location.distance(vehicle.get_location())
+        a = np.sqrt(
+                trigger.extent.x ** 2 +
+                trigger.extent.y ** 2 +
+                trigger.extent.z ** 2)
+        b = np.sqrt(
+                vehicle.bounding_box.extent.x ** 2 +
+                vehicle.bounding_box.extent.y ** 2 +
+                vehicle.bounding_box.extent.z ** 2)
+
+        if dist > a + b:
+            continue
+
+        x, y = target
+        draw.ellipse((x-radius, y-radius, x+radius, y+radius), 26)
+
+    return np.array(image)
\ No newline at end of file