diff --git a/.gitignore b/.gitignore
index 9107375f..c9c92b35 100644
--- a/.gitignore
+++ b/.gitignore
@@ -148,4 +148,4 @@ evaluation_outputs/
 docs/tmp.py
 data_dumping
 video_dumping
-
+opv2v_data_dumping
diff --git a/opencda/core/common/cav_world.py b/opencda/core/common/cav_world.py
index d470939f..794bab8f 100644
--- a/opencda/core/common/cav_world.py
+++ b/opencda/core/common/cav_world.py
@@ -29,6 +29,9 @@ class CavWorld(object):
     _platooning_dict : dict
         A dictionary that stores platooning managers.
 
+    _rsu_manager_dict : dict
+        A dictionary that stores RSU managers.
+
     ml_manager : opencda object.
         The machine learning manager class.
     """
@@ -38,6 +41,7 @@ def __init__(self, apply_ml=False):
         self.vehicle_id_set = set()
         self._vehicle_manager_dict = {}
         self._platooning_dict = {}
+        self._rsu_manager_dict = {}
         self.ml_manager = None
 
         if apply_ml:
@@ -76,6 +80,17 @@ def update_platooning(self, platooning_manger):
         self._platooning_dict.update(
             {platooning_manger.pmid: platooning_manger})
 
+    def update_rsu_manager(self, rsu_manager):
+        """
+        Add an RSU manager.
+
+        Parameters
+        ----------
+        rsu_manager : opencda object
+            The RSU manager class.
+        """
+        self._rsu_manager_dict.update({rsu_manager.rid: rsu_manager})
+
     def update_sumo_vehicles(self, sumo2carla_ids):
         """
         Update the sumo carla mapping dict. This is only called
diff --git a/opencda/core/common/data_dumper.py b/opencda/core/common/data_dumper.py
index 8e4449ee..cf5f5e5f 100644
--- a/opencda/core/common/data_dumper.py
+++ b/opencda/core/common/data_dumper.py
@@ -201,9 +201,13 @@ def save_yaml_file(self,
 
         dump_yml.update({'vehicles': vehicle_dict})
 
-        # dump ego pose and speed
+        # dump ego pose and speed; if the localization manager has no
+        # vehicle, then it belongs to an RSU (roadside unit).
         predicted_ego_pos = localization_manager.get_ego_pos()
-        true_ego_pos = localization_manager.vehicle.get_transform()
+        true_ego_pos = localization_manager.vehicle.get_transform() \
+            if hasattr(localization_manager, 'vehicle') \
+            else localization_manager.true_ego_pos
+
         dump_yml.update({'predicted_ego_pos': [
             predicted_ego_pos.location.x,
             predicted_ego_pos.location.y,
@@ -261,19 +265,23 @@ def save_yaml_file(self,
             camera_param.update({'extrinsic': lidar2camera})
             dump_yml.update({'camera%d' % i: camera_param})
 
-        # dump the planned trajectory
-        trajectory_deque = behavior_agent.get_local_planner().get_trajectory()
-        trajectory_list = []
+        dump_yml.update({'RSU': True})
+        # dump the planned trajectory if it exists
+        if behavior_agent is not None:
+            trajectory_deque = \
+                behavior_agent.get_local_planner().get_trajectory()
+            trajectory_list = []
 
-        for i in range(len(trajectory_deque)):
-            tmp_buffer = trajectory_deque.popleft()
-            x = tmp_buffer[0].location.x
-            y = tmp_buffer[0].location.y
-            spd = tmp_buffer[1]
+            for i in range(len(trajectory_deque)):
+                tmp_buffer = trajectory_deque.popleft()
+                x = tmp_buffer[0].location.x
+                y = tmp_buffer[0].location.y
+                spd = tmp_buffer[1]
 
-            trajectory_list.append([x, y, spd])
+                trajectory_list.append([x, y, spd])
 
-        dump_yml.update({'plan_trajectory': trajectory_list})
+            dump_yml.update({'plan_trajectory': trajectory_list})
+            dump_yml.update({'RSU': False})
 
         yml_name = '%06d' % frame + '.yaml'
         save_path = os.path.join(self.save_parent_folder,
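For readers of the dump format: every per-frame YAML now carries an `RSU` flag, and `plan_trajectory` is only written when a behavior agent exists. A minimal sketch of the two resulting shapes (only the keys touched by this hunk; the values are made up for illustration):

```python
# Hypothetical per-frame content (shown as Python dicts) after this change.
rsu_frame = {'RSU': True}   # behavior_agent is None, so no trajectory key
cav_frame = {'RSU': False,  # flag overwritten once the trajectory is dumped
             'plan_trajectory': [[12.0, 190.5, 5.2],   # [x, y, speed]
                                 [12.0, 188.9, 5.4]]}
```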
+""" +# Author: Runsheng Xu +# License: TDG-Attribution-NonCommercial-NoDistrib + +from opencda.core.common.data_dumper import DataDumper +from opencda.core.sensing.perception.perception_manager import \ + PerceptionManager +from opencda.core.sensing.localization.rsu_localization_manager import \ + LocalizationManager + + +class RSUManager(object): + """ + A class manager for RSU. Currently a RSU only has perception and + localization modules to dump sensing information. + TODO: add V2X module to it to enable sharing sensing information online. + + Parameters + ---------- + carla_world : carla.World + CARLA simulation world, we need this for blueprint creation. + + config_yaml : dict + The configuration dictionary of the RSU. + + carla_map : carla.Map + The CARLA simulation map. + + cav_world : opencda object + CAV World for simulation V2X communication. + + current_time : str + Timestamp of the simulation beginning, this is used for data dump. + + data_dumping : bool + Indicates whether to dump sensor data during simulation. + + Attributes + ---------- + localizer : opencda object + The current localization manager. + + perception_manager : opencda object + The current V2X perception manager. + + data_dumper : opencda object + Used for dumping sensor data. + """ + def __init__( + self, + carla_world, + config_yaml, + carla_map, + cav_world, + current_time='', + data_dumping=False): + + self.rid = config_yaml['id'] + # The id of rsu is always a negative int + if self.rid > 0: + self.rid = -self.rid + + # read map from the world everytime is time-consuming, so we need + # explicitly extract here + self.carla_map = carla_map + + # retrieve the configure for different modules + # todo: add v2x module to rsu later + sensing_config = config_yaml['sensing'] + sensing_config['localization']['global_position'] = \ + config_yaml['spawn_position'] + sensing_config['perception']['global_position'] = \ + config_yaml['spawn_position'] + + # localization module + self.localizer = LocalizationManager(carla_world, + sensing_config['localization'], + self.carla_map) + # perception module + self.perception_manager = PerceptionManager(vehicle=None, + config_yaml=sensing_config['perception'], + cav_world=cav_world, + carla_world=carla_world, + data_dump=data_dumping, + infra_id=self.rid) + if data_dumping: + self.data_dumper = DataDumper(self.perception_manager, + self.rid, + save_time=current_time) + else: + self.data_dumper = None + + cav_world.update_rsu_manager(self) + + def update_info(self): + """ + Call perception and localization module to + retrieve surrounding info an ego position. + """ + # localization + self.localizer.localize() + + ego_pos = self.localizer.get_ego_pos() + ego_spd = self.localizer.get_ego_spd() + + # object detection todo: pass it to other CAVs for V2X percetion + objects = self.perception_manager.detect(ego_pos) + + def run_step(self): + """ + Currently only used for dumping data. + """ + # dump data + if self.data_dumper: + self.data_dumper.run_step(self.perception_manager, + self.localizer, + None) + + def destroy(self): + """ + Destroy the actor vehicle + """ + self.perception_manager.destroy() + self.localizer.destroy() diff --git a/opencda/core/common/v2x_manager.py b/opencda/core/common/v2x_manager.py index 99466c82..fba0e193 100644 --- a/opencda/core/common/v2x_manager.py +++ b/opencda/core/common/v2x_manager.py @@ -79,6 +79,7 @@ def __init__(self, cav_world, config_yaml, vid): self.speed_noise = 0.0 self.lag = 0 + # Add noise to V2X communication if needed. 
diff --git a/opencda/core/common/v2x_manager.py b/opencda/core/common/v2x_manager.py
index 99466c82..fba0e193 100644
--- a/opencda/core/common/v2x_manager.py
+++ b/opencda/core/common/v2x_manager.py
@@ -79,6 +79,7 @@ def __init__(self, cav_world, config_yaml, vid):
         self.speed_noise = 0.0
         self.lag = 0
 
+        # add noise to V2X communication if needed
         if 'loc_noise' in config_yaml:
             self.loc_noise = config_yaml['loc_noise']
         if 'yaw_noise' in config_yaml:
diff --git a/opencda/core/common/vehicle_manager.py b/opencda/core/common/vehicle_manager.py
index 7da20f5a..ac5df7a1 100644
--- a/opencda/core/common/vehicle_manager.py
+++ b/opencda/core/common/vehicle_manager.py
@@ -32,7 +32,7 @@ class VehicleManager(object):
         The carla.Vehicle. We need this class to spawn our gnss and imu sensor.
 
     config_yaml : dict
-        The configuration dictionary of the localization module.
+        The configuration dictionary of this CAV.
 
     application : list
         The application category, currently support:['single','platoon'].
diff --git a/opencda/core/sensing/localization/rsu_localization_manager.py b/opencda/core/sensing/localization/rsu_localization_manager.py
new file mode 100644
index 00000000..9f23acd1
--- /dev/null
+++ b/opencda/core/sensing/localization/rsu_localization_manager.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+"""
+Localization module for RSU.
+"""
+# Author: Runsheng Xu
+# License: TDG-Attribution-NonCommercial-NoDistrib
+
+import weakref
+from collections import deque
+
+import carla
+
+from opencda.core.common.misc import get_speed
+from opencda.core.sensing.localization.coordinate_transform \
+    import geo_to_transform
+
+
+class GnssSensor(object):
+    """
+    The default GNSS sensor module for RSU.
+
+    Parameters
+    ----------
+    world : carla.world
+        CARLA world.
+
+    config : dict
+        The configuration dictionary of the localization module.
+
+    global_position : list
+        The global position of the RSU.
+
+    Attributes
+    ----------
+    blueprint : carla.blueprint
+        The current blueprint of the sensor actor.
+
+    weak_self : opencda object
+        A weak reference pointer to avoid circular references.
+
+    sensor : carla.Actor
+        The GNSS sensor actor, spawned at the RSU position.
+    """
+
+    def __init__(self, world, config, global_position):
+        blueprint = world.get_blueprint_library().find('sensor.other.gnss')
+
+        # set the noise for gps
+        blueprint.set_attribute(
+            'noise_alt_stddev', str(
+                config['noise_alt_stddev']))
+        blueprint.set_attribute(
+            'noise_lat_stddev', str(
+                config['noise_lat_stddev']))
+        blueprint.set_attribute(
+            'noise_lon_stddev', str(
+                config['noise_lon_stddev']))
+        # spawn the sensor
+        self.sensor = world.spawn_actor(
+            blueprint,
+            carla.Transform(
+                carla.Location(
+                    x=global_position[0],
+                    y=global_position[1],
+                    z=global_position[2])))
+
+        # latitude and longitude at current timestamp
+        self.lat, self.lon, self.alt, self.timestamp = 0.0, 0.0, 0.0, 0.0
+        # create weak reference to avoid circular reference
+        weak_self = weakref.ref(self)
+        self.sensor.listen(
+            lambda event: GnssSensor._on_gnss_event(
+                weak_self, event))
+
+    @staticmethod
+    def _on_gnss_event(weak_self, event):
+        """GNSS method that returns the current geo location."""
+        self = weak_self()
+        if not self:
+            return
+        self.lat = event.latitude
+        self.lon = event.longitude
+        self.alt = event.altitude
+        self.timestamp = event.timestamp
+
+
+class LocalizationManager(object):
+    """
+    Default localization module for infrastructure.
+
+    Parameters
+    ----------
+    world : carla.world
+        CARLA world.
+
+    config_yaml : dict
+        The configuration dictionary of the localization module.
+
+    carla_map : carla.Map
+        The carla HD map. We need this to find the map origin to
+        convert WGS84 to the ENU coordinate system.
+
+    Attributes
+    ----------
+    gnss : opencda object
+        GNSS sensor manager that spawns the GNSS sensor and listens
+        to the data transmission.
+    """
+
+    def __init__(self, world, config_yaml, carla_map):
+
+        self.activate = config_yaml['activate']
+        self.map = carla_map
+        self.geo_ref = self.map.transform_to_geolocation(
+            carla.Location(x=0, y=0, z=0))
+
+        # speed and transform and current timestamp
+        self._ego_pos = None
+        self._speed = 0
+
+        # history track
+        self._ego_pos_history = deque(maxlen=100)
+        self._timestamp_history = deque(maxlen=100)
+
+        self.gnss = GnssSensor(world,
+                               config_yaml['gnss'],
+                               config_yaml['global_position'])
+        self.true_ego_pos = carla.Transform(
+            carla.Location(x=config_yaml['global_position'][0],
+                           y=config_yaml['global_position'][1],
+                           z=config_yaml['global_position'][2]))
+
+    def localize(self):
+        """
+        Currently implemented in a naive way: either trust the true
+        position, or convert the GNSS reading to ENU coordinates.
+        """
+        if not self.activate:
+            self._ego_pos = self.true_ego_pos
+        else:
+            x, y, z = geo_to_transform(self.gnss.lat,
+                                       self.gnss.lon,
+                                       self.gnss.alt,
+                                       self.geo_ref.latitude,
+                                       self.geo_ref.longitude, 0.0)
+            self._ego_pos = carla.Transform(
+                carla.Location(x=x, y=y, z=z))
+
+    def get_ego_pos(self):
+        """
+        Retrieve the ego position.
+        """
+        return self._ego_pos
+
+    def get_ego_spd(self):
+        """
+        Retrieve the ego speed.
+        """
+        return self._speed
+
+    def destroy(self):
+        """
+        Destroy the sensors.
+        """
+        self.gnss.sensor.destroy()
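One detail worth flagging in GnssSensor (the camera and lidar classes below use it too): the listen callback captures a weak reference instead of `self`, so registering it with the CARLA sensor does not keep the Python manager alive. A standalone sketch of the pattern; the class and event here are illustrative, not from the codebase:

```python
import weakref


class DemoSensorWrapper:
    """Illustrative only: mirrors the weakref pattern used by GnssSensor."""

    def __init__(self):
        self.last_event = None
        weak_self = weakref.ref(self)
        # the lambda holds only a weak reference, so handing it to
        # sensor.listen() cannot create a cycle that pins this wrapper
        self._callback = lambda event: DemoSensorWrapper._on_event(
            weak_self, event)

    @staticmethod
    def _on_event(weak_self, event):
        self = weak_self()
        if not self:        # wrapper already garbage collected
            return
        self.last_event = event


wrapper = DemoSensorWrapper()
wrapper._callback('fake-event')
assert wrapper.last_event == 'fake-event'
```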
+ """ + + def __init__(self, world, config_yaml, carla_map): + + self.activate = config_yaml['activate'] + self.map = carla_map + self.geo_ref = self.map.transform_to_geolocation( + carla.Location(x=0, y=0, z=0)) + + # speed and transform and current timestamp + self._ego_pos = None + self._speed = 0 + + # history track + self._ego_pos_history = deque(maxlen=100) + self._timestamp_history = deque(maxlen=100) + + self.gnss = GnssSensor(world, + config_yaml['gnss'], + config_yaml['global_position']) + self.true_ego_pos = carla.Transform( + carla.Location(x=config_yaml['global_position'][0], + y=config_yaml['global_position'][1], + z=config_yaml['global_position'][2])) + self._speed = 0 + + def localize(self): + """ + Currently implemented in a naive way. + """ + + if not self.activate: + self._ego_pos = self.true_ego_pos + else: + x, y, z = geo_to_transform(self.gnss.lat, + self.gnss.lon, + self.gnss.alt, + self.geo_ref.latitude, + self.geo_ref.longitude, 0.0) + self._ego_pos = carla.Transform( + carla.Location(x=x, y=y, z=z)) + + def get_ego_pos(self): + """ + Retrieve ego vehicle position + """ + return self._ego_pos + + def get_ego_spd(self): + """ + Retrieve ego vehicle speed + """ + return self._speed + + def destroy(self): + """ + Destroy the sensors + """ + self.gnss.sensor.destroy() diff --git a/opencda/core/sensing/perception/perception_manager.py b/opencda/core/sensing/perception/perception_manager.py index 3c16f8ee..ecc777a9 100644 --- a/opencda/core/sensing/perception/perception_manager.py +++ b/opencda/core/sensing/perception/perception_manager.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """ -Perception module +Perception module base. """ # Author: Runsheng Xu @@ -22,20 +22,26 @@ ObstacleVehicle from opencda.core.sensing.perception.static_obstacle import TrafficLight from opencda.core.sensing.perception.o3d_lidar_libs import \ - o3d_visualizer_init, o3d_pointcloud_encode, o3d_visualizer_show,\ + o3d_visualizer_init, o3d_pointcloud_encode, o3d_visualizer_show, \ o3d_camera_lidar_fusion class CameraSensor: """ - Camera manager. + Camera manager for vehicle or infrastructure. Parameters ---------- vehicle : carla.Vehicle - The carla.Vehicle. We need this class to spawn sensors. + The carla.Vehicle, this is for cav. - position : str + world : carla.World + The carla world object, this is for rsu. + + global_position : list + Global position of the infrastructure, [x, y, z] + + relative_position : str Indicates the sensor is a front or rear camera. option: front, left, right. @@ -47,44 +53,22 @@ class CameraSensor: The carla sensor that mounts at the vehicle. 
""" - def __init__(self, vehicle, position='front'): - world = vehicle.get_world() + + def __init__(self, vehicle, world, relative_position, global_position): + if vehicle is not None: + world = vehicle.get_world() + blueprint = world.get_blueprint_library().find('sensor.camera.rgb') blueprint.set_attribute('fov', '100') - if position == 'front': - spawn_point = carla.Transform( - carla.Location( - x=2.5, y=0.0, z=1.0), carla.Rotation( - pitch=0, roll=0, yaw=0)) - elif position == 'right': - spawn_point = carla.Transform( - carla.Location( - x=0.0, y=0.3, z=1.8), carla.Rotation( - pitch=0, roll=0, yaw=100)) - elif position == 'left': - spawn_point = carla.Transform( - carla.Location( - x=0.0, - y=-0.3, - z=1.8), - carla.Rotation( - pitch=0, - roll=0, - yaw=-100)) + spawn_point = self.spawn_point_estimation(relative_position, + global_position) + + if vehicle is not None: + self.sensor = world.spawn_actor( + blueprint, spawn_point, attach_to=vehicle) else: - spawn_point = carla.Transform( - carla.Location( - x=-2.0, - y=0.0, - z=1.5), - carla.Rotation( - pitch=0, - roll=0, - yaw=180)) - - self.sensor = world.spawn_actor( - blueprint, spawn_point, attach_to=vehicle) + self.sensor = world.spawn_actor(blueprint, spawn_point) self.image = None self.timstamp = None @@ -98,6 +82,47 @@ def __init__(self, vehicle, position='front'): self.image_width = int(self.sensor.attributes['image_size_x']) self.image_height = int(self.sensor.attributes['image_size_y']) + @staticmethod + def spawn_point_estimation(relative_position, global_position): + + pitch = 0 + carla_location = carla.Location(x=0, y=0, z=0) + + if global_position is not None: + carla_location = carla.Location( + x=global_position[0], + y=global_position[1], + z=global_position[2]) + pitch = -35 + + if relative_position == 'front': + carla_location = carla.Location(x=carla_location.x + 2.5, + y=carla_location.y, + z=carla_location.z + 1.0) + yaw = 0 + + elif relative_position == 'right': + carla_location = carla.Location(x=carla_location.x + 0.0, + y=carla_location.y + 0.3, + z=carla_location.z + 1.8) + yaw = 100 + + elif relative_position == 'left': + carla_location = carla.Location(x=carla_location.x + 0.0, + y=carla_location.y - 0.3, + z=carla_location.z + 1.8) + yaw = -100 + else: + carla_location = carla.Location(x=carla_location.x - 2.0, + y=carla_location.y, + z=carla_location.z + 1.5) + yaw = 180 + + carla_rotation = carla.Rotation(roll=0, yaw=yaw, pitch=pitch) + spawn_point = carla.Transform(carla_location, carla_rotation) + + return spawn_point + @staticmethod def _on_rgb_image_event(weak_self, event): """CAMERA method""" @@ -121,11 +146,17 @@ class LidarSensor: Parameters ---------- vehicle : carla.Vehicle - carla Vehicle, we need this to spawn sensors. + The carla.Vehicle, this is for cav. + + world : carla.World + The carla world object, this is for rsu. config_yaml : dict Configuration dictionary for lidar. 
@@ -206,10 +246,16 @@ class SemanticLidarSensor:
     Parameters
     ----------
     vehicle : carla.Vehicle
-        carla Vehicle, we need this to spawn sensors.
+        The carla.Vehicle; this is for a CAV.
+
+    world : carla.World
+        The carla world object; this is for an RSU.
 
     config_yaml : dict
-        Configuration dictionary, the same as the normal lidar.
+        Configuration dictionary for lidar.
+
+    global_position : list
+        Global position of the infrastructure, [x, y, z].
 
     Attributes
     ----------
@@ -221,10 +267,13 @@
 
     """
 
-    def __init__(self, vehicle, config_yaml):
-        world = vehicle.get_world()
-        blueprint =\
-            world.get_blueprint_library().\
+
+    def __init__(self, vehicle, world, config_yaml, global_position):
+        if vehicle is not None:
+            world = vehicle.get_world()
+
+        blueprint = \
+            world.get_blueprint_library(). \
             find('sensor.lidar.ray_cast_semantic')
 
         # set attribute based on the configuration
@@ -239,10 +288,19 @@
             'rotation_frequency', str(
                 config_yaml['rotation_frequency']))
 
-        # spawn sensor on vehicle
-        spawn_point = carla.Transform(carla.Location(x=-0.5, z=1.9))
-        self.sensor = world.spawn_actor(
-            blueprint, spawn_point, attach_to=vehicle)
+        # spawn sensor
+        if global_position is None:
+            spawn_point = carla.Transform(carla.Location(x=-0.5, z=1.9))
+        else:
+            spawn_point = carla.Transform(carla.Location(x=global_position[0],
+                                                         y=global_position[1],
+                                                         z=global_position[2]))
+
+        if vehicle is not None:
+            self.sensor = world.spawn_actor(
+                blueprint, spawn_point, attach_to=vehicle)
+        else:
+            self.sensor = world.spawn_actor(blueprint, spawn_point)
 
         # lidar data
         self.points = None
""" - def __init__(self, vehicle, config_yaml, cav_world, data_dump=False): + def __init__(self, vehicle, config_yaml, cav_world, + data_dump=False, carla_world=None, infra_id=None): self.vehicle = vehicle + self.carla_world = carla_world if carla_world is not None \ + else self.vehicle.get_world() + self.id = infra_id if infra_id is not None else vehicle.id self.activate = config_yaml['activate'] self.camera_visualize = config_yaml['camera_visualize'] self.camera_num = min(config_yaml['camera_num'], 4) self.lidar_visualize = config_yaml['lidar_visualize'] + self.global_position = config_yaml['global_position'] \ + if 'global_position' in config_yaml else None self.cav_world = weakref.ref(cav_world)() ml_manager = cav_world.ml_manager @@ -343,7 +410,8 @@ def __init__(self, vehicle, config_yaml, cav_world, data_dump=False): for i in range(self.camera_num): self.rgb_camera.append( CameraSensor( - vehicle, mount_position[i])) + vehicle, self.carla_world, mount_position[i], + self.global_position)) else: self.rgb_camera = None @@ -351,8 +419,11 @@ def __init__(self, vehicle, config_yaml, cav_world, data_dump=False): # we only spawn the LiDAR when perception module is activated or lidar # visualization is needed if self.activate or self.lidar_visualize: - self.lidar = LidarSensor(vehicle, config_yaml['lidar']) - self.o3d_vis = o3d_visualizer_init(vehicle.id) + self.lidar = LidarSensor(vehicle, + self.carla_world, + config_yaml['lidar'], + self.global_position) + self.o3d_vis = o3d_visualizer_init(self.id) else: self.lidar = None self.o3d_vis = None @@ -361,7 +432,9 @@ def __init__(self, vehicle, config_yaml, cav_world, data_dump=False): self.data_dump = data_dump if data_dump: self.semantic_lidar = SemanticLidarSensor(vehicle, - config_yaml['lidar']) + self.carla_world, + config_yaml['lidar'], + self.global_position) # count how many steps have been passed self.count = 0 @@ -473,14 +546,14 @@ def activate_mode(self, objects): if self.camera_visualize: names = ['front', 'right', 'left', 'back'] for (i, rgb_image) in enumerate(rgb_draw_images): - if i > self.camera_num or i > self.camera_visualize - 1: + if i > self.camera_num - 1 or i > self.camera_visualize - 1: break rgb_image = self.ml_manager.draw_2d_box( yolo_detection, rgb_image, i) rgb_image = cv2.resize(rgb_image, (0, 0), fx=0.4, fy=0.4) cv2.imshow( '%s camera of actor %d, perception activated' % - (names[i], self.vehicle.id), rgb_image) + (names[i], self.id), rgb_image) cv2.waitKey(1) if self.lidar_visualize: @@ -514,13 +587,13 @@ def deactivate_mode(self, objects): objects: dict Updated object dictionary. 
""" - world = self.vehicle.get_world() + world = self.carla_world vehicle_list = world.get_actors().filter("*vehicle*") thresh = 50 if not self.data_dump else 120 vehicle_list = [v for v in vehicle_list if self.dist(v) < thresh and - v.id != self.vehicle.id] + v.id != self.id] # use semantic lidar to filter out vehicles out of the range if self.data_dump: @@ -550,18 +623,26 @@ def deactivate_mode(self, objects): if self.camera_visualize: while self.rgb_camera[0].image is None: continue - # we only visualiz the frontal camera - rgb_image = np.array(self.rgb_camera[0].image) - # draw the ground truth bbx on the camera image - rgb_image = self.visualize_3d_bbx_front_camera(objects, rgb_image) - # resize to make it fittable to the screen - rgb_image = cv2.resize(rgb_image, (0, 0), fx=0.4, fy=0.4) - - # show image using cv2 - cv2.imshow( - 'front camera of actor %d, perception deactivated' % - self.vehicle.id, rgb_image) - cv2.waitKey(1) + + names = ['front', 'right', 'left', 'back'] + + for (i, rgb_camera) in enumerate(self.rgb_camera): + if i > self.camera_num - 1 or i > self.camera_visualize - 1: + break + # we only visualiz the frontal camera + rgb_image = np.array(rgb_camera.image) + # draw the ground truth bbx on the camera image + rgb_image = self.visualize_3d_bbx_front_camera(objects, + rgb_image, + i) + # resize to make it fittable to the screen + rgb_image = cv2.resize(rgb_image, (0, 0), fx=0.4, fy=0.4) + + # show image using cv2 + cv2.imshow( + '%s camera of actor %d, perception deactivated' % + (names[i], self.id), rgb_image) + cv2.waitKey(1) if self.lidar_visualize: while self.lidar.data is None: @@ -614,7 +695,7 @@ def filter_vehicle_out_sensor(self, vehicle_list): return new_vehicle_list - def visualize_3d_bbx_front_camera(self, objects, rgb_image): + def visualize_3d_bbx_front_camera(self, objects, rgb_image, camera_index): """ Visualize the 3d bounding box on frontal camera image. @@ -626,17 +707,27 @@ def visualize_3d_bbx_front_camera(self, objects, rgb_image): rgb_image : np.ndarray Received rgb image at current timestamp. + camera_index : int + Indicate the index of the current camera. + """ + camera_transform = \ + self.rgb_camera[camera_index].sensor.get_transform() + camera_location = \ + camera_transform.location + camera_rotation = \ + camera_transform.rotation + for v in objects['vehicles']: # we only draw the bounding box in the fov of camera _, angle = cal_distance_angle( - v.get_location(), self.ego_pos.location, - self.ego_pos.rotation.yaw) - if angle < 30: + v.get_location(), camera_location, + camera_rotation.yaw) + if angle < 60: bbx_camera = st.get_2d_bb( v, - self.rgb_camera[0].sensor, - self.rgb_camera[0].sensor.get_transform()) + self.rgb_camera[camera_index].sensor, + camera_transform) cv2.rectangle(rgb_image, (int(bbx_camera[0, 0]), int(bbx_camera[0, 1])), (int(bbx_camera[1, 0]), int(bbx_camera[1, 1])), @@ -657,10 +748,10 @@ def speed_retrieve(self, objects): if 'vehicles' not in objects: return - world = self.vehicle.get_world() + world = self.carla_world vehicle_list = world.get_actors().filter("*vehicle*") vehicle_list = [v for v in vehicle_list if self.dist(v) < 50 and - v.id != self.vehicle.id] + v.id != self.id] # todo: consider the minimum distance to be safer in next version for v in vehicle_list: @@ -704,7 +795,7 @@ def retrieve_traffic_lights(self, objects): object : dict The updated dictionary. 
""" - world = self.vehicle.get_world() + world = self.carla_world tl_list = world.get_actors().filter('traffic.traffic_light*') objects.update({'traffic_lights': []}) @@ -735,4 +826,4 @@ def destroy(self): self.o3d_vis.destroy_window() if self.data_dump: - self.semantic_lidar.sensor.destroy() \ No newline at end of file + self.semantic_lidar.sensor.destroy() diff --git a/opencda/scenario_testing/config_yaml/cooperception_datadump_town06_carla.yaml b/opencda/scenario_testing/config_yaml/v2xp_datadump_town06_carla.yaml similarity index 81% rename from opencda/scenario_testing/config_yaml/cooperception_datadump_town06_carla.yaml rename to opencda/scenario_testing/config_yaml/v2xp_datadump_town06_carla.yaml index 36dbc4e7..11507dc4 100644 --- a/opencda/scenario_testing/config_yaml/cooperception_datadump_town06_carla.yaml +++ b/opencda/scenario_testing/config_yaml/v2xp_datadump_town06_carla.yaml @@ -22,7 +22,34 @@ world: fog_falloff: 0 # Density of the fog (as in specific mass) from 0 to infinity. The bigger the value, the more dense and heavy it will be, and the fog will reach smaller heights wetness: 0 -# First define the basic parameters of the vehicles +# Define the basic parameters of the rsu +rsu_base: &rsu_base + sensing: + perception: + activate: false # when not activated, objects positions will be retrieved from server directly + camera_visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera + camera_num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) + lidar_visualize: true # whether to visualize lidar points using open3d + lidar: # lidar sensor configuration, check CARLA sensor reference for more details + channels: 32 + range: 120 + points_per_second: 1000000 + rotation_frequency: 20 # the simulation is 20 fps + upper_fov: 2 + lower_fov: -25 + dropoff_general_rate: 0.3 + dropoff_intensity_limit: 0.7 + dropoff_zero_intensity: 0.4 + noise_stddev: 0.02 + localization: + activate: true # when not activated, ego position will be retrieved from server directly + dt: *delta # used for kalman filter + gnss: # gnss sensor configuration + noise_alt_stddev: 0.05 + noise_lat_stddev: 3e-6 + noise_lon_stddev: 3e-6 + +# Define the basic parameters of the vehicles vehicle_base: &vehicle_base sensing: &base_sensing perception: &base_perception @@ -31,15 +58,15 @@ vehicle_base: &vehicle_base camera_num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) lidar_visualize: true # whether to visualize lidar points using open3d lidar: # lidar sensor configuration, check CARLA sensor reference for more details - channels: 64 + channels: 32 range: 120 - points_per_second: 1300000 + points_per_second: 1000000 rotation_frequency: 20 # the simulation is 20 fps upper_fov: 2 lower_fov: -25 - dropoff_general_rate: 0.1 + dropoff_general_rate: 0.3 dropoff_intensity_limit: 0.7 - dropoff_zero_intensity: 0.15 + dropoff_zero_intensity: 0.4 noise_stddev: 0.02 localization: &base_localize activate: true # when not activated, ego position will be retrieved from server directly @@ -119,7 +146,12 @@ carla_traffic_manager: # define scenario. In this scenario, a 4-vehicle platoon already exists. 
diff --git a/opencda/scenario_testing/utils/sim_api.py b/opencda/scenario_testing/utils/sim_api.py
index 81e30ced..b9687acb 100644
--- a/opencda/scenario_testing/utils/sim_api.py
+++ b/opencda/scenario_testing/utils/sim_api.py
@@ -18,6 +18,7 @@
 from opencda.core.common.vehicle_manager import VehicleManager
 from opencda.core.application.platooning.platooning_manager import \
     PlatooningManager
+from opencda.core.common.rsu_manager import RSUManager
 from opencda.core.common.cav_world import CavWorld
 from opencda.scenario_testing.utils.customized_map_api import \
     load_customized_world, bcolors
@@ -373,6 +374,35 @@ def create_platoon_manager(self, map_helper=None, data_dump=False):
 
         return platoon_list
 
+    def create_rsu_manager(self, data_dump):
+        """
+        Create a list of RSUs.
+
+        Parameters
+        ----------
+        data_dump : bool
+            Whether to dump sensor data.
+
+        Returns
+        -------
+        rsu_list : list
+            A list containing all RSU managers.
+        """
+        print('Creating RSU.')
+        rsu_list = []
+        for i, rsu_config in enumerate(
+                self.scenario_params['scenario']['rsu_list']):
+
+            rsu_manager = RSUManager(self.world, rsu_config,
+                                     self.carla_map,
+                                     self.cav_world,
+                                     self.scenario_params['current_time'],
+                                     data_dump)
+
+            rsu_list.append(rsu_manager)
+
+        return rsu_list
+
     def spawn_vehicles_by_list(self, tm, traffic_config, bg_list):
         """
         Spawn the traffic vehicles by the given list.
diff --git a/opencda/scenario_testing/cooperception_datadump_town06_carla.py b/opencda/scenario_testing/v2xp_datadump_town06_carla.py
similarity index 87%
rename from opencda/scenario_testing/cooperception_datadump_town06_carla.py
rename to opencda/scenario_testing/v2xp_datadump_town06_carla.py
index 68bfcdfb..7d0b5e68 100644
--- a/opencda/scenario_testing/cooperception_datadump_town06_carla.py
+++ b/opencda/scenario_testing/v2xp_datadump_town06_carla.py
@@ -37,6 +37,8 @@ def run_scenario(opt, config_yaml):
         single_cav_list = \
             scenario_manager.create_vehicle_manager(application=['single'],
                                                     data_dump=True)
+        rsu_list = \
+            scenario_manager.create_rsu_manager(data_dump=True)
 
         # create background traffic in carla
         traffic_manager, bg_veh_list = \
@@ -53,12 +55,11 @@ def run_scenario(opt, config_yaml):
         # save the data collection protocol to the folder
         current_path = os.path.dirname(os.path.realpath(__file__))
         save_yaml_name = os.path.join(current_path,
-                                        '../../data_dumping',
-                                        scenario_params['current_time'],
-                                        'data_protocol.yaml')
+                                      '../../data_dumping',
+                                      scenario_params['current_time'],
+                                      'data_protocol.yaml')
         save_yaml(scenario_params, save_yaml_name)
 
-        # run steps
         while True:
             scenario_manager.tick()
             transform = single_cav_list[0].vehicle.get_transform()
@@ -75,6 +76,10 @@ def run_scenario(opt, config_yaml):
                 control = single_cav.run_step()
                 single_cav.vehicle.apply_control(control)
 
+            for rsu in rsu_list:
+                rsu.update_info()
+                rsu.run_step()
+
     finally:
         eval_manager.evaluate()
 
@@ -85,6 +90,7 @@
 
         for v in single_cav_list:
             v.destroy()
+        for r in rsu_list:
+            r.destroy()
         for v in bg_veh_list:
             v.destroy()
-
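The per-tick ordering this change establishes mirrors the CAV pattern: each RSU refreshes localization and detection first, then dumps. A condensed sketch of the loop body (assumes the objects created earlier in run_scenario are in scope):

```python
# Inside the while-loop of run_scenario (sketch, not the full loop).
scenario_manager.tick()

for single_cav in single_cav_list:
    single_cav.update_info()
    control = single_cav.run_step()
    single_cav.vehicle.apply_control(control)

for rsu in rsu_list:
    rsu.update_info()   # localization + detection
    rsu.run_step()      # data dumping only, for now
```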