diff --git a/examples/modd/MODD_diagram.png b/examples/modd/MODD_diagram.png
new file mode 100644
index 0000000..877f39e
Binary files /dev/null and b/examples/modd/MODD_diagram.png differ
diff --git a/examples/modd/README.md b/examples/modd/README.md
new file mode 100644
index 0000000..b927cde
--- /dev/null
+++ b/examples/modd/README.md
@@ -0,0 +1,64 @@
+# MODD
+
+The MODD class receives a set of labeled traces and outputs an ODD Monitor.
+
+The MODD implements the boxes on the right side of the following diagram:
+
+![MODD diagram](MODD_diagram.png)
+
+
+## VerifAI Interface
+
+Given a specification $\varphi$, VerifAI uses the sampler, analyzer, and simulator to generate a set of traces $\{\sigma_i, \ell_i\}_i$.
+
+The MODD receives the set of evaluated simulation traces $\{\sigma_i\}_i$, where each point $\sigma_i$ is defined by a feature vector plus one special feature, namely the correctness of the specification, and generates a training dataset $\{\tau_i, \ell_i\}_i$, where $\tau_i$ is a vector and $\ell_i$ is a single value (Data Generation box in the diagram).
+
+The MODD then uses the training dataset $\{\tau_i, \ell_i\}_i$ to train a monitor $M$ (Learner box).
+
+The MODD evaluates the monitor $M$ over some new simulations (Evaluation box). If the optimality objective is not met, the MODD triggers the generation of new simulations to expand the training dataset and restarts the training process.
+
+### Run instructions
+- Create an environment with python=3.9.
+- Clone the repository and install VerifAI as usual:
+  `python -m pip install -e .`
+- Change directory to examples/modd/
+- Run the example: `python ./modd_learner_follow.py`.
+
+### Implementation details
+
+The MODD receives the following inputs (a configuration sketch follows the list):
+- datagen_params: Parameters required to generate the training dataset:
+    - preprocessing function
+    - labeling function
+    - saving directory
+- trainer_params: Parameters required to train the ODD Monitor:
+    - model to be trained (sklearn, pytorch, etc.)
+    - training results saving directory
+    - trained model saving path
+- eval_params: Parameters required to evaluate the ODD Monitor:
+    - evaluation method
+    - specification
+    - number of simulations to run
+    - number of steps per simulation
+    - evaluation results saving directory
+    - evaluation results of running the monitor on simulations
+    - evaluation results of running the system without the monitor on the same simulations
+    - scenes saving path
+- sampling_params: Parameters required to specify how to make calls to a sampler to generate more data:
+    - sampler
+    - server_class
+    - server_options
+    - path to the controller to be monitored
+- global_params: Parameters to specify how many simulations to run per loop of the MODD generation process:
+    - initial number of simulations
+    - initial number of simulation steps
+    - number of simulations per refinement loop
+    - number of steps per refinement simulation
+    - iterations of the refinement loop
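+
+A minimal sketch of how these parameter groups are wired together, mirroring `modd_learner_main.py`; the `preprocessing`, `labeler`, and `specification` stubs below are scenario-specific placeholders, and only a subset of the supported keys is shown:
+
+```python
+from dotmap import DotMap
+from sklearn.linear_model import LogisticRegression
+from verifai.modd.odd_learner import MODDLearner
+
+def preprocessing(traces):   # traces -> feature matrix (scenario-specific)
+    ...
+
+def labeler(samples):        # binarize the target column (scenario-specific)
+    ...
+
+def specification(trace):    # correctness verdict for a trace (placeholder)
+    return 0
+
+datagen_params = DotMap(preprocessing=preprocessing, labeler=labeler,
+                        datagen_save_dir="./out/samples")
+trainer_params = DotMap(model=LogisticRegression(),  # any sklearn-style model
+                        training_results_path="",
+                        save_model_path="./out/monitor.pkl")
+eval_params = DotMap(method="conformance_testing", specification=specification,
+                     eval_num_simulations=200, eval_num_steps=300,
+                     evaluation_results_path="")
+sampling_params = DotMap(path="carla/followLeader_extracar.scenic",
+                         server_type="scenic")
+global_params = DotMap(initial_num_simulations=500, initial_num_steps=300,
+                       refinement_num_simulations=1, refinement_num_steps=250,
+                       refinement_iters=0)
+
+# One call drives the whole loop: Data Generation -> Learner -> Evaluation,
+# expanding the dataset until the refinement budget is exhausted.
+modd = MODDLearner(datagen_params=datagen_params, trainer_params=trainer_params,
+                   eval_params=eval_params, sampling_params=sampling_params,
+                   global_params=global_params)
+oddMonitor = modd.run()
+```
+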
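diff --git a/examples/modd/carla/controller_training.py b/examples/modd/carla/controller_training.py
new file mode 100644
index 0000000..b5228f6
--- /dev/null
+++ b/examples/modd/carla/controller_training.py
@@ -0,0 +1,300 @@
+import pandas as pd
+import numpy as np
+import os
+import torch
+import torchvision
+import random
+import sys
+import re
+import cv2
+
+print("CUDA enabled:", torch.cuda.is_available())
+
+
+class resNet(torch.nn.Module):
+    """
+    Use a ResNet from torchvision
+    """
+
+    def __init__(self, layers="18", pre_trained=False):
+        super(resNet, self).__init__()
+        if layers == "18":
+            self.model = torchvision.models.resnet18(pretrained=pre_trained)
+        else:
+            raise NotImplementedError
+
+    def forward(self, x):
+        return self.model(x)
+
+
+class convNet(torch.nn.Module):
+    """
+    CNN to train from scratch
+    """
+
+    def __init__(self):
+        super(convNet, self).__init__()
+        self.model = torch.nn.Sequential(
+            torch.nn.Conv2d(3, 32, kernel_size=3, stride=2),
+            torch.nn.LeakyReLU(),
+            torch.nn.MaxPool2d(kernel_size=2),
+            torch.nn.Conv2d(32, 64, kernel_size=3, stride=2),
+            torch.nn.LeakyReLU(),
+            torch.nn.MaxPool2d(kernel_size=2),
+            torch.nn.Conv2d(64, 128, kernel_size=3, stride=2),
+            torch.nn.LeakyReLU(),
+            torch.nn.Flatten(),
+        )
+
+    def forward(self, x):
+        h = self.model(x)
+        return h
+
+
+class CNN(torch.nn.Module):
+    def __init__(self, resnet=False, pretrained=False):
+        super(CNN, self).__init__()
+        if resnet:
+            self.model = resNet(pre_trained=pretrained)
+        else:
+            self.model = convNet()
+        # fc1 assumes the 1000-dimensional output of resNet; convNet's
+        # flattened output size differs and would require adjusting fc1.
+        self.fc1 = torch.nn.Linear(1000, 1024)
+        self.head = torch.nn.Linear(1024, 2)
+        print(self.model)
+
+    def forward(self, x):
+        x = self.model(x)
+        x = self.fc1(x)
+        h = self.head(x)
+        return h
+
+
+def train_cnn(
+    train_dataset,
+    val_dataset,
+    save_path,
+    batch_size=128,
+    n_epochs=351,
+    lr=0.0005,
+    device="cuda",
+    resnet=True,
+    model_name="model"
+):
+    """
+    Train a CNN on the given data
+    """
+    model = CNN(resnet=resnet).to(device)
+    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
+    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
+        optimizer, factor=0.9, patience=10
+    )
+    criterion = torch.nn.MSELoss()
+    train_loader = torch.utils.data.DataLoader(
+        train_dataset, batch_size=batch_size, shuffle=True, drop_last=True
+    )
+    test_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, drop_last=True)
+    best_val_loss = np.inf
+    model.eval()
+
+    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+    params = sum([np.prod(p.size()) for p in model_parameters])
+    print(f"Number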
of parameters: {params}") + with torch.no_grad(): + test_loss = 0 + for x_batch, y_batch in test_loader: + y_batch = y_batch.reshape((batch_size, 2)).to(device) + y_pred = model(x_batch.to(device)) + test_loss += criterion(y_pred, y_batch).item() + test_loss /= len(test_loader) + print(f"Epoch {-1}: val loss = {test_loss}") + for epoch in range(n_epochs): + model.train() + total_loss = 0 + for loaded_batch in train_loader: + x_batch, y_batch = loaded_batch + optimizer.zero_grad() + y_batch = y_batch.reshape((batch_size, 2)).to(device) + y_pred = model(x_batch.to(device)) + loss = criterion(y_pred, y_batch) + total_loss += loss.item() + loss.backward() + optimizer.step() + model.eval() + with torch.no_grad(): + test_loss = 0 + for loaded_batch in test_loader: + x_batch, y_batch = loaded_batch + y_batch = y_batch.reshape((batch_size, 2)).to(device) + y_pred = model(x_batch.to(device)) + test_loss += criterion(y_pred, y_batch).item() + test_loss /= len(test_loader) + scheduler.step(test_loss) + print( + f"Epoch {epoch}: train loss = {total_loss / len(train_loader)} val loss = {test_loss}" + ) + # save model + if test_loss < best_val_loss: + best_val_loss = test_loss + torch.save(model.state_dict(), os.path.join(save_path, f"{model_name}_{epoch}.pth")) + if (epoch % 50 == 0 or epoch == n_epochs-1) and epoch > 50 : + torch.save(model.state_dict(), os.path.join(save_path, f"{model_name}_{epoch}.pth")) + return model + + +from torch.utils.data import Dataset +from torchvision.io import read_image + + +class CustomImageDataset(Dataset): + def __init__(self, annotations_file, img_dir): + self.img_labels = pd.read_csv(annotations_file) + self.img_dir = img_dir + + def __len__(self): + return len(self.img_labels) + + def __getitem__(self, idx): + img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0]) + image = read_image(img_path) / 255 # squeeze into [0, 1] + label = self.img_labels.iloc[idx, 1] + return image, torch.FloatTensor([label]) + + +def load_data_csv( + data_dir, + split_ratio=0.1, + seed=0, +): + """ + Load data and split into train validation + """ + + with open(os.path.join(data_dir, "data.csv")) as f: + data = pd.read_csv(f) + data_size = len(data) + print(data_size) + # split into train and test + data = data.sample(frac=1, random_state=seed).reset_index(drop=True) + + train_sample = int(data_size * (1 - split_ratio)) + xtrain_image = data.loc[:train_sample] + xval_image = data.loc[train_sample:] + # save xtrain_image and xval_image to data_dir + xtrain_image.to_csv(os.path.join(data_dir, "xtrain_image.csv"), index=False) + xval_image.to_csv(os.path.join(data_dir, "xval_image.csv"), index=False) + train_dataset = CustomImageDataset( + annotations_file=os.path.join(data_dir, "xtrain_image.csv"), + img_dir=os.path.join(data_dir, "imgs"), + ) + val_dataset = CustomImageDataset( + annotations_file=os.path.join(data_dir, "xval_image.csv"), + img_dir=os.path.join(data_dir, "imgs"), + ) + return train_dataset, val_dataset + +class CustomDataset(Dataset): + + def __init__(self, imgs, labels, transform=None): + super().__init__() + self.imgs = imgs + self.labels = labels + self.transform = transform + + def __len__(self): + return min(len(self.imgs), len(self.labels)) + + def __getitem__(self, index): + img = (torch.from_numpy(self.imgs[index]) / 255).permute(2,0,1)[[2,1,0],:,:] + label = self.labels[index] + + if self.transform: + return self.transform(img), torch.FloatTensor([label]) + else: + return img, torch.FloatTensor([label]) + + +def sort_paths(folder_path): + orig_paths 
= [p for p in os.listdir(folder_path) if re.sub("[^0-9]","",p) != ""] + M = max([len(e) for e in orig_paths]) + m = min([len(e) for e in orig_paths]) + paths = ["0" * (M-len(e)) + e for e in orig_paths] + paths = sorted(paths) + for _ in range(M-m-1): + paths = [e if e[0] != "0" else e[1:] for e in paths] + paths = [folder_path+e if e[0] != "0" else folder_path+e[1:] for e in paths] + return paths + + + +def load_data_np( + data_dir, + n_controller, + split_ratio=0.1, + seed=0, +): + """ + Load data and split into train validation + """ + if data_dir[-1] != "/": + data_dir += "/" + + dss = [] + + paths_data_dirs = [data_dir + e + "/" for e in os.listdir(data_dir) ] + first = True + i0 = 0 + for sim in paths_data_dirs: + if len(os.listdir(sim)) > 0: + loaded_imgs = [cv2.imread(img) for img in sort_paths(sim)] + imgs = np.array(loaded_imgs)[:, 80:160, 40:600] + print(imgs.shape) + dist = np.expand_dims(np.load(sim + "dist.npz")["values"], axis=0) / 30 + cte = np.expand_dims(np.load(sim + "cte.npz")["values"], axis=0) + labels = np.transpose(np.concatenate((cte,dist))) + if first: + first = False + ds = CustomDataset(imgs, labels) + i0 = 1 + else: + dsi = CustomDataset(imgs, labels) + ds = torch.utils.data.ConcatDataset([ds,dsi]) + i0 +=1 + else: + print(f"Folder {sim} should be removed. CARLA NOT LOADED") + print(f"Total of {len(ds)} images") + + train_dataset, val_dataset = torch.utils.data.random_split(ds, [1-split_ratio, split_ratio]) + + return train_dataset, val_dataset + + +import argparse + +parser = argparse.ArgumentParser(description="Train CNN") +parser.add_argument("--data_dir", type=str, default="./training_data/") +parser.add_argument("--model_dir", type=str, default="./models") +parser.add_argument("--model_name", type=str, default="model") +parser.add_argument("--n_controller", type=int, default=0) +args = parser.parse_args() + +if __name__ == "__main__": + + import os + + sys.setrecursionlimit(10000) + + device="cuda" + + os.makedirs(args.model_dir, exist_ok=True) + n = args.n_controller + train_dataset, val_dataset = load_data_np(args.data_dir, n) + print(train_dataset[0]) + train_cnn( + train_dataset=train_dataset, + val_dataset=val_dataset, + save_path=args.model_dir, + resnet=True, + model_name=f"controller_cte_dist", + ) + \ No newline at end of file diff --git a/examples/modd/carla/data_generation.py b/examples/modd/carla/data_generation.py new file mode 100644 index 0000000..a461ff1 --- /dev/null +++ b/examples/modd/carla/data_generation.py @@ -0,0 +1,77 @@ + +import math +import os.path +import sys +import os +import shutil +import argparse +import pickle +import numpy as np +import time +import scenic +from dotmap import DotMap +from PIL import Image +import cv2 + +import pandas as pd + + + +# Arg Parser +parser = argparse.ArgumentParser(description='modd',usage='later', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + +## arguments +parser.add_argument('scenic_path', help='path to scenic file', metavar='scenic_path') +parser.add_argument("--data_dir", type=str, default="./training_data/") +parser.add_argument('--num_sim', help='number of simulations used per monitor', type=int, default=1) +parser.add_argument('--num_steps', help='number of steps per simulation',type=int,default=60) + + +args = parser.parse_args() + + + +## assignments + +scenic_path = args.scenic_path +num_of_simulations = args.num_sim +num_of_steps = args.num_steps +data_dir = args.data_dir + + + + +last_folder = 0 + + +for i in range(last_folder, num_of_simulations): + + + # Initialize 
logger folder structure
+    if i == 0:
+        if os.path.exists(data_dir):
+            shutil.rmtree(data_dir)
+        os.mkdir(data_dir)
+
+    sim_dir = f"{data_dir}{i}"
+    if not os.path.exists(sim_dir):
+        os.mkdir(sim_dir)
+
+    flag_good = False
+    print(f"Executing sim {i}")
+    try:
+        os.system(f"scenic -S {scenic_path} --count 1 --time {num_of_steps} --2d --param recordFolder {sim_dir} --param timeout 30")
+        flag_good = True
+    except Exception:
+        # Retry once after a short pause if launching the simulation failed.
+        print(f"Simulation {i} failed.")
+        time.sleep(5)
+        os.system(f"scenic -S {scenic_path} --count 1 --time {num_of_steps} --2d --param recordFolder {sim_dir} --param timeout 30")
+
+
diff --git a/examples/modd/carla/followLeader_datagen.scenic b/examples/modd/carla/followLeader_datagen.scenic
new file mode 100644
index 0000000..335ff9f
--- /dev/null
+++ b/examples/modd/carla/followLeader_datagen.scenic
@@ -0,0 +1,271 @@
+""" Scenario Description
+The ego car follows the leader car, maintaining a normal distance while lane keeping.
+"""
+import pickle
+import sklearn
+import numpy as np
+import random
+import os
+import re
+from scenic.domains.driving.controllers import (
+    PIDLateralController,
+    PIDLongitudinalController,
+)
+
+
+param timeout = 30
+param map = localPath('../../../tests/scenic/Town01.xodr')
+param carla_map = 'Town01'
+param render = 1
+
+model scenic.simulators.carla.model
+
+#CONSTANTS
+EGO_MODEL = "vehicle.tesla.model3"
+LEADER_SPEED = Range(8,12)
+EGO_SPEED = 5
+THROTTLE_ACTION = 0.6
+BRAKE_ACTION = 1.0
+EGO_TO_LEADER = Range(-15, -10)
+EGO_BRAKING_THRESHOLD = 10
+
+
+behavior FollowLaneBehaviorModified(target_speed = 10, laneToFollow=None, is_oppositeTraffic=False, leaderCar=None):
+    """
+    Follows the lane the vehicle is currently on, unless laneToFollow is specified.
+    Once the vehicle reaches an intersection, by default, the vehicle will take the straight route.
+    If a straight route is not available, then any available turn route will be taken, uniformly at random.
+    If turning at the intersection, the vehicle will slow down to make the turn safely.
+
+    This behavior does not terminate. A recommended use of the behavior is to accompany it with a condition,
+    e.g. do FollowLaneBehavior() until ...
+
+    :param target_speed: Its unit is m/s. By default, it is set to 10 m/s.
+    :param laneToFollow: If the lane to follow is different from the lane that the vehicle is on, this parameter can be used to specify that lane. By default, this variable is set to None, which means that the vehicle will follow the lane that it is currently on.
+ """ + + past_steer_angle = 0 + past_speed = 0 # making an assumption here that the agent starts from zero speed + if laneToFollow is None: + current_lane = self.lane + else: + current_lane = laneToFollow + + current_centerline = current_lane.centerline + in_turning_lane = False # assumption that the agent is not instantiated within a connecting lane + intersection_passed = False + entering_intersection = False # assumption that the agent is not instantiated within an intersection + end_lane = None + original_target_speed = target_speed + TARGET_SPEED_FOR_TURNING = 3 # KM/H + TRIGGER_DISTANCE_TO_SLOWDOWN = 10 # FOR TURNING AT INTERSECTIONS + + if current_lane.maneuvers != (): + nearby_intersection = current_lane.maneuvers[0].intersection + if nearby_intersection == None: + nearby_intersection = current_lane.centerline[-1] + else: + nearby_intersection = current_lane.centerline[-1] + + # instantiate longitudinal and lateral controllers + _lon_controller, _lat_controller = simulation().getLaneFollowingControllers(self) + + while True: + + if self.speed is not None: + current_speed = self.speed + else: + current_speed = past_speed + + if not entering_intersection and (distance from self.position to nearby_intersection) < TRIGGER_DISTANCE_TO_SLOWDOWN: + entering_intersection = True + intersection_passed = False + + if distance from self to intersection < TRIGGER_DISTANCE_TO_SLOWDOWN: + if leaderCar is None: + maneuvers = current_lane.maneuvers + select_maneuver = Uniform(*maneuvers) + self.select_maneuver = select_maneuver + print(self.select_maneuver) + else: + select_maneuver = leaderCar.select_maneuver + + elif len(current_lane.maneuvers) > 0: + select_maneuver = Uniform(*current_lane.maneuvers) + print(select_maneuver) + else: + take SetBrakeAction(1.0) + break + + # print(select_maneuver.type) + + # assumption: there always will be a maneuver + if select_maneuver.connectingLane != None: + current_centerline = concatenateCenterlines([current_centerline, select_maneuver.connectingLane.centerline, \ + select_maneuver.endLane.centerline]) + else: + current_centerline = concatenateCenterlines([current_centerline, select_maneuver.endLane.centerline]) + + current_lane = select_maneuver.endLane + end_lane = current_lane + + if current_lane.maneuvers != (): + nearby_intersection = current_lane.maneuvers[0].intersection + if nearby_intersection == None: + nearby_intersection = current_lane.centerline[-1] + else: + nearby_intersection = current_lane.centerline[-1] + + if select_maneuver.type != ManeuverType.STRAIGHT: + in_turning_lane = True + target_speed = TARGET_SPEED_FOR_TURNING + + # do TurnBehavior(trajectory = current_centerline, target_speed=target_speed) + trajectory = current_centerline + target_speed = target_speed + if isinstance(trajectory, PolylineRegion): + trajectory_centerline = trajectory + else: + trajectory_centerline = concatenateCenterlines([traj.centerline for traj in trajectory]) + + # instantiate longitudinal and lateral controllers + # _lon_controller, _lat_controller = simulation().getTurningControllers(self) + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller = PIDLateralController(K_P=0.8, K_D=0.6, K_I=0.0, dt=dt) + + past_steer_angle = 0 + + while self in network.intersectionRegion: + if self.speed is not None: + current_speed = self.speed + else: + current_speed = 0 + + cte = trajectory_centerline.signedDistanceTo(self.position) + speed_error = target_speed - current_speed + + # compute 
throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Latitudinal Control + current_steer_angle = _lat_controller.run_step(cte) + + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + + + if (end_lane is not None) and (self.position in end_lane) and not intersection_passed: + intersection_passed = True + in_turning_lane = False + entering_intersection = False + target_speed = original_target_speed + # _lon_controller, _lat_controller = simulation().getLaneFollowingControllers(self) + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller = PIDLateralController(K_P=0.2, K_D=0.5, K_I=0.0, dt=dt) + + nearest_line_points = current_centerline.nearestSegmentTo(self.position) + nearest_line_segment = PolylineRegion(nearest_line_points) + self.cte = nearest_line_segment.signedDistanceTo(self.position) + if is_oppositeTraffic: + self.cte = -self.cte + + #if leaderCar: + # print(self.cte) + speed_error = target_speed - current_speed + + #if leaderCar: + # speed_error += random.randrange(-2,2) + + # compute throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Lateral Control + # if distance from self to intersection < TRIGGER_DISTANCE_TO_SLOWDOWN: + # print("Entering intersection?") + # print(len(current_lane.maneuvers)) + # self.cte = 0 + current_steer_angle = _lat_controller.run_step(self.cte) + + if leaderCar: + # current_steer_angle += 1/2 * random.randrange(-1,2) * random.random() + if distance from self to leaderCar > 10: + throttle = 0.6 + if distance from self to leaderCar < 4: + take SetBrakeAction(1.0) + + + + + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + past_speed = current_speed + + + +#EGO BEHAVIOR: Follow lane and brake when reaches threshold distance to obstacle +behavior EgoBehavior(speed, leader, leaderCar=None): + try: + do FollowLaneBehaviorModified(target_speed=speed) + + interrupt when self._intersection: + if leader: + maneuvers = self._intersection.maneuversAt(self.position) + select_maneuver = Uniform(maneuvers) + if maneuver.connectingLane: + self.trajectory = [maneuver.startLane, maneuver.connectingLane, maneuver.endLane] + else: + self.trajectory = [maneuver.startLane, maneuver.endLane] + #print(maneuver.type) + do FollowTrajectoryBehavior(trajectory=self.trajectory, target_speed=EGO_SPEED) + else: + do FollowTrajectoryBehavior(trajectory=leaderCar.trajectory, target_speed=EGO_SPEED) + +#PLACEMENT +lane = Uniform(*network.lanes) +trajectory = [lane] +#for _ in range(1): +maneuver = Uniform(*lane.maneuvers) +trajectory += [maneuver.connectingLane, maneuver.endLane] + + +attrs = {"image_size_x": 640, + "image_size_y": 320} + +start = new OrientedPoint on lane.centerline +leader = new Car at start, + with blueprint EGO_MODEL, + with behavior FollowLaneBehaviorModified(target_speed=EGO_SPEED), + with color Color(0,0,0) + +ego = new Car following roadDirection from leader.position for EGO_TO_LEADER, + with blueprint EGO_MODEL, + with behavior FollowLaneBehaviorModified(target_speed=EGO_SPEED, leaderCar=leader), + with cte 0, + with sensors {"front_rgb": RGBSensor(offset=(0, 2, 1), attributes=attrs) + } + + + + +require distance to intersection > 10 and distance from leader to intersection > 10 and ego can see leader + +record ego.position.x as ego_position_x 
+record ego.position.y as ego_position_y +record obstacle.position.x as obstacle_position_x +record obstacle.position.y as obstacle_position_y + + + + + + +record ego.distanceToClosest(Car) every 0.2 seconds after 3 seconds to "dist.npz" +record ego.speed every 0.2 seconds after 3 seconds to "speed.npz" +record leader.speed every 0.2 seconds after 3 seconds to "leader_speed.npz" +record ego.cte every 0.2 seconds after 3 seconds to "cte.npz" +record ego.observations["front_rgb"] every 0.2 seconds after 3 seconds to "front_rgb_{time:.1f}.jpg" + + +terminate when ((leader.speed < 0.1 or ego.speed < 0.1) and (distance to start) > 1) diff --git a/examples/modd/carla/followLeader_extracar.scenic b/examples/modd/carla/followLeader_extracar.scenic new file mode 100644 index 0000000..da0465a --- /dev/null +++ b/examples/modd/carla/followLeader_extracar.scenic @@ -0,0 +1,524 @@ +""" Scenario Description +The ego car follows the leader car maintaining a normal distance while lane keeping. +""" + +import warnings +warnings.filterwarnings("ignore") + +import pickle +import sklearn +import numpy as np +import random +import os +import re +import torch + +import pandas as pd +import torch +import torchvision +import sys +import cv2 +from scenic.domains.driving.controllers import ( + PIDLateralController, + PIDLongitudinalController, +) + +if globalParameters.seed != "": + random.seed(globalParameters.seed) + np.random.seed(globalParameters.seed) + +torch.set_default_device('cuda') + +class resNet(torch.nn.Module): + """ + Use restnet from torchvision + """ + + def __init__(self, layers="18", pre_trained=False): + super(resNet, self).__init__() + if layers == "18": + self.model = torchvision.models.resnet18(pretrained=pre_trained) + else: + raise NotImplementedError + + def forward(self, x): + return self.model(x) + + + +class convNet(torch.nn.Module): + """ + CNN to train from scratch + """ + + def __init__(self): + super(convNet, self).__init__() + self.model = torch.nn.Sequential( + torch.nn.Conv2d(3, 32, kernel_size=3, stride=2), + torch.nn.LeakyReLU(), + torch.nn.MaxPool2d(kernel_size=2), + torch.nn.Conv2d(32, 64, kernel_size=3, stride=2), + torch.nn.LeakyReLU(), + torch.nn.MaxPool2d(kernel_size=2), + torch.nn.Conv2d(64, 128, kernel_size=3, stride=2), + torch.nn.LeakyReLU(), + torch.nn.Flatten(), + ) + + def forward(self, x): + h = self.model(x) + return h + + +class CNN(torch.nn.Module): + def __init__(self, resnet=False, pretrained=False): + super(CNN, self).__init__() + if resnet: + self.model = resNet(pre_trained=pretrained) + else: + self.model = convNet() + self.fc1 = torch.nn.Linear(1000,1024) + self.head = torch.nn.Linear(1024, 2) + + def forward(self, x): + x = self.model(x) + x = self.fc1(x) + h = self.head(x) + return h + + +param timeout = 180 +param map = localPath('../../../tests/scenic/Town01.xodr') +param carla_map = 'Town01' +param timeBound = 300 + + +model scenic.simulators.carla.model + +#CONSTANTS +EGO_MODEL = "vehicle.tesla.model3" +# Old speed before TimeSeries: +# LEADER_SPEED = Range(8,12) +LEADER_SPEED = TimeSeries(VerifaiRange(6,8)) +EGO_SPEED = 3 +THROTTLE_ACTION = 0.5 +BRAKE_ACTION = 1.0 +EGO_TO_LEADER = Range(-15, -10) +EGO_BRAKING_THRESHOLD = 6 +EGO_ACCELERATION_THRESHOLD = 10 + +monitor_model = None +if globalParameters.monitor != "": + with open(globalParameters.monitor, 'rb') as f: + monitor_model = pickle.load(f) + + + + +behavior FollowLaneBehaviorModified(target_speed = 10, laneToFollow=None, is_oppositeTraffic=False, leaderCar=None): + """ + Follow's the lane on 
which the vehicle is at, unless the laneToFollow is specified. + Once the vehicle reaches an intersection, by default, the vehicle will take the straight route. + If straight route is not available, then any availble turn route will be taken, uniformly randomly. + If turning at the intersection, the vehicle will slow down to make the turn, safely. + + This behavior does not terminate. A recommended use of the behavior is to accompany it with condition, + e.g. do FollowLaneBehavior() until ... + + :param target_speed: Its unit is in m/s. By default, it is set to 10 m/s + :param laneToFollow: If the lane to follow is different from the lane that the vehicle is on, this parameter can be used to specify that lane. By default, this variable will be set to None, which means that the vehicle will follow the lane that it is currently on. + """ + + past_steer_angle = 0 + past_speed = 0 # making an assumption here that the agent starts from zero speed + if laneToFollow is None: + if leaderCar: + current_lane = leaderCar.lane + else: + current_lane = self.lane + else: + current_lane = laneToFollow + + current_centerline = current_lane.centerline + in_turning_lane = False # assumption that the agent is not instantiated within a connecting lane + intersection_passed = False + entering_intersection = False # assumption that the agent is not instantiated within an intersection + end_lane = None + original_target_speed = target_speed + TARGET_SPEED_FOR_TURNING = 3 # KM/H + TRIGGER_DISTANCE_TO_SLOWDOWN = 20 # FOR TURNING AT INTERSECTIONS + + if current_lane.maneuvers != (): + nearby_intersection = current_lane.maneuvers[0].intersection + if nearby_intersection == None: + nearby_intersection = current_lane.centerline[-1] + else: + nearby_intersection = current_lane.centerline[-1] + + # instantiate longitudinal and lateral controllers + _lon_controller, _lat_controller = simulation().getLaneFollowingControllers(self) + + if leaderCar is None: + self.steps_speed = 0 + else: + steps_running = 0 + + while True: + + if self.speed is not None: + current_speed = self.speed + else: + current_speed = past_speed + + if not entering_intersection and (distance from self.position to nearby_intersection) < TRIGGER_DISTANCE_TO_SLOWDOWN: + entering_intersection = True + intersection_passed = False + + if distance from self to intersection < TRIGGER_DISTANCE_TO_SLOWDOWN: + if leaderCar is None: + maneuvers = current_lane.maneuvers + turn_maneuvers = filter(lambda i : i.type != ManeuverType.STRAIGHT, maneuvers) + if len(turn_maneuvers) > 0: + select_maneuver = Uniform(*turn_maneuvers) + self.turn_maneuver = select_maneuver + else: + select_maneuver = Uniform(*maneuvers) + self.select_maneuver = select_maneuver + else: + select_maneuver = leaderCar.turn_maneuver + # print(select_maneuver.type) + + elif len(current_lane.maneuvers) > 0: + select_maneuver = Uniform(*current_lane.maneuvers) + else: + take SetBrakeAction(1.0) + break + + # print(select_maneuver.type) + + # assumption: there always will be a maneuver + if select_maneuver.connectingLane != None: + current_centerline = concatenateCenterlines([current_centerline, select_maneuver.connectingLane.centerline, \ + select_maneuver.endLane.centerline]) + else: + current_centerline = concatenateCenterlines([current_centerline, select_maneuver.endLane.centerline]) + + + current_lane = select_maneuver.endLane + end_lane = current_lane + + if current_lane.maneuvers != (): + nearby_intersection = current_lane.maneuvers[0].intersection + if nearby_intersection == None: + 
nearby_intersection = current_lane.centerline[-1] + else: + nearby_intersection = current_lane.centerline[-1] + + if select_maneuver.type != ManeuverType.STRAIGHT: + if leaderCar is None: + self.turn_centerline = current_centerline + else: + current_centerline = leaderCar.turn_centerline + + + in_turning_lane = True + target_speed = TARGET_SPEED_FOR_TURNING + + # do TurnBehavior(trajectory = current_centerline, target_speed=target_speed) + trajectory = current_centerline + target_speed = target_speed + if isinstance(trajectory, PolylineRegion): + trajectory_centerline = trajectory + else: + trajectory_centerline = concatenateCenterlines([traj.centerline for traj in trajectory]) + + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller = PIDLateralController(K_P=0.8, K_D=0.6, K_I=0.0, dt=dt) + + past_steer_angle = 0 + + while distance from self to intersection < TRIGGER_DISTANCE_TO_SLOWDOWN: + if self.speed is not None: + current_speed = self.speed + else: + current_speed = 0 + + cte = trajectory_centerline.signedDistanceTo(self.position) + + speed_error = target_speed - current_speed + + # compute throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Latitudinal Control + current_steer_angle = _lat_controller.run_step(cte) + + + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + + + if leaderCar is None: + self.steps_speed += 1 + if self.steps_speed > 20: + original_target_speed = LEADER_SPEED.getSample() + self.steps_speed = 0 + else: + steps_running += 1 + + if leaderCar and monitor_model and steps_running > 80: + # Input format: (weather, np.array([r,g,b, distIntersection, distObstacle, visibleObstacle, visibleLeader])) + distIntersection = distance from self to intersection + distObstacle = distance from self to obstacle + visibleObstacle = int(ego can see obstacle) + visibleLeader = int(ego can see leader) + + input_features = np.concatenate((self.weather, np.array([self.r,self.g,self.b, distIntersection, distObstacle, visibleObstacle, visibleLeader]))) + input_features = np.expand_dims(input_features, axis=0) + self.isSafe = monitor_model.predict(input_features)[0] + + + if (end_lane is not None) and (self.position in end_lane) and not intersection_passed: + intersection_passed = True + in_turning_lane = False + entering_intersection = False + target_speed = original_target_speed + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller = PIDLateralController(K_P=0.2, K_D=0.1, K_I=0.0, dt=dt) + + nearest_line_points = current_centerline.nearestSegmentTo(self.position) + nearest_line_segment = PolylineRegion(nearest_line_points) + self.cte = nearest_line_segment.signedDistanceTo(self.position) + if is_oppositeTraffic: + self.cte = -self.cte + + speed_error = target_speed - current_speed + + + # compute throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Lateral Control + current_steer_angle = _lat_controller.run_step(self.cte) + + if leaderCar: + if distance from self to leaderCar < EGO_BRAKING_THRESHOLD: + take SetBrakeAction(1.0) + + + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + past_speed = current_speed + + + if leaderCar is None: + self.steps_speed += 1 + if self.steps_speed > 20: + 
original_target_speed = LEADER_SPEED.getSample() + self.steps_speed = 0 + + else: + steps_running += 1 + + if leaderCar and monitor_model and steps_running > 80: + # Input format: (weather, np.array([r,g,b, distIntersection, distObstacle, visibleObstacle, visibleLeader])) + distIntersection = distance from self to intersection + distObstacle = distance from self to obstacle + visibleObstacle = int(ego can see obstacle) + visibleLeader = int(ego can see leader) + + input_features = np.concatenate((self.weather, np.array([self.r,self.g,self.b, distIntersection, distObstacle, visibleObstacle, visibleLeader]))) + input_features = np.expand_dims(input_features, axis=0) + self.isSafe = monitor_model.predict(input_features)[0] + + + + + +behavior EgoBehavior(target_speed = 10, controller_path = None, leaderCar=None, obstacleCar=None): + if monitor_model is None: + do ControllerBehavior(target_speed, controller_path, leaderCar) + else: + try: + do ControllerBehavior(target_speed=target_speed, controller_path=controller_path, leaderCar=leaderCar, obstacleCar=obstacleCar) + interrupt when not self.isSafe: + print("-- Monitor: Not safe! Switching to SafeBehavior.") + take SetBrakeAction(1.0) + do EgoBehavior2(target_speed = target_speed, controller_path = controller_path, leaderCar=leaderCar, obstacleCar=obstacleCar) + +behavior EgoBehavior2(target_speed = 10, controller_path = None, leaderCar=None, obstacleCar=None): + try: + do FollowLaneBehaviorModified(target_speed=target_speed, leaderCar=leader) + interrupt when self.isSafe: + print("-- Monitor: Safe! Switching back to CNN Controller.") + do EgoBehavior(target_speed=target_speed, controller_path=controller_path, leaderCar=leaderCar, obstacleCar=obstacleCar) + + + +behavior ControllerBehavior(target_speed = 10, controller_path = None, leaderCar=None, obstacleCar=None): + TARGET_SPEED_FOR_TURNING = 3 # KM/H + + past_steer_angle = 0 + past_speed = 0 # making an assumption here that the agent starts from zero speed + + original_target_speed = target_speed + + current_lane = self.lane + nearby_intersection = current_lane.centerline[-1] + + + # instantiate longitudinal and lateral controllers + # _lon_controller, _lat_controller = simulation().getLaneFollowingControllers(self) + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller_turn = PIDLateralController(K_P=0.8, K_D=0.2, K_I=0.0, dt=dt) + _lat_controller_straight = PIDLateralController(K_P=0.2, K_D=0.1, K_I=0.0, dt=dt) + + # instantiate model + controller = CNN(resnet=True) + controller.cuda() + controller.load_state_dict(torch.load(controller_path)) + controller.eval() + + # Features for monitor + if monitor_model: + color = obstacle.color + self.r,self.g,self.b = color.r, color.g, color.b + self.weather = np.array([ + self.carlaActor.get_world().get_weather().cloudiness, + self.carlaActor.get_world().get_weather().precipitation, + self.carlaActor.get_world().get_weather().precipitation_deposits, + self.carlaActor.get_world().get_weather().wind_intensity, + self.carlaActor.get_world().get_weather().sun_azimuth_angle, + self.carlaActor.get_world().get_weather().sun_altitude_angle, + self.carlaActor.get_world().get_weather().fog_density, + self.carlaActor.get_world().get_weather().fog_distance, + self.carlaActor.get_world().get_weather().wetness, + self.carlaActor.get_world().get_weather().fog_falloff, + self.carlaActor.get_world().get_weather().scattering_intensity, + 
self.carlaActor.get_world().get_weather().mie_scattering_scale, + self.carlaActor.get_world().get_weather().rayleigh_scattering_scale, + self.carlaActor.get_world().get_weather().dust_storm + ]) + + while True: + front_img = self.sensors["front_rgb"]._lastObservation + if isinstance(front_img, np.ndarray): + front_img = front_img[80:160, 40:600,:] / 255 + _dot = controller(torch.Tensor(front_img).permute(2,0,1).unsqueeze(0).cuda()) + _dot = _dot.cpu().detach().numpy()[0] + cte_pred, dist_pred = _dot[0].item(), _dot[1].item() * 30 + else: + cte_pred, dist_pred = 0, 0 + + if self.speed is not None: + current_speed = self.speed + else: + current_speed = past_speed + + speed_error = target_speed - current_speed + + + # compute throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Lateral Control + if abs(cte_pred) > 0.5: + _lat_controller = _lat_controller_turn + else: + _lat_controller = _lat_controller_straight + current_steer_angle = _lat_controller.run_step(cte_pred) + + if dist_pred > EGO_ACCELERATION_THRESHOLD: + throttle = THROTTLE_ACTION + if dist_pred < EGO_BRAKING_THRESHOLD: + take SetBrakeAction(1.0) + else: + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + past_speed = current_speed + + + # Monitor trigger + if monitor_model: + # Input format: (weather, np.array([r,g,b, distIntersection, distObstacle, visibleObstacle, visibleLeader])) + distIntersection = distance from self to intersection + distObstacle = distance from self to obstacle + distLeader = distance from self to leader + visibleObstacle = int(ego can see obstacle) + visibleLeader = int(ego can see leader) + + input_features = np.concatenate((self.weather, np.array([self.r,self.g,self.b, distIntersection, distObstacle, visibleObstacle, visibleLeader]))) + input_features = np.expand_dims(input_features, axis=0) + self.isSafe = monitor_model.predict(input_features)[0] + + + + + + +#PLACEMENT +lane = Uniform(*network.lanes) +trajectory = [lane] + + +# PLACEMENT +EGO_TO_LEADER = Range(-15,10) +intersec = Uniform(*network.intersections) +turn_maneuvers = filter(lambda i: i.type != ManeuverType.STRAIGHT, intersec.maneuvers) +turn_maneuver = Uniform(*turn_maneuvers) +startLane = turn_maneuver.startLane +start = startLane.centerline[-1] + +leader = new Car following roadDirection from start for -5, + with blueprint EGO_MODEL, + with behavior FollowLaneBehaviorModified(target_speed=LEADER_SPEED), + with steps_speed 0, + with color Color(0,0,0), + with time_steps 0 + +obstacle = new Car at 0 @ 25 relative to leader, + with heading leader + +ego = new Car following roadDirection from leader.position for EGO_TO_LEADER, + with blueprint EGO_MODEL, + with behavior EgoBehavior(target_speed=EGO_SPEED, controller_path=globalParameters.monitor, leaderCar=leader, obstacleCar=obstacle), + with cte 0, + with distIntersec EGO_TO_LEADER, + with isSafe 1, + with sensors {"front_rgb": RGBSensor(offset=(0, 2, 1), width=640, height=320), + "aerial_rgb": RGBSensor(offset=(0, -10, 4), width=1280, height=640) + }, + + + +require ego can see leader + + + +record distance from ego to leader as distLeader # to "dist_follow_leader.npz" +record distance from ego to intersection as distIntersection # to "dist_intersection.npz" +record distance from ego to obstacle as distObstacle # to "dist_obstacle.npz" +record ego can see obstacle as visibleObstacle +record ego can see leader as visibleLeader +if globalParameters.monitor != "": + 
record ego.isSafe as MonitorNotTriggered +record initial obstacle.color as colorObstacle # to "color.npz" +record initial np.array([ + ego.carlaActor.get_world().get_weather().cloudiness, + ego.carlaActor.get_world().get_weather().precipitation, + ego.carlaActor.get_world().get_weather().precipitation_deposits, + ego.carlaActor.get_world().get_weather().wind_intensity, + ego.carlaActor.get_world().get_weather().sun_azimuth_angle, + ego.carlaActor.get_world().get_weather().sun_altitude_angle, + ego.carlaActor.get_world().get_weather().fog_density, + ego.carlaActor.get_world().get_weather().fog_distance, + ego.carlaActor.get_world().get_weather().wetness, + ego.carlaActor.get_world().get_weather().fog_falloff, + ego.carlaActor.get_world().get_weather().scattering_intensity, + ego.carlaActor.get_world().get_weather().mie_scattering_scale, + ego.carlaActor.get_world().get_weather().rayleigh_scattering_scale, + ego.carlaActor.get_world().get_weather().dust_storm + ]) as weather_props # to "weather.npz" diff --git a/examples/modd/carla/followLeader_extracar_NN.scenic b/examples/modd/carla/followLeader_extracar_NN.scenic new file mode 100644 index 0000000..2154094 --- /dev/null +++ b/examples/modd/carla/followLeader_extracar_NN.scenic @@ -0,0 +1,550 @@ +""" Scenario Description +The ego car follows the leader car maintaining a normal distance while lane keeping. +""" + +import warnings +warnings.filterwarnings("ignore") + +import pickle +import sklearn +import numpy as np +import random +import os +import re +import torch + +import pandas as pd +import torch +import torchvision +import sys +import cv2 +from scenic.domains.driving.controllers import ( + PIDLateralController, + PIDLongitudinalController, +) + +if globalParameters.seed != "": + random.seed(globalParameters.seed) + np.random.seed(globalParameters.seed) + +torch.set_default_device('cuda') + +class resNet(torch.nn.Module): + """ + Use restnet from torchvision + """ + + def __init__(self, layers="18", pre_trained=False): + super(resNet, self).__init__() + if layers == "18": + self.model = torchvision.models.resnet18(pretrained=pre_trained) + else: + raise NotImplementedError + + def forward(self, x): + return self.model(x) + + + +class convNet(torch.nn.Module): + """ + CNN to train from scratch + """ + + def __init__(self): + super(convNet, self).__init__() + self.model = torch.nn.Sequential( + torch.nn.Conv2d(3, 32, kernel_size=3, stride=2), + torch.nn.LeakyReLU(), + torch.nn.MaxPool2d(kernel_size=2), + torch.nn.Conv2d(32, 64, kernel_size=3, stride=2), + torch.nn.LeakyReLU(), + torch.nn.MaxPool2d(kernel_size=2), + torch.nn.Conv2d(64, 128, kernel_size=3, stride=2), + torch.nn.LeakyReLU(), + torch.nn.Flatten(), + ) + + def forward(self, x): + h = self.model(x) + return h + + +class CNN(torch.nn.Module): + def __init__(self, resnet=False, pretrained=False): + super(CNN, self).__init__() + if resnet: + self.model = resNet(pre_trained=pretrained) + else: + self.model = convNet() + self.fc1 = torch.nn.Linear(1000,1024) + self.head = torch.nn.Linear(1024, 2) + + def forward(self, x): + x = self.model(x) + x = self.fc1(x) + h = self.head(x) + return h + + +class MLP(torch.nn.Module): + def __init__(self): + super(MLP, self).__init__() + self.layers = torch.nn.Sequential( + torch.nn.Linear(21, 100), + torch.nn.ReLU(), + torch.nn.Linear(100, 100), + torch.nn.ReLU(), + torch.nn.Linear(100, 1) + ) + + def forward(self, x): + x = self.layers(x) + return x + +param timeout = 180 +param map = localPath('../../../tests/scenic/Town01.xodr') +param 
carla_map = 'Town01' +param timeBound = 300 + +model scenic.simulators.carla.model + +#CONSTANTS +EGO_MODEL = "vehicle.tesla.model3" +LEADER_SPEED = TimeSeries(VerifaiRange(6,8)) +EGO_SPEED = 3 +THROTTLE_ACTION = 0.5 +BRAKE_ACTION = 1.0 +EGO_TO_LEADER = Range(-15, -10) +EGO_BRAKING_THRESHOLD = 6 +EGO_ACCELERATION_THRESHOLD = 10 + + +monitor_model = None +#if globalParameters.monitor != "": +# with open(globalParameters.monitor, 'rb') as f: +# monitor_model = pickle.load(f) + + + +if globalParameters.monitor != "": + monitor_model = MLP() + monitor_model.cuda() + monitor_model.load_state_dict(torch.load(globalParameters.monitor)) + monitor_model.eval() + print("Monitor loaded") + + + + +behavior FollowLaneBehaviorModified(target_speed = 10, laneToFollow=None, is_oppositeTraffic=False, leaderCar=None): + """ + Follow's the lane on which the vehicle is at, unless the laneToFollow is specified. + Once the vehicle reaches an intersection, by default, the vehicle will take the straight route. + If straight route is not available, then any availble turn route will be taken, uniformly randomly. + If turning at the intersection, the vehicle will slow down to make the turn, safely. + + This behavior does not terminate. A recommended use of the behavior is to accompany it with condition, + e.g. do FollowLaneBehavior() until ... + + :param target_speed: Its unit is in m/s. By default, it is set to 10 m/s + :param laneToFollow: If the lane to follow is different from the lane that the vehicle is on, this parameter can be used to specify that lane. By default, this variable will be set to None, which means that the vehicle will follow the lane that it is currently on. + """ + + past_steer_angle = 0 + past_speed = 0 # making an assumption here that the agent starts from zero speed + if laneToFollow is None: + if leaderCar: + current_lane = leaderCar.lane + else: + current_lane = self.lane + else: + current_lane = laneToFollow + + current_centerline = current_lane.centerline + in_turning_lane = False # assumption that the agent is not instantiated within a connecting lane + intersection_passed = False + entering_intersection = False # assumption that the agent is not instantiated within an intersection + end_lane = None + original_target_speed = target_speed + TARGET_SPEED_FOR_TURNING = 3 # KM/H + TRIGGER_DISTANCE_TO_SLOWDOWN = 20 # FOR TURNING AT INTERSECTIONS + + if current_lane.maneuvers != (): + nearby_intersection = current_lane.maneuvers[0].intersection + if nearby_intersection == None: + nearby_intersection = current_lane.centerline[-1] + else: + nearby_intersection = current_lane.centerline[-1] + + # instantiate longitudinal and lateral controllers + _lon_controller, _lat_controller = simulation().getLaneFollowingControllers(self) + + if leaderCar is None: + self.steps_speed = 0 + else: + steps_running = 0 + + while True: + + if self.speed is not None: + current_speed = self.speed + else: + current_speed = past_speed + + if not entering_intersection and (distance from self.position to nearby_intersection) < TRIGGER_DISTANCE_TO_SLOWDOWN: + entering_intersection = True + intersection_passed = False + + if distance from self to intersection < TRIGGER_DISTANCE_TO_SLOWDOWN: + if leaderCar is None: + maneuvers = current_lane.maneuvers + turn_maneuvers = filter(lambda i : i.type != ManeuverType.STRAIGHT, maneuvers) + if len(turn_maneuvers) > 0: + select_maneuver = Uniform(*turn_maneuvers) + self.turn_maneuver = select_maneuver + else: + select_maneuver = Uniform(*maneuvers) + self.select_maneuver = 
select_maneuver + else: + select_maneuver = leaderCar.turn_maneuver + print(select_maneuver.type) + + elif len(current_lane.maneuvers) > 0: + select_maneuver = Uniform(*current_lane.maneuvers) + else: + take SetBrakeAction(1.0) + break + + # print(select_maneuver.type) + + # assumption: there always will be a maneuver + if select_maneuver.connectingLane != None: + current_centerline = concatenateCenterlines([current_centerline, select_maneuver.connectingLane.centerline, \ + select_maneuver.endLane.centerline]) + else: + current_centerline = concatenateCenterlines([current_centerline, select_maneuver.endLane.centerline]) + + + current_lane = select_maneuver.endLane + end_lane = current_lane + + if current_lane.maneuvers != (): + nearby_intersection = current_lane.maneuvers[0].intersection + if nearby_intersection == None: + nearby_intersection = current_lane.centerline[-1] + else: + nearby_intersection = current_lane.centerline[-1] + + if select_maneuver.type != ManeuverType.STRAIGHT: + if leaderCar is None: + self.turn_centerline = current_centerline + else: + current_centerline = leaderCar.turn_centerline + + + in_turning_lane = True + target_speed = TARGET_SPEED_FOR_TURNING + + # do TurnBehavior(trajectory = current_centerline, target_speed=target_speed) + trajectory = current_centerline + target_speed = target_speed + if isinstance(trajectory, PolylineRegion): + trajectory_centerline = trajectory + else: + trajectory_centerline = concatenateCenterlines([traj.centerline for traj in trajectory]) + + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller = PIDLateralController(K_P=0.8, K_D=0.6, K_I=0.0, dt=dt) + + past_steer_angle = 0 + + while distance from self to intersection < TRIGGER_DISTANCE_TO_SLOWDOWN: + if self.speed is not None: + current_speed = self.speed + else: + current_speed = 0 + + cte = trajectory_centerline.signedDistanceTo(self.position) + speed_error = target_speed - current_speed + + # compute throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Latitudinal Control + current_steer_angle = _lat_controller.run_step(cte) + + + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + + if leaderCar is None: + self.steps_speed += 1 + if self.steps_speed > 20: + original_target_speed = LEADER_SPEED.getSample() + self.steps_speed = 0 + else: + steps_running += 1 + + if leaderCar and monitor_model and steps_running > 80: + # Input format: (weather, np.array([r,g,b, distIntersection, distObstacle, visibleObstacle, visibleLeader])) + distIntersection = distance from self to intersection + distObstacle = distance from self to obstacle + visibleObstacle = int(ego can see obstacle) + visibleLeader = int(ego can see leader) + + input_features = np.concatenate((self.weather, np.array([self.r,self.g,self.b, distIntersection, distObstacle, visibleObstacle, visibleLeader]))) + x = torch.Tensor(input_features).unsqueeze(0).cuda() + self.isSafe = monitor_model(x).cpu().detach().numpy()[0] + + + if (end_lane is not None) and (self.position in end_lane) and not intersection_passed: + intersection_passed = True + in_turning_lane = False + entering_intersection = False + target_speed = original_target_speed + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller = PIDLateralController(K_P=0.2, K_D=0.1, K_I=0.0, dt=dt) + + 
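+        # The signed distance from the ego position to the nearest centerline
+        # segment is the cross-track error (CTE) that the lateral PID steers on.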
nearest_line_points = current_centerline.nearestSegmentTo(self.position) + nearest_line_segment = PolylineRegion(nearest_line_points) + self.cte = nearest_line_segment.signedDistanceTo(self.position) + if is_oppositeTraffic: + self.cte = -self.cte + + speed_error = target_speed - current_speed + + + # compute throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Lateral Control + current_steer_angle = _lat_controller.run_step(self.cte) + + if leaderCar: + if distance from self to leaderCar < EGO_BRAKING_THRESHOLD: + take SetBrakeAction(1.0) + + + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + past_speed = current_speed + + if leaderCar is None: + self.steps_speed += 1 + if self.steps_speed > 20: + original_target_speed = LEADER_SPEED.getSample() + self.steps_speed = 0 + + else: + steps_running += 1 + + if leaderCar and monitor_model and steps_running > 80: + # Input format: (weather, np.array([r,g,b, distIntersection, distObstacle, visibleObstacle, visibleLeader])) + distIntersection = distance from self to intersection + distObstacle = distance from self to obstacle + visibleObstacle = int(ego can see obstacle) + visibleLeader = int(ego can see leader) + + input_features = np.concatenate((self.weather, np.array([self.r,self.g,self.b, distIntersection, distObstacle, visibleObstacle, visibleLeader]))) + input_features = np.expand_dims(input_features, axis=0) + x = torch.Tensor(input_features).unsqueeze(0).cuda() + self.isSafe = (torch.nn.Sigmoid()(monitor_model(x)).cpu().detach().numpy()[0][0] > 0.65).astype(int) + + + + + + +behavior EgoBehavior(target_speed = 10, controller_path = None, leaderCar=None, obstacleCar=None): + if monitor_model is None: + do ControllerBehavior(target_speed, controller_path, leaderCar) + else: + try: + do ControllerBehavior(target_speed=target_speed, controller_path=controller_path, leaderCar=leaderCar, obstacleCar=obstacleCar) + interrupt when not self.isSafe: + print("-- Monitor: Not safe! Switching to SafeBehavior.") + take SetBrakeAction(1.0) + do EgoBehavior2(target_speed = target_speed, controller_path = controller_path, leaderCar=leaderCar, obstacleCar=obstacleCar) + +behavior EgoBehavior2(target_speed = 10, controller_path = None, leaderCar=None, obstacleCar=None): + try: + do FollowLaneBehaviorModified(target_speed=target_speed, leaderCar=leader) + interrupt when self.isSafe: + print("-- Monitor: Safe! 
Switching back to CNN Controller.") + do EgoBehavior(target_speed=target_speed, controller_path=controller_path, leaderCar=leaderCar, obstacleCar=obstacleCar) + + + +behavior ControllerBehavior(target_speed = 10, controller_path = None, leaderCar=None, obstacleCar=None): + TARGET_SPEED_FOR_TURNING = 3 # KM/H + + past_steer_angle = 0 + past_speed = 0 # making an assumption here that the agent starts from zero speed + + original_target_speed = target_speed + + current_lane = self.lane + nearby_intersection = current_lane.centerline[-1] + + + # instantiate longitudinal and lateral controllers + # _lon_controller, _lat_controller = simulation().getLaneFollowingControllers(self) + dt = simulation().timestep + _lon_controller = PIDLongitudinalController(K_P=0.5, K_D=0.1, K_I=0.7, dt=dt) + _lat_controller_turn = PIDLateralController(K_P=0.8, K_D=0.2, K_I=0.0, dt=dt) + _lat_controller_straight = PIDLateralController(K_P=0.2, K_D=0.1, K_I=0.0, dt=dt) + + # instantiate model + controller = CNN(resnet=True) + controller.cuda() + controller.load_state_dict(torch.load(controller_path)) + controller.eval() + + # Features for monitor + if monitor_model: + color = obstacle.color + self.r,self.g,self.b = color.r, color.g, color.b + self.weather = np.array([ + self.carlaActor.get_world().get_weather().cloudiness, + self.carlaActor.get_world().get_weather().precipitation, + self.carlaActor.get_world().get_weather().precipitation_deposits, + self.carlaActor.get_world().get_weather().wind_intensity, + self.carlaActor.get_world().get_weather().sun_azimuth_angle, + self.carlaActor.get_world().get_weather().sun_altitude_angle, + self.carlaActor.get_world().get_weather().fog_density, + self.carlaActor.get_world().get_weather().fog_distance, + self.carlaActor.get_world().get_weather().wetness, + self.carlaActor.get_world().get_weather().fog_falloff, + self.carlaActor.get_world().get_weather().scattering_intensity, + self.carlaActor.get_world().get_weather().mie_scattering_scale, + self.carlaActor.get_world().get_weather().rayleigh_scattering_scale, + self.carlaActor.get_world().get_weather().dust_storm + ]) + + while True: + front_img = self.sensors["front_rgb"]._lastObservation + if isinstance(front_img, np.ndarray): + front_img = front_img[80:160, 40:600,:] / 255 + _dot = controller(torch.Tensor(front_img).permute(2,0,1).unsqueeze(0).cuda()) + _dot = _dot.cpu().detach().numpy()[0] + cte_pred, dist_pred = _dot[0].item(), _dot[1].item() * 30 + else: + cte_pred, dist_pred = 0, 0 + + if self.speed is not None: + current_speed = self.speed + else: + current_speed = past_speed + + speed_error = target_speed - current_speed + + + # compute throttle : Longitudinal Control + throttle = _lon_controller.run_step(speed_error) + + # compute steering : Lateral Control + if abs(cte_pred) > 0.5: + _lat_controller = _lat_controller_turn + else: + _lat_controller = _lat_controller_straight + current_steer_angle = _lat_controller.run_step(cte_pred) + + if dist_pred > EGO_ACCELERATION_THRESHOLD: + throttle = THROTTLE_ACTION + if dist_pred < EGO_BRAKING_THRESHOLD: + take SetBrakeAction(1.0) + else: + take RegulatedControlAction(throttle, current_steer_angle, past_steer_angle) + past_steer_angle = current_steer_angle + past_speed = current_speed + + # Monitor trigger + if monitor_model: + # Input format: (weather, np.array([r,g,b, distIntersection, distObstacle, visibleObstacle, visibleLeader])) + distIntersection = distance from self to intersection + distObstacle = distance from self to obstacle + distLeader = distance from self to 
leader + visibleObstacle = int(ego can see obstacle) + visibleLeader = int(ego can see leader) + + input_features = np.concatenate((self.weather, np.array([self.r,self.g,self.b, distIntersection, distObstacle, visibleObstacle, visibleLeader]))) + x = torch.Tensor(input_features).unsqueeze(0).cuda() + self.isSafe = (torch.nn.Sigmoid()(monitor_model(x)).cpu().detach().numpy()[0][0] > 0.65).astype(int) + + + + + + +#PLACEMENT +lane = Uniform(*network.lanes) +trajectory = [lane] + + + +intersec = Uniform(*network.intersections) +turn_maneuvers = filter(lambda i: i.type == ManeuverType.LEFT_TURN, intersec.maneuvers) +turn_maneuver = Uniform(*turn_maneuvers) +startLane = turn_maneuver.startLane + + +start = startLane.centerline[-1] + +leader = new Car following roadDirection from start for -5, + with blueprint EGO_MODEL, + with behavior FollowLaneBehaviorModified(target_speed=EGO_SPEED), + with steps_speed 0, + with color Color(0,0,0), + with time_steps 0 + +obstacle = new Car at 0 @ 25 relative to leader, + with heading leader + +ego = new Car following roadDirection from leader.position for EGO_TO_LEADER, + with blueprint EGO_MODEL, + with behavior EgoBehavior(target_speed=EGO_SPEED, controller_path=globalParameters.monitor, leaderCar=leader, obstacleCar=obstacle), + with cte 0, + with distIntersec EGO_TO_LEADER, + with isSafe 1, + with sensors {"front_rgb": RGBSensor(offset=(0, 2, 1), width=640, height=320), + "aerial_rgb": RGBSensor(offset=(0, -10, 4), width=1280, height=640) + }, + + + +require ego can see leader + + + + + + + + +record distance from ego to leader as distLeader # to "dist_follow_leader.npz" +record distance from ego to intersection as distIntersection # to "dist_intersection.npz" +record distance from ego to obstacle as distObstacle # to "dist_obstacle.npz" +record ego can see obstacle as visibleObstacle +record ego can see leader as visibleLeader +if globalParameters.monitor != "": + record ego.isSafe as MonitorNotTriggered +record initial obstacle.color as colorObstacle # to "color.npz" +record initial np.array([ + ego.carlaActor.get_world().get_weather().cloudiness, + ego.carlaActor.get_world().get_weather().precipitation, + ego.carlaActor.get_world().get_weather().precipitation_deposits, + ego.carlaActor.get_world().get_weather().wind_intensity, + ego.carlaActor.get_world().get_weather().sun_azimuth_angle, + ego.carlaActor.get_world().get_weather().sun_altitude_angle, + ego.carlaActor.get_world().get_weather().fog_density, + ego.carlaActor.get_world().get_weather().fog_distance, + ego.carlaActor.get_world().get_weather().wetness, + ego.carlaActor.get_world().get_weather().fog_falloff, + ego.carlaActor.get_world().get_weather().scattering_intensity, + ego.carlaActor.get_world().get_weather().mie_scattering_scale, + ego.carlaActor.get_world().get_weather().rayleigh_scattering_scale, + ego.carlaActor.get_world().get_weather().dust_storm + ]) as weather_props # to "weather.npz" diff --git a/examples/modd/carla/models/controller_cte_dist_130.pth b/examples/modd/carla/models/controller_cte_dist_130.pth new file mode 100644 index 0000000..74a29cc Binary files /dev/null and b/examples/modd/carla/models/controller_cte_dist_130.pth differ diff --git a/examples/modd/carla/static_sensors.py b/examples/modd/carla/static_sensors.py new file mode 100644 index 0000000..8c8601f --- /dev/null +++ b/examples/modd/carla/static_sensors.py @@ -0,0 +1,51 @@ +import os +from time import sleep + +import carla +import cv2 +import numpy as np + +from scenic.core.sensors import ActiveSensor + 
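+# CarlaOtherFeaturesSensor subclasses Scenic's ActiveSensor so extra CARLA data
+# can be recorded from scenarios. `processing` reinterprets the raw BGRA byte
+# buffer delivered by CARLA as a (height, width, 4) array, drops the alpha
+# channel, and reverses the channel order to RGB; `save_last_observation` then
+# stores observations either as .npy arrays or as PNG images.
+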
diff --git a/examples/modd/carla/models/controller_cte_dist_130.pth b/examples/modd/carla/models/controller_cte_dist_130.pth
new file mode 100644
index 0000000..74a29cc
Binary files /dev/null and b/examples/modd/carla/models/controller_cte_dist_130.pth differ
diff --git a/examples/modd/carla/static_sensors.py b/examples/modd/carla/static_sensors.py
new file mode 100644
index 0000000..8c8601f
--- /dev/null
+++ b/examples/modd/carla/static_sensors.py
@@ -0,0 +1,51 @@
+import os
+
+import carla
+import cv2
+import numpy as np
+
+from scenic.core.sensors import ActiveSensor
+
+
+class CarlaOtherFeaturesSensor(ActiveSensor):
+    def __init__(self, attributes=None, convert=None, record_npy=False):
+        super().__init__()
+        if isinstance(attributes, str):
+            raise NotImplementedError(
+                "String parsing for attributes is not yet implemented."
+            )
+        elif isinstance(attributes, dict):
+            self.attributes = attributes
+        else:
+            self.attributes = {}
+
+        self.convert = None
+        if convert is not None:
+            if isinstance(convert, int):
+                self.convert = convert
+            elif isinstance(convert, str):
+                self.convert = carla.ColorConverter.names[convert]
+            else:
+                raise AttributeError("'convert' has to be int or string.")
+
+        self.record_npy = record_npy
+
+    def processing(self, data):
+        array = np.frombuffer(data.raw_data, dtype=np.dtype("uint8"))
+        array = np.reshape(array, (data.height, data.width, 4))  # BGRA format
+        array = array[:, :, :3]  # Drop the alpha channel (BGR remains)
+        array = array[:, :, ::-1]  # BGR -> RGB
+
+        return array, data.frame
+
+    def save_last_observation(self, save_path, frame_number=None):
+        if frame_number is None:
+            frame_number = self.frame
+        save_as = os.path.join(save_path, f"{frame_number}")
+        if self.record_npy:
+            np.save(save_as, self.observation)
+        else:
+            cv2.imwrite(f"{save_as}.png", self.observation[..., ::-1])
diff --git a/examples/modd/modd_learner_main.py b/examples/modd/modd_learner_main.py
new file mode 100644
index 0000000..f4d61c6
--- /dev/null
+++ b/examples/modd/modd_learner_main.py
@@ -0,0 +1,194 @@
+
+import warnings
+warnings.filterwarnings("ignore")
+
+import math
+import os.path
+import sys
+import numpy as np
+import torch.nn as nn
+from sklearn.tree import DecisionTreeClassifier
+from sklearn.neural_network import MLPClassifier
+from sklearn.pipeline import Pipeline
+from sklearn.linear_model import LogisticRegression
+from sklearn.preprocessing import StandardScaler
+from verifai.modd.odd_sampler import ODDSampler
+from verifai.monitor import specification_monitor, mtl_specification
+from verifai.modd.odd_learner import MODDLearner
+from dotmap import DotMap
+import pickle
+
+from verifai.samplers import ScenicSampler
+from verifai.scenic_server import ScenicServer
+
+
+VERBOSITY = 1
+MODEL = "LR"
+
+
+def preprocessing(traces):
+    res = []
+    for i in range(len(traces.keys())):
+        print(f"Simulation {i}")
+        t = traces[i]
+        # Features
+        color = t[0]["colorObstacle"]
+        r, g, b = color.r, color.g, color.b
+        weather = t[0]["weather_props"]
+        distLeaderRecord = t[0]["distLeader"]
+        distIntersectionRecord = t[0]["distIntersection"]
+        distObstacleRecord = t[0]["distObstacle"]
+        visibleObstacleRecord = t[0]["visibleObstacle"]
+        visibleLeaderRecord = t[0]["visibleLeader"]
+        for step in range(0, len(distLeaderRecord) - 100):
+            # Output: the leader distance 100 steps ahead (binarized by labeler below)
+            distLeaderEnd = distLeaderRecord[step + 100][1]
+            # Input features at the current step
+            distIntersection = distIntersectionRecord[step][1]
+            distObstacle = distObstacleRecord[step][1]
+            visibleObstacle = visibleObstacleRecord[step][1]
+            visibleLeader = visibleLeaderRecord[step][1]
+            features = np.concatenate((weather, np.array([r, g, b, distIntersection, distObstacle, visibleObstacle, visibleLeader, distLeaderEnd])))
+            res.append([features])
+    return np.squeeze(np.array(res), axis=1)
+
+
+def labeler(train_data_samples):
+    # Binarize the last column: 1 iff the leader is still within 20 after the horizon
+    res = train_data_samples.copy()
+    res[:, -1] = (train_data_samples[:, -1] < 20).astype(int)
+    return res
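+
+# A sketch of one row assembled by preprocessing() and labeler() (names are
+# illustrative, not identifiers in the code):
+#     [cloudiness, ..., dust_storm,                     # 14 weather values
+#      r, g, b,                                         # obstacle color
+#      distIntersection, distObstacle,                  # distances at step t
+#      visibleObstacle, visibleLeader,                  # visibility flags
+#      label]                                           # int(distLeader at t+100 < 20)
+# i.e., 21 features plus one binary label per time step.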
+# Set up the parameters for the data generation.
+datagen_params = DotMap(
+    preprocessing=preprocessing,
+    labeler=labeler,
+    datagen_save_dir="./out/samples_timeseries",
+    verbosity=VERBOSITY,
+)
+
+if MODEL == "DT":
+    base_model = DecisionTreeClassifier(max_depth=10)
+
+if MODEL == "NN":
+    # Named nn_clf to avoid shadowing the torch.nn import above
+    nn_clf = MLPClassifier(solver='adam', alpha=1e-5,
+                           hidden_layer_sizes=(100, 100), max_iter=100, random_state=42, verbose=1)
+    base_model = Pipeline([('scaler', StandardScaler()), ('nn', nn_clf)])
+
+if MODEL == "LR":
+    base_model = LogisticRegression(verbose=1)
+
+if MODEL == "NN_TORCH":
+    class MLP(nn.Module):
+        def __init__(self):
+            super(MLP, self).__init__()
+            self.layers = nn.Sequential(
+                nn.Linear(21, 100),
+                nn.ReLU(),
+                nn.Linear(100, 100),
+                nn.ReLU(),
+                nn.Linear(100, 1)
+            )
+
+        def forward(self, x):
+            # x: a batch of 21-dimensional feature vectors
+            x = self.layers(x)
+            return x
+
+    base_model = MLP()
+
+trainer_params = DotMap(
+    model=base_model,
+    training_results_path="",
+    save_model_path="./out/lr_timeseries.pkl",
+    verbosity=VERBOSITY,
+)
+
+
+def specification(trace):
+    # Placeholder specification; the SpecMonitor below performs the actual check
+    verdict = 0
+    return verdict
+
+eval_params = DotMap(
+    method="conformance_testing",
+    specification=specification,
+    eval_num_simulations=200,  # To indicate how many samples are required as a minimum
+    eval_num_steps=300,
+    evaluation_results_path="",  # Save the evaluation_results if not empty
+    datagen_save_dir="./out/eval_samples_timeseries",
+    scenes_save_dir="./out/scene_timeseries",
+    datagen_nomon_save_dir="./out/eval_samples_timeseries_nomonitor",
+    verbosity=VERBOSITY,
+)
+
+eval_params.save_model_path = trainer_params.save_model_path
+
+
+class SpecMonitor(specification_monitor):
+    def __init__(self):
+        self.specification = mtl_specification(['G safe'])
+        super().__init__(self.specification)
+
+    def evaluate(self, simulation):
+        # Get the recorded ego-to-leader distances from the simulation result
+        records = simulation.result.records
+        distLeader = records["distLeader"]
+
+        # Compute time-stamped sequence of values for the 'safe' atomic proposition;
+        # here safe = "ego stays within 20 of the leader"
+        safe_values = []
+        for (_, dist) in distLeader:
+            safe_values.append(20 - dist)
+        eval_dictionary = {'safe': list(enumerate(safe_values))}
+
+        # Evaluate MTL formula given values for its atomic propositions
+        return self.specification.evaluate(eval_dictionary)
+
+# Load the Scenic scenario and create a sampler from it
+path = os.path.join(os.path.dirname(__file__), 'carla/followLeader_extracar.scenic')
+
+server_options = DotMap(maxSteps=300, verbosity=VERBOSITY)
+
+sampling_params = DotMap(
+    path=path,
+    controller_path="./carla/models/controller_cte_dist_130.pth",
+    spec_monitor=SpecMonitor(),
+    server_type="scenic",
+    server_class=ScenicServer,
+    server_options=server_options,
+    verbosity=VERBOSITY,
+)
+
+
+global_params = DotMap(
+    initial_num_simulations=500,
+    initial_num_steps=300,
+    refinement_num_simulations=1,
+    refinement_num_steps=250,
+    refinement_iters=0,
+)
+
+
+modd = MODDLearner(datagen_params=datagen_params,
+                   trainer_params=trainer_params,
+                   eval_params=eval_params,
+                   sampling_params=sampling_params,
+                   global_params=global_params)
+
+# Train ODD monitor and print the results
+oddMonitor = modd.run()
+print('ODD monitor trained:')
+print(oddMonitor)
+print('Training results:')
+print(modd.training_results)
+print('Evaluation results:')
+print(modd.evaluation_results)
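+
+# A minimal sketch (commented out; not part of the pipeline) of querying the
+# monitor once modd.run() has pickled it -- the feature vector is hypothetical:
+#     with open(trainer_params.save_model_path, "rb") as f:
+#         monitor = pickle.load(f)
+#     features = np.zeros((1, 21))  # 14 weather + r,g,b + 2 distances + 2 visibility flags
+#     print(monitor.predict(features))  # 1 iff the ego is predicted to keep following the leader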
diff --git a/examples/scenic/modd_learner_main.py b/examples/scenic/modd_learner_main.py
new file mode 100644
index 0000000..62d1ef5
--- /dev/null
+++ b/examples/scenic/modd_learner_main.py
@@ -0,0 +1,135 @@
+
+import math
+import os.path
+import sys
+import numpy as np
+from sklearn.tree import DecisionTreeClassifier
+from verifai.modd.odd_sampler import ODDSampler
+from verifai.monitor import specification_monitor, mtl_specification
+from verifai.modd.odd_learner import MODDLearner
+from dotmap import DotMap
+
+from verifai.samplers import ScenicSampler
+from verifai.scenic_server import ScenicServer
+
+
+def preprocessing(traces):
+    res = []
+    for i in range(len(traces.keys())):
+        t = traces[i]
+        # Single feature: initial distance to the closest car; label: spec verdict
+        dist = t[0]["dist"][0][1]
+        spec = int(t[1] > 0)
+        res.append([dist, spec])
+    return np.array(res)
+
+def labeler(train_data_samples):
+    # Labels are already binary specification verdicts; pass them through.
+    return train_data_samples
+
+# Set up the parameters for the data generation.
+datagen_params = DotMap(
+    preprocessing=preprocessing,
+    labeler=labeler,
+    simulations_save_dir="",
+    datagen_save_dir="",
+    verbosity=2,
+)
+
+base_model = DecisionTreeClassifier(max_depth=10)
+
+trainer_params = DotMap(
+    model=base_model,
+    training_results_path="",
+    save_model_path="./out/model.pkl",
+    verbosity=2,
+)
+
+
+def specification(trace):
+    # Placeholder specification; the SpecMonitor below performs the actual check
+    verdict = 0
+    return verdict
+
+eval_params = DotMap(
+    method="conformance_testing",
+    specification=specification,
+    eval_num_simulations=5,  # To indicate how many samples are required as a minimum
+    eval_num_steps=50,
+    evaluation_results_path="",  # Save the evaluation_results if not empty
+    verbosity=2,
+)
+
+
+class SpecMonitor(specification_monitor):
+    def __init__(self):
+        self.specification = mtl_specification(['G safe'])
+        super().__init__(self.specification)
+
+    def evaluate(self, simulation):
+        # Get trajectories of objects from the result of the simulation
+        traj = simulation.result.trajectory
+
+        # Compute time-stamped sequence of values for 'safe' atomic proposition;
+        # we'll define safe = "distance from ego to all other objects > 5"
+        safe_values = []
+        for positions in traj:
+            ego = positions[0]
+            dist = min((ego.distanceTo(other) for other in positions[1:]),
+                       default=math.inf)
+            safe_values.append(dist - 5)
+        eval_dictionary = {'safe': list(enumerate(safe_values))}
+
+        # Evaluate MTL formula given values for its atomic propositions
+        return self.specification.evaluate(eval_dictionary)
+
+# Load the Scenic scenario and create a sampler from it
+path = os.path.join(os.path.dirname(__file__), 'newtonian/carlaChallenge2.scenic')
+sampler = ScenicSampler.fromScenario(path, mode2D=True, params={"monitor": ""})
+sampler_eval = ScenicSampler.fromScenario(path, mode2D=True, params={"monitor": trainer_params.save_model_path})
+
+server_options = DotMap(maxSteps=100, verbosity=2)
+
+sampling_params = DotMap(
+    sampler=sampler,
+    sampler_eval=sampler_eval,
+    spec_monitor=SpecMonitor(),
+    server_type="scenic",
+    server_class=ScenicServer,
+    server_options=server_options,
+    verbosity=2,
+)
+
+sampler = ODDSampler(sampling_params=sampling_params)
+sampling_params = sampler.sampling_params
+
+global_params = DotMap(
+    initial_num_simulations=15,
+    initial_num_steps=50,
+    refinement_num_simulations=5,
+    refinement_num_steps=50,
+    refinement_iters=0,
+)
+
+if not os.path.isdir("./out/"):
+    os.mkdir("./out/")
+
+modd = MODDLearner(datagen_params=datagen_params,
+                   trainer_params=trainer_params,
+                   eval_params=eval_params,
+                   sampling_params=sampling_params,
+                   global_params=global_params)
+
+# Train ODD monitor and print the results
+oddMonitor = modd.run()
+print('ODD monitor trained:')
+print(oddMonitor)
+print('Training results:')
+print(modd.training_results)
+print('Evaluation results:')
+print(modd.evaluation_results)
diff --git a/examples/scenic/newtonian/carlaChallenge2.scenic b/examples/scenic/newtonian/carlaChallenge2.scenic
index 2e5ca12..63331d4 100644
--- a/examples/scenic/newtonian/carlaChallenge2.scenic
+++ b/examples/scenic/newtonian/carlaChallenge2.scenic
@@ -3,6 +3,11 @@
 Based on 2019 Carla Challenge Traffic Scenario 03.
 Leading vehicle decelerates suddenly due to an obstacle and ego-vehicle must react,
 performing an emergency brake or an avoidance maneuver.
 """
+import pickle
+import sklearn
+import numpy as np
+import os
+
 param map = localPath('../../../tests/scenic/Town01.xodr')
 param carla_map = 'Town01'
+param monitor = ""
 model scenic.simulators.newtonian.driving_model
@@ -12,12 +17,24 @@
 EGO_SPEED = 10
 THROTTLE_ACTION = 0.6
 BRAKE_ACTION = 1.0
 EGO_TO_OBSTACLE = Range(-20, -15)
-EGO_BRAKING_THRESHOLD = 11
+EGO_BRAKING_THRESHOLD = 10
+
+monitor_model = None
+if globalParameters.monitor:
+    # The monitor arrives as a path to a pickled sklearn model; load it here
+    with open(globalParameters.monitor, "rb") as f:
+        monitor_model = pickle.load(f)
 
 #EGO BEHAVIOR: Follow lane and brake when reaches threshold distance to obstacle
 behavior EgoBehavior(speed=10):
     try:
-        do FollowLaneBehavior(speed)
+        if monitor_model is None:
+            do FollowLaneBehavior(speed)
+        else:
+            dist = self.distanceToClosest(Car)
+            good = monitor_model.predict(np.array([[dist]]))
+            if good:
+                do FollowLaneBehavior(speed)
+            else:
+                # Outside the learned ODD: fall back to a quarter of the speed
+                do FollowLaneBehavior(speed/4)
     interrupt when withinDistanceToAnyObjs(self, EGO_BRAKING_THRESHOLD):
         take SetBrakeAction(BRAKE_ACTION)
@@ -28,3 +45,10 @@
 obstacle = new Car at 171.87 @ 2.04
 
 ego = new Car following roadDirection from obstacle.position for EGO_TO_OBSTACLE,
     with behavior EgoBehavior(EGO_SPEED)
+
+record ego.distanceToClosest(Car) as dist
+record ego.speed as speed
diff --git a/examples/scenic/out/model.pkl b/examples/scenic/out/model.pkl
new file mode 100644
index 0000000..79d63e0
Binary files /dev/null and b/examples/scenic/out/model.pkl differ
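For reference, a minimal sketch of the contract the modified scenario assumes from the pickled monitor (an sklearn-style classifier over the single `dist` feature; the distance value is illustrative):

    import pickle
    import numpy as np

    with open("examples/scenic/out/model.pkl", "rb") as f:
        monitor = pickle.load(f)
    good = monitor.predict(np.array([[12.5]]))  # 1: keep target speed, 0: slow to speed/4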
diff --git a/src/verifai/modd/odd_data_generator.py b/src/verifai/modd/odd_data_generator.py
new file mode 100644
index 0000000..7dd1c75
--- /dev/null
+++ b/src/verifai/modd/odd_data_generator.py
@@ -0,0 +1,126 @@
+import time
+import progressbar
+from verifai.server import ServerTimings
+from abc import ABC
+from verifai.modd.odd_sampler import ODDSampler
+import pickle
+import numpy as np
+
+class DataGenerator(ABC):
+    def __init__(self, datagen_params, sampling_params, global_params):
+        self.datagen_params = datagen_params
+        self.sampling_params = sampling_params
+        self.global_params = global_params
+        self.training_data = None
+        self.samples = {}
+
+    def sample_simulations(self):
+        pass
+
+    def generate(self):
+        pass
+
+class GenericDataGenerator(DataGenerator):
+    def __init__(self, datagen_params, sampling_params, global_params):
+        # Default to identity preprocessing/labeling when none are provided
+        if not datagen_params.has_key("preprocessing"):
+            datagen_params.preprocessing = None
+        if not datagen_params.has_key("labeler"):
+            datagen_params.labeler = None
+
+        super().__init__(datagen_params, sampling_params, global_params)
+
+    def sample_simulations(self, num_simulations, num_steps, save_path):
+        i = 0
+        self.total_sample_time = 0
+        self.total_simulate_time = 0
+        self.sampling_params.server.maxSteps = num_steps
+        if self.datagen_params.verbosity >= 1:
+            suffix = f' for {num_simulations} simulations'
+            suffix += f' for {num_steps} timesteps'
+            print('Generating data' + suffix)
+        if self.datagen_params.verbosity >= 2:
+            print(f'Server class is {type(self.sampling_params.server)}')
+
+        if self.datagen_params.verbosity >= 1:
+            bar = progressbar.ProgressBar(max_value=num_simulations)
+
+        try:
+            while True:
+                try:
+                    start = time.time()
+                    sample = self.sampling_params.server.get_sample()
+                    after_sampling = time.time()
+                    scene = self.sampling_params.server.sampler.lastScene
+                    assert scene
+                    result = self.sampling_params.server._simulate(scene)
+                    if result is None:
+                        print(self.sampling_params.server.rejectionFeedback)
+                        return self.sampling_params.server.rejectionFeedback
+                    print(f"Time steps run for this simulation: {len(result.trajectory)}")
+                    value = (0 if self.sampling_params.server.monitor is None
+                             else self.sampling_params.server.monitor.evaluate(result))
+                    self.sampling_params.server.lastValue = value
+
+                    after_simulation = time.time()
+                    timings = ServerTimings(sample_time=(after_sampling - start),
+                                            simulate_time=(after_simulation - after_sampling))
+                    rho = self.sampling_params.server.lastValue
+                    self.total_sample_time += timings.sample_time
+                    self.total_simulate_time += timings.simulate_time
+                except Exception:
+                    if self.datagen_params.verbosity >= 1:
+                        if i >= num_simulations:
+                            print("Sampler has generated all possible samples")
+                        else:
+                            print("Sampling failed.")
+                    break
+                if self.datagen_params.verbosity >= 2:
+                    print("Sample no: ", i, "\nSample: ", sample, "\nRho: ", rho, "\nRecords: ", result.records)
+                self.samples[i] = (result.records, rho)
+                if self.datagen_params.verbosity >= 1:
+                    bar.update(i)
+                i += 1
+                if i == num_simulations:
+                    with open(f"{save_path}_{i}.pkl", 'wb') as filehandler:
+                        pickle.dump(self.samples, filehandler)
+                    break
+                if i == 1 or i % 10 == 0:
+                    print(f"Saving in {save_path}_{i}.pkl")
+                    with open(f"{save_path}_{i}.pkl", 'wb') as filehandler:
+                        pickle.dump(self.samples, filehandler)
+
+        finally:
+            if self.datagen_params.verbosity >= 1:
+                bar.finish()
+            self.sampling_params.server.terminate()
+            if self.datagen_params.verbosity >= 1:
+                print('All simulations generated.')
+                print(f"Total of {len(self.samples)} simulations generated.")
+        return 0
+
+    def load_simulations(self, load_path):
+        with open(load_path, 'rb') as f:
+            return pickle.load(f)
+
+    def generate(self, num_simulations, num_steps):
+        self.init_sampler()
+        save_path = self.datagen_params.datagen_save_dir
+        self.sample_simulations(num_simulations, num_steps, save_path)
+        self.samples = self.load_simulations(f"{save_path}_{num_simulations}.pkl")
+        if self.datagen_params.preprocessing:
+            self.training_data = self.datagen_params.preprocessing(self.samples)
+        if self.datagen_params.labeler:
+            self.training_data = self.datagen_params.labeler(self.training_data)
+        print("Data generated: ", len(self.training_data))
+        self.training_data = {"X": self.training_data[:, :-1], "y": self.training_data[:, -1]}
+        return self.training_data
+
+    def init_sampler(self):
+        self.sampling_params.mode = "train"
+        self.sampling_params = ODDSampler(sampling_params=self.sampling_params).sampling_params
diff --git a/src/verifai/modd/odd_evaluator.py b/src/verifai/modd/odd_evaluator.py
new file mode 100644
index 0000000..43bec52
--- /dev/null
+++ b/src/verifai/modd/odd_evaluator.py
@@ -0,0 +1,142 @@ +import time +import progressbar +from verifai.server import ServerTimings +from abc import ABC +import numpy as np +from verifai.modd.odd_sampler import ODDSampler +import pickle +import os + +class Evaluator(ABC): + def __init__(self, eval_params, sampling_params, global_params): + self.eval_params = eval_params + self.sampling_params = sampling_params + self.global_params = global_params + self.evaluation_results = None + self.terminate = False + self.evals = [] + self.samples = {} + + def evaluate(self, monitor): + pass + + + +class GenericEvaluator(Evaluator): + def __init__(self, eval_params, sampling_params, global_params): + super().__init__(eval_params, sampling_params, global_params) + + + def sample_simulations(self, num_simulations, num_steps, save_datagen_path, save_scenes_path): + i = 0 + print(save_datagen_path) + print(save_scenes_path) + self.total_sample_time = 0 + self.total_simulate_time = 0 + self.sampling_params.server.maxSteps = num_steps + if self.eval_params.verbosity >= 1: + suffix = '' + suffix = f' for {num_simulations} simulations' + suffix += f' for {num_steps} timesteps' + print('Generating data' + suffix) + if self.eval_params.verbosity >= 2: + print(f'Server class is {type(self.sampling_params.server)}') + + if self.eval_params.verbosity >= 1: + bar = progressbar.ProgressBar(max_value=num_simulations) + + try: + while True: + try: + if self.sampling_params.mode == "eval" or self.sampling_params.mode == "eval_nomonitor": + start = time.time() + sample = self.sampling_params.server.get_sample() + after_sampling = time.time() + scene = self.sampling_params.server.sampler.lastScene + assert scene + print(scene) + print("Server", self.sampling_params.server) + print("Sampler", self.sampling_params.server.sampler) + print("Scenario", self.sampling_params.server.sampler.scenario) + # data = self.sampling_params.server.sampler.scenario.sceneToBytes(scene, allowPickle=True) + # if save_scenes_path: + # with open(f"{save_scenes_path}_{i}.scene", 'wb') as f: + # f.write(data) + # if self.sampling_params.mode == "eval_nomonitor": + # if save_scenes_path: + # with open(f"{save_scenes_path}_{i}.scene", 'rb') as f: + # data = f.read() + # scene = self.sampling_params.server.sampler.scenario.sceneFromBytes(data) + # assert scene + result = self.sampling_params.server._simulate(scene) + print(result) + if result is None: + return self.sampling_params.server.rejectionFeedback + print(f"Time steps run for this simulation: {len(result.trajectory)}") + value = (0 if self.sampling_params.server.monitor is None + else self.sampling_params.server.monitor.evaluate(result)) + self.sampling_params.server.lastValue = value + + after_simulation = time.time() + timings = ServerTimings(sample_time=(after_sampling - start), + simulate_time=(after_simulation - after_sampling)) + rho = self.sampling_params.server.lastValue + self.total_sample_time += timings.sample_time + self.total_simulate_time += timings.simulate_time + + if self.eval_params.verbosity >= 2: + print("Sample no: ", i, "\nSample: ", sample, "\nRho: ", rho, "\nRecords: ", result.records) + self.samples[i] = (result.records, rho) + self.evals.append(int(rho > 0)) + i += 1 + if self.eval_params.verbosity >= 1: + bar.update(i) + if i == 1: + t0 = time.time() + if i == num_simulations: + filehandler = open(f"{save_datagen_path}_{i}.pkl", 'wb') + pickle.dump(self.samples, filehandler) + break + if i == 1 or i % 10 == 0: + filehandler = open(f"{save_datagen_path}_{i}.pkl", 'wb') + pickle.dump(self.samples, 
filehandler) + + except: + if self.eval_params.verbosity >= 1: + if i >= num_simulations: + print("Sampler has generated all possible samples") + else: + print("Sampling failed.") + break + pass + finally: + if self.eval_params.verbosity >= 1: + bar.finish() + self.sampling_params.server.terminate() + if self.eval_params.verbosity >= 1: + print('All simulations generated.') + print(f"Total of {len(self.evals)} simulations generated.") + return 0 + + def generate(self, num_simulations, num_steps, save_path): + save_scenes_path=self.eval_params.scenes_save_dir + self.sample_simulations(num_simulations, num_steps, os.path.abspath(save_path), os.path.abspath(save_scenes_path)) + self.evaluation_data = float(np.mean(np.array(self.evals))) + print(f"-- Eval score: {self.evaluation_data}") + return self.evaluation_data + + def evaluate(self, monitor): + self.init_sampler("eval") + save_path=self.eval_params.datagen_save_dir + eval_data = self.generate(self.eval_params.eval_num_simulations, self.eval_params.eval_num_steps, save_path) + self.evaluation_results = {"eval_score": eval_data} + self.init_sampler("eval_nomonitor") + save_path=self.eval_params.datagen_nomon_save_dir + eval_data_nomon = self.generate(self.eval_params.eval_num_simulations, self.eval_params.eval_num_steps, save_path) + self.evaluation_results["eval_score_nomon"] = eval_data_nomon + return self.evaluation_results + + def init_sampler(self, mode): + self.sampling_params.mode = mode + self.sampling_params.monitor = self.eval_params.save_model_path + self.sampling_params = ODDSampler(sampling_params=self.sampling_params).sampling_params \ No newline at end of file diff --git a/src/verifai/modd/odd_learner.py b/src/verifai/modd/odd_learner.py new file mode 100644 index 0000000..9a1b125 --- /dev/null +++ b/src/verifai/modd/odd_learner.py @@ -0,0 +1,45 @@ +from abc import ABC +from verifai.modd import pymodd +from dotmap import DotMap +import os +from verifai.samplers import ScenicSampler +from verifai.scenic_server import ScenicServer + +class ODDLearner(ABC): + def __init__(self, datagen_params=DotMap(), trainer_params=DotMap(), + eval_params=DotMap(), sampling_params=DotMap(), + global_params=DotMap()): + self.monitor = None + self.training_results = None + self.evaluation_results = None + + self.datagen_params = datagen_params + self.trainer_params = trainer_params + self.eval_params = eval_params + self.sampling_params = sampling_params + self.global_params = global_params + + def run(self): + pass + + +class MODDLearner(ODDLearner): + def __init__(self, datagen_params, trainer_params, eval_params, sampling_params, global_params): + self.modd = pymodd.MODD(datagen_params, + trainer_params, + eval_params, + sampling_params, + global_params, + ) + + super().__init__(datagen_params=datagen_params, + trainer_params=trainer_params, + eval_params=eval_params, + sampling_params=sampling_params, + global_params=global_params) + + def run(self): + self.monitor, self.training_results, self.evaluation_results = self.modd.run() + return self.monitor + + diff --git a/src/verifai/modd/odd_monitor.py b/src/verifai/modd/odd_monitor.py new file mode 100644 index 0000000..b84d5f7 --- /dev/null +++ b/src/verifai/modd/odd_monitor.py @@ -0,0 +1,165 @@ +from abc import ABC +from tqdm import tqdm +import torch +from torch.utils.data import TensorDataset, random_split, DataLoader +from sklearn.model_selection import train_test_split +import torch.nn as nn +import numpy as np +from sklearn.metrics import accuracy_score + +class Monitor(ABC): + def 
__init__(self, model): + self.model = model + + def train(self, training_set): + return self.model + + def predict(self, x): + pass + + def score(self, training_set): + pass + + +class GenericMonitor(Monitor): + def __init__(self, model): + super().__init__(model) + + def train(self, training_set): + X_train, X_test, y_train, y_test = train_test_split( + training_set["X"], training_set["y"], test_size=0.2, random_state=42) + self.model.fit(X_train, y_train) + y_pred = self.model.predict(X_test) + + accuracy = accuracy_score(y_test, y_pred) + print(f"Accuracy: {accuracy*100:.2f}%") + # self.model.fit(training_set["X"], training_set["y"]) + return self.model + + def predict(self, x): + return self.model.predict(x) + + def score(self, training_set): + return self.model.score(training_set["X"], training_set["y"]) + +class TorchMonitor(Monitor): + def __init__(self, model): + super().__init__(model) + self.learning_rate = 1e-3 + self.weight_decay = 1e-1 + self.num_epochs = 40 + self.batch_size = 16 + self.model = model + + self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + tqdm.write("Use device: {device:}\n".format(device=self.device)) + + def train_loop(self, epoch, dataloader, optimizer, loss_function, device): + # model to training mode (important to correctly handle dropout or batchnorm layers) + self.model.train() + self.model.to(device) + # allocation + total_loss = 0 # accumulated loss + n_entries = 0 # accumulated number of data points + # progress bar def + train_pbar = tqdm(dataloader, desc="Training Epoch {epoch:2d}".format(epoch=epoch), leave=True) + # training loop + for x, label in train_pbar: + # data to device (CPU or GPU if available) + x, label = x.to(device), label.to(device) + + optimizer.zero_grad() + outputs = self.model(x) + loss = loss_function(outputs.squeeze(), label) + loss.backward() + optimizer.step() + + # Update accumulated values + total_loss += loss.detach().cpu().numpy() + n_entries += len(x) + + # Update progress bar + train_pbar.set_postfix({'loss': total_loss / n_entries}) + train_pbar.close() + return total_loss / n_entries + + def train(self, training_set): + tqdm.write("Building data loaders...") + + tensor_x = torch.Tensor(training_set["X"]) + tensor_y = torch.Tensor(training_set["y"]) + + dataset = TensorDataset(tensor_x,tensor_y) + dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=True) + tqdm.write("Done!\n") + loss_function = nn.BCEWithLogitsLoss() + + tqdm.write("Define optimiser...") + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) + tqdm.write("Done!\n") + + lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9) + + + tqdm.write("Training...") + best_loss = np.inf + # allocation + train_loss_all, valid_loss_all = [], [] + + # loop over epochs + for epoch in range(1, self.num_epochs + 1): + # training loop + train_loss = self.train_loop(epoch, dataloader, optimizer, loss_function, self.device) + + # collect losses + train_loss_all.append(train_loss) + + # Print message + tqdm.write('Epoch {epoch:2d}: \t' + 'Train Loss {train_loss:.6f}' + .format(epoch=epoch, + train_loss=train_loss + ) + ) + + # Update learning rate with lr-scheduler + if lr_scheduler: + lr_scheduler.step() + return self.model + + + def predict(self, x): + return self.model.predict(x) + + def score(self, training_set): + tensor_x = torch.Tensor(training_set["X"]) + tensor_y = torch.Tensor(training_set["y"]) + + dataset = TensorDataset(tensor_x,tensor_y) + 
dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=True)
+
+        loss_function = nn.BCEWithLogitsLoss()
+
+        self.model.eval()
+        # allocation
+        total_loss = 0  # accumulated loss
+        n_entries = 0  # accumulated number of data points
+
+        # progress bar def
+        eval_pbar = tqdm(dataloader, leave=True)
+        # evaluation loop
+        for x, label in eval_pbar:
+            # data to device (CPU or GPU if available)
+            x, label = x.to(self.device), label.to(self.device)
+            with torch.no_grad():
+                outputs = self.model(x)
+                loss = loss_function(outputs.squeeze(), label)
+
+            # Update accumulated values
+            total_loss += loss.detach().cpu().numpy()
+            n_entries += len(x)
+
+            # Update progress bar
+            eval_pbar.set_postfix({'loss': total_loss / n_entries})
+        eval_pbar.close()
+        return total_loss / n_entries
\ No newline at end of file
diff --git a/src/verifai/modd/odd_sampler.py b/src/verifai/modd/odd_sampler.py
new file mode 100644
index 0000000..692c184
--- /dev/null
+++ b/src/verifai/modd/odd_sampler.py
@@ -0,0 +1,37 @@
+from dotmap import DotMap
+from verifai.samplers import ScenicSampler
+import os
+
+class ODDSampler():
+    def __init__(self, sampling_params):
+        self.sampling_params = sampling_params
+
+        if self.sampling_params.has_key("server_type"):
+            if self.sampling_params.server_type == "scenic":
+                server_params = DotMap()
+                server_params.update(self.sampling_params.server_options)
+                self.init_server(server_params, self.sampling_params.server_class)
+                self.sampling_params.server = self.server
+
+    def init_server(self, server_options, server_class):
+        if self.sampling_params.verbosity >= 1:
+            print("Initializing server")
+        sampling_data = DotMap()
+        if not self.sampling_params.has_key("sampler_type"):
+            self.sampler_type = 'random'
+        else:
+            self.sampler_type = self.sampling_params.sampler_type
+        sampling_data.sampler_type = self.sampler_type
+        sampling_data.sample_space = None
+        sampling_data.sampler_params = None
+        # Common Scenic parameters; only "monitor" and "seed" differ between modes
+        scenario_params = {"render": 1, "verbosity": 3, "timeBound": 300,
+                           "controller": self.sampling_params.controller_path}
+        if self.sampling_params.mode == "train":
+            sampling_data.sampler = ScenicSampler.fromScenario(
+                self.sampling_params.path, mode2D=True, maxSteps=300,
+                params=dict(scenario_params, monitor="", seed=""))
+        elif self.sampling_params.mode == "eval":
+            sampling_data.sampler = ScenicSampler.fromScenario(
+                self.sampling_params.path, mode2D=True, maxSteps=300,
+                params=dict(scenario_params, monitor=os.path.abspath(self.sampling_params.monitor), seed=42))
+        elif self.sampling_params.mode == "eval_nomonitor":
+            sampling_data.sampler = ScenicSampler.fromScenario(
+                self.sampling_params.path, mode2D=True, maxSteps=300,
+                params=dict(scenario_params, monitor="", seed=42))
+
+        self.server = server_class(sampling_data, monitor=self.sampling_params.spec_monitor, options=server_options)
+        if self.sampling_params.verbosity >= 1:
+            print("Server ready")
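A usage sketch for `ODDSampler` (paths and options are illustrative; in the pipeline this call is made by the data generator's and evaluator's `init_sampler`):

    from dotmap import DotMap
    from verifai.scenic_server import ScenicServer
    from verifai.modd.odd_sampler import ODDSampler

    params = DotMap(path="examples/modd/carla/followLeader_extracar.scenic",
                    controller_path="./carla/models/controller_cte_dist_130.pth",
                    spec_monitor=None, server_type="scenic", server_class=ScenicServer,
                    server_options=DotMap(maxSteps=300, verbosity=1),
                    mode="train", verbosity=1)
    params = ODDSampler(sampling_params=params).sampling_params  # params.server is now a live ScenicServer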
diff --git a/src/verifai/modd/odd_trainer.py b/src/verifai/modd/odd_trainer.py
new file mode 100644
index 0000000..72c9293
--- /dev/null
+++ b/src/verifai/modd/odd_trainer.py
@@ -0,0 +1,39 @@
+from abc import ABC
+from verifai.modd.odd_monitor import GenericMonitor, TorchMonitor
+from sklearn import tree
+from matplotlib import pyplot as plt
+import pickle
+import os
+import torch
+
+
+class Trainer(ABC):
+    def __init__(self, trainer_params):
+        self.trainer_params = trainer_params
+        self.monitor = None
+        self.training_results = None
+
+    def train(self, training_data):
+        pass
+
+
+class GenericTrainer(Trainer):
+    def __init__(self, trainer_params):
+        super().__init__(trainer_params)
+        self.monitor = GenericMonitor(trainer_params.model)
+
+    def train(self, training_data):
+        self.monitor.train(training_set=training_data)
+        self.training_results = {"score": self.monitor.score(training_data)}
+        print(self.training_results)
+        self.save(self.monitor)
+        return self.monitor, self.training_results
+
+    def save(self, monitor):
+        os.makedirs(os.path.dirname(self.trainer_params.save_model_path), exist_ok=True)
+        with open(self.trainer_params.save_model_path, "wb") as f:
+            pickle.dump(monitor.model, f)
+
+    def save_torch(self, monitor):
+        torch.save(monitor.model.state_dict(), self.trainer_params.save_model_path)
\ No newline at end of file
diff --git a/src/verifai/modd/pymodd.py b/src/verifai/modd/pymodd.py
new file mode 100644
index 0000000..a19f6e1
--- /dev/null
+++ b/src/verifai/modd/pymodd.py
@@ -0,0 +1,68 @@
+from verifai.modd.odd_evaluator import GenericEvaluator
+from verifai.modd.odd_data_generator import GenericDataGenerator
+from verifai.modd.odd_trainer import GenericTrainer
+from verifai.modd.odd_monitor import Monitor
+from dotmap import DotMap
+
+class MODD():
+    def __init__(self,
+                 datagen_params=DotMap(),
+                 trainer_params=DotMap(),
+                 eval_params=DotMap(),
+                 sampling_params=DotMap(),
+                 global_params=DotMap(),
+                 data_generator=None,
+                 trainer=None,
+                 evaluator=None):
+        self.datagen_params = datagen_params
+        self.trainer_params = trainer_params
+        self.eval_params = eval_params
+        self.sampling_params = sampling_params
+        self.global_params = global_params
+
+        self.data_generator = data_generator
+        self.trainer = trainer
+        self.evaluator = evaluator
+
+        if self.data_generator is None:
+            self.data_generator = GenericDataGenerator(datagen_params=datagen_params, sampling_params=sampling_params, global_params=global_params)
+        if self.trainer is None:
+            self.trainer = GenericTrainer(trainer_params=trainer_params)
+        if self.evaluator is None:
+            self.evaluator = GenericEvaluator(eval_params=eval_params, sampling_params=sampling_params, global_params=global_params)
+
+    def run(self):
+        refinement_iters = 0
+        if self.global_params.has_key("refinement_iters"):
+            refinement_iters = self.global_params.refinement_iters
+
+        training_data = self.data_generator.generate(self.global_params.initial_num_simulations, self.global_params.initial_num_steps)  # type: ignore
+        print("-- Finished data generation")
+        self.monitor, self.training_results = self.trainer.train(training_data)
+        print("-- Finished training")
+        self.evaluation_results = self.evaluator.evaluate(self.monitor)
+        print("-- Finished evaluation")
+
+        while refinement_iters > 0 and not self.evaluator.terminate:
+            # The parameters sampling_params of the data_generator, trainer, and evaluator
+            # can be modified here to change the simulations generated during the refinement iterations.
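+            # For example (a hypothetical adjustment, not part of the current API),
+            # the refinement rounds could run shorter simulations:
+            #     self.sampling_params.server.maxSteps = self.global_params.refinement_num_steps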
+
+            training_data = self.data_generator.generate(self.global_params.refinement_num_simulations, self.global_params.refinement_num_steps)  # type: ignore
+            self.monitor, self.training_results = self.trainer.train(training_data)
+            self.evaluation_results = self.evaluator.evaluate(self.monitor)
+
+            refinement_iters -= 1
+
+        return self.monitor, self.training_results, self.evaluation_results
diff --git a/src/verifai/scenic_server.py b/src/verifai/scenic_server.py
index fc58dcd..6bd8eda 100644
--- a/src/verifai/scenic_server.py
+++ b/src/verifai/scenic_server.py
@@ -51,6 +51,7 @@ def __init__(self, sampling_data, monitor, options={}):
             self.simulator = self.sampler.scenario.getSimulator()
         else:
             self.simulator = defaults.simulator
+        print("Scenic server initialized")
 
     def evaluate_sample(self, sample):
         scene = self.sampler.lastScene
@@ -121,6 +122,7 @@ def __init__(self, scenic_path, worker_num, monitor, options={}):
         self.maxIterations = defaults.maxIterations
 
     def get_sample(self, sample):
+        print("Get sample in SampleSimulator")
         self.sampler.scenario.externalSampler.last_sample = sample
         self.full_sample = self.sampler.nextSample(sample)
diff --git a/src/verifai/server.py b/src/verifai/server.py
index 87a75fa..b78bcdd 100644
--- a/src/verifai/server.py
+++ b/src/verifai/server.py
@@ -179,6 +179,7 @@ def close_connection(self):
         self.client_socket.close()
 
     def get_sample(self):
+        print("Get sample in server")
         return self.sampler.getSample()
 
     def flatten_sample(self, sample):
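
For orientation, the evaluator's two scores are fractions of runs whose robustness value rho is positive, with and without the monitor enabled; a sketch of reading them after a run (keys as defined in odd_evaluator.py above):

    results = modd.evaluation_results
    print(results["eval_score"])        # fraction of monitored runs satisfying the specification
    print(results["eval_score_nomon"])  # fraction of unmonitored runs satisfying it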