Skip to content

Commit

Permalink
Added evaluation - have to wait til I have a trained model to fully t…
Browse files Browse the repository at this point in the history
…est it

Changed from "upload_tags" in config to using separate stages for train and eval upload
  • Loading branch information
luke-iqt committed Jul 20, 2021
1 parent 2c49394 commit fbae5ec
Show file tree
Hide file tree
Showing 3 changed files with 203 additions and 66 deletions.
39 changes: 39 additions & 0 deletions ml-model/scripts/evaluation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
import logging
import fiftyone as fo
from fiftyone import ViewField as F
import os
import numpy as np

def evaluate_detection_model(dataset_name, prediction_field, evaluation_key, evaluation_tag="eval"):
    """Evaluate object-detection predictions stored on a voxel51 dataset.

    Runs FiftyOne's detection evaluation on the samples tagged for
    evaluation, prints a per-class report for the ten most common classes,
    and logs aggregate TP/FP/FN counts and mAP.

    Args:
        dataset_name (str): name of the voxel51 dataset to load.
        prediction_field (str): sample field holding model predictions.
        evaluation_key (str): eval key under which per-sample TP/FP/FN
            counts are stored (e.g. ``<evaluation_key>_tp``).
        evaluation_tag (str): sample tag selecting the evaluation split.
            Defaults to "eval" to match the upload stages' tagging scheme.

    Returns:
        fiftyone.utils.eval.detection.DetectionResults: the evaluation
        results object (also printed/logged as a side effect).
    """
    dataset = fo.load_dataset(dataset_name)

    # Only evaluate the held-out samples; previously this referenced the
    # undefined name `evaluation_name`, which raised a NameError.
    view = dataset.match_tags(evaluation_tag)

    # Ensure every sample has a ground-truth field: samples with no objects
    # must carry an empty Detections instance so evaluation counts their
    # predictions as false positives instead of erroring.
    for sample in view:
        if sample["detections"] is None:
            sample["detections"] = fo.Detections(detections=[])
            sample.save()

    results = view.evaluate_detections(
        prediction_field, gt_field="detections", eval_key=evaluation_key
    )

    # Get the 10 most common ground-truth classes in the evaluation split
    counts = view.count_values("detections.detections.label")
    classes = sorted(counts, key=counts.get, reverse=True)[:10]

    # Print a classification report for the top-10 classes
    results.print_report(classes=classes)

    # Log total TP/FP/FN counts; samples outside the view simply lack the
    # eval fields, so summing over the whole dataset is safe.
    # Lazy %-style args avoid formatting when the log level is disabled.
    logging.info("TP: %d", dataset.sum(evaluation_key + "_tp"))
    logging.info("FP: %d", dataset.sum(evaluation_key + "_fp"))
    logging.info("FN: %d", dataset.sum(evaluation_key + "_fn"))

    # View with the most-false-positive samples first, restricted to the
    # false-positive prediction boxes — useful for interactive inspection.
    eval_view = (
        view
        .sort_by(evaluation_key + "_fp", reverse=True)
        .filter_labels(prediction_field, F(evaluation_key) == "fp")
    )

    logging.info("mAP: %f", results.mAP())
    return results
8 changes: 5 additions & 3 deletions ml-model/scripts/labelbox_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ def upload_vox51_dataset_to_labelbox(
labelbox_project_name,
voxel51_dataset_name,
upload_num_samples=500,
upload_tag="training",
upload_tag="train",
avoid_tag="eval",
labelbox_id_field="labelbox_id"
):
"""Upload a voxel51 dataset to labelbox.
Expand Down Expand Up @@ -70,9 +71,10 @@ def upload_vox51_dataset_to_labelbox(
labelbox_dataset = list(labelbox_datasets)[0]

# take random sample of images and upload to labelbox
view = dataset.shuffle().take(upload_num_samples)
stage = fo.MatchTags(avoid_tag, bool=False)
view = dataset.add_stage(stage).shuffle().take(upload_num_samples)

# add a "training" tag to all of the samples being sent to labelbox
    # add upload_tag to all of the samples being sent to labelbox
for sample in view:
sample.tags.append(upload_tag)
sample.save()
Expand Down
222 changes: 159 additions & 63 deletions ml-model/scripts/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,7 @@ def read_config(config_file=os.path.join("config", "config.ini")):
"tile_string": "1920x1080,768x768",
"tile_overlap": 50,
"iou_threshold": 0,
"upload_tag": "training",
"num_upload_samples": 500
"upload_num_samples": 500
}
config.read(config_file)
logging.info("Finished reading config file.")
Expand All @@ -76,17 +75,28 @@ def parse_command_line_arguments():
help="Prepare voxel51 dataset.",
)
parser.add_argument(
"--upload",
"--upload_to_labelbox",
"--upload_train",
default=False, # default value is False
action="store_true",
help="Upload dataset to labelbox.",
help="Upload train samples to labelbox.",
)
parser.add_argument(
"--resume_upload",
"--resume_upload_train",
default=False, # default value is False
action="store_true",
help="Resume upload dataset to labelbox.",
help="Resume uploading train samples to labelbox.",
)
parser.add_argument(
"--upload_eval",
default=False, # default value is False
action="store_true",
help="Upload eval samples to labelbox.",
)
parser.add_argument(
"--resume_upload_eval",
default=False, # default value is False
action="store_true",
help="Resume uploading eval samples to labelbox.",
)
parser.add_argument(
"--download",
Expand All @@ -110,6 +120,13 @@ def parse_command_line_arguments():
help="Model prediction.",
)

parser.add_argument(
"--evaluate",
default=False, # default value is False
action="store_true",
help="Model evaluation.",
)

parser.add_argument(
"--predict_tiled",
default=False, # default value is False
Expand Down Expand Up @@ -157,8 +174,8 @@ def parse_command_line_arguments():
)
sys.exit(1) # exit program

# check if user selected upload to labelbox stage
if args.upload:
# check if user selected upload train to labelbox stage
if args.upload_train:
if all(
[
config["labelbox"]["api_key"],
Expand All @@ -167,25 +184,81 @@ def parse_command_line_arguments():
config["file_names"]["dataset_name"],
]
):
logging.info("Entering 'upload dataset to labelbox' route.")
logging.info("Entering 'upload train samples to labelbox' route.")
upload_vox51_dataset_to_labelbox(
config["labelbox"]["api_key"],
config["labelbox"]["dataset_name"],
config["labelbox"]["project_name"],
config["file_names"]["dataset_name"],
config["upload"]["upload_num_samples"],
config["upload"]["upload_tag"]
int(config["upload"]["upload_num_samples"]),
"train",
"eval"
)
logging.info("Exiting 'upload dataset to labelbox' route.")
logging.info("Exiting 'upload train samples to labelbox' route.")
else:
logging.info(
"""Missing config file value for labelbox API key, lablebox dataset name,
labelbox project name or voxel51 dataset name."""
)
sys.exit(1) # exit program

# check if user selected resume_upload to labelbox stage
if args.resume_upload:
# check if user selected upload eval to labelbox stage
if args.upload_eval:
if all(
[
config["labelbox"]["api_key"],
config["labelbox"]["dataset_name"],
config["labelbox"]["project_name"],
config["file_names"]["dataset_name"],
]
):
logging.info("Entering 'upload eval samples to labelbox' route.")
upload_vox51_dataset_to_labelbox(
config["labelbox"]["api_key"],
config["labelbox"]["dataset_name"],
config["labelbox"]["project_name"],
config["file_names"]["dataset_name"],
int(config["upload"]["upload_num_samples"]),
"eval"
"train"
)
logging.info("Exiting 'upload eval samples to labelbox' route.")
else:
logging.info(
"""Missing config file value for labelbox API key, lablebox dataset name,
labelbox project name or voxel51 dataset name."""
)
sys.exit(1) # exit program


# check if user selected resume_upload_train to labelbox stage
if args.resume_upload_train:
if all(
[
config["labelbox"]["api_key"],
config["labelbox"]["dataset_name"],
config["labelbox"]["project_name"],
config["file_names"]["dataset_name"],
]
):
logging.info("Entering 'resume uploading train samples to labelbox' route.")
resume_upload_vox51_dataset_to_labelbox(
config["labelbox"]["api_key"],
config["labelbox"]["dataset_name"],
config["labelbox"]["project_name"],
config["file_names"]["dataset_name"],
"train"
)
logging.info("Exiting 'resume uploading train samples to labelbox' route.")
else:
logging.info(
"""Missing config file value for labelbox API key, lablebox dataset name,
labelbox project name or voxel51 dataset name."""
)
sys.exit(1) # exit program

# check if user selected resume_upload_eval to labelbox stage
if args.resume_upload_eval:
if all(
[
config["labelbox"]["api_key"],
Expand All @@ -200,7 +273,7 @@ def parse_command_line_arguments():
config["labelbox"]["dataset_name"],
config["labelbox"]["project_name"],
config["file_names"]["dataset_name"],
config["upload"]["upload_tag"]
"eval"
)
logging.info("Exiting 'resume upload dataset to labelbox' route.")
else:
Expand All @@ -211,6 +284,7 @@ def parse_command_line_arguments():
sys.exit(1) # exit program



# check if user selected download from labelbox stage
if args.download:
if (
Expand All @@ -230,6 +304,59 @@ def parse_command_line_arguments():
)
sys.exit(1) # exit program

# check if user selected train model stage
if args.train:
if all ([
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"],
config["model"]["num_train_steps"]
]
):
logging.info("Entering 'train model' route.")
train_detection_model(
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"],
int(config["model"]["num_train_steps"]),
config["model"]["label_field"],
int(config["model"]["num_eval_steps"])
)
logging.info("Exiting 'train model' route.")
else:
logging.info(
"""Missing one or more config file values required for training:
- file_names / dataset_name
- model / training_name
- model / base_model
- model / num_train_steps"""
)
sys.exit(1) # exit program

# check if user selected export model stage
if args.export_model:
if all ([
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"]
]
):
logging.info("Entering 'export model' route.")
export_detection_model(
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"]
)
logging.info("Exiting 'export model' route.")
else:
logging.info(
"""Missing one or more config file values required for exporting a model:
- file_names / dataset_name
- model / training_name
- model / base_model"""
)
sys.exit(1) # exit program


# check if user selected model prediction stage
if args.predict:
Expand Down Expand Up @@ -270,8 +397,8 @@ def parse_command_line_arguments():
config["model"]["training_name"],
config["prediction"]["prediction_field"],
config["prediction"]["tile_string"],
config["prediction"]["tile_overlap"],
config["prediction"]["iou_threshold"]
int(config["prediction"]["tile_overlap"]),
float(config["prediction"]["iou_threshold"])
)
logging.info("Exiting 'model prediction tiled' route.")
else:
Expand All @@ -283,59 +410,28 @@ def parse_command_line_arguments():
)
sys.exit(1) # exit program





# check if user selected train model stage
if args.train:
# check if user selected evaluate model stage
if args.evaluate:
if all ([
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"],
config["model"]["num_train_steps"]
config["prediction"]["prediction_field"],
config["evaluation"]["evaluation_key"]
]
):
logging.info("Entering 'train model' route.")
train_detection_model(
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"],
config["model"]["num_train_steps"],
config["model"]["label_field"],
config["model"]["num_eval_steps"]
)
logging.info("Exiting 'train model' route.")
else:
logging.info(
"""Missing one or more config file values required for training:
- file_names / dataset_name
- model / training_name
- model / base_model
- model / num_train_steps"""
)
sys.exit(1) # exit program

# check if user selected export model stage
if args.export_model:
if all ([
logging.info("Entering 'model evaluation' route.")
evaluate_detection_model(
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"]
]
):
logging.info("Entering 'export model' route.")
export_detection_model(
config["file_names"]["dataset_name"],
config["model"]["training_name"],
config["model"]["base_model"]
config["prediction"]["prediction_field"],
config["evaluation"]["evaluation_key"]
)
logging.info("Exiting 'export model' route.")
logging.info("Exiting 'model evaluation' route.")
else:
logging.info(
"""Missing one or more config file values required for exporting a model:
"""Missing one or more config file values required for evaluation:
- file_names / dataset_name
- model / training_name
- model / base_model"""
- prediction / prediction_field
- evaluation / evaluation_key"""
)
sys.exit(1) # exit program


0 comments on commit fbae5ec

Please sign in to comment.