Commit a71a5ef3 authored by Maciej Wielgosz

Instance segmentation metrics updated to include more parameters and to save global results

parent fe50c994
@@ -276,8 +276,8 @@ class InstanceSegmentationMetrics:
# find height of the tree in the prediction
hight_of_tree_pred = (self.instance_segmented_las[self.Y_labels == label].z).max() - (self.instance_segmented_las[self.Y_labels == label].z).min()
-# get residual of the height of the tree in the prediction
-residual_hight_of_tree_pred = hight_of_tree_gt - hight_of_tree_pred
+# get absolute residual of the height of the tree in the prediction
+residual_hight_of_tree_pred = abs(hight_of_tree_gt - hight_of_tree_pred)
# create tmp dict
tmp_dict = {
@@ -299,7 +299,7 @@ class InstanceSegmentationMetrics:
metric_dict[str(label)] = tmp_dict
# list of interesting metrics
-interesting_parameters = ['true_positive', 'false_positive', 'false_negative', 'true_negative', 'precision', 'recall', 'f1_score', 'IoU']
+interesting_parameters = ['precision', 'recall', 'f1_score', 'IoU', 'residual_hight(gt_minus_pred)']
# weight the metrics by tree height
metric_dict_weighted_by_tree_hight = {}
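The collapsed lines below this hunk presumably populate metric_dict_weighted_by_tree_hight. For reference only, a minimal sketch of a height-proportional weighted mean, assuming per-tree metric dicts keyed by tree label; the function name and the exact weighting formula are illustrative and not code from this commit:

# Hypothetical sketch (not from this commit): average per-tree metrics,
# weighting each tree by its height so tall trees count proportionally more.
def weight_metrics_by_height(metric_dict, tree_heights, parameters):
    total_height = sum(tree_heights.values())
    return {
        param: sum(
            metrics[param] * tree_heights[label] / total_height
            for label, metrics in metric_dict.items()
        )
        for param in parameters
    }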
@@ -350,11 +350,14 @@ class InstanceSegmentationMetrics:
trees_correctly_predicted_IoU = set(trees_correctly_predicted_IoU)
tree_level_metric = {
-'true_positive (detection rate)': len(trees_correctly_predicted_IoU),
-'false_positive (commission)': len(trees_predicted - trees_correctly_predicted_IoU),
-'false_negative (omissions)': len(gt_trees - trees_predicted - trees_correctly_predicted_IoU),
+'true_positive (detection rate)': len(trees_correctly_predicted_IoU) / len(gt_trees),
+'false_positive (commission)': len(trees_predicted - trees_correctly_predicted_IoU) / len(gt_trees),
+'false_negative (omissions)': len(gt_trees - trees_predicted - trees_correctly_predicted_IoU) / len(gt_trees),
'gt': len(gt_trees)}
+# add tree level metrics to the metric_dict_mean
+metric_dict_mean.update(tree_level_metric)
if self.verbose:
print('Tree level metrics:')
print(f'Trees in the ground truth: {gt_trees}')
@@ -363,6 +366,8 @@
print(tree_level_metric)
return metric_dict, metric_dict_weighted_by_tree_hight, metric_dict_mean
def print_metrics(self, metric_dict):
...
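Before the second file, a standalone sketch of the set arithmetic behind the normalized tree-level metrics introduced above. The label sets are made-up inputs; in the class they come from the IoU matching step that this diff does not show:

# Hypothetical example (not from this commit). Labels identify individual trees.
gt_trees = {1, 2, 3, 4, 5}                 # trees present in the ground truth
trees_predicted = {1, 2, 3, 6}             # trees matched to some prediction
trees_correctly_predicted_IoU = {1, 2, 3}  # matches that pass the IoU threshold

detection_rate = len(trees_correctly_predicted_IoU) / len(gt_trees)                        # 3/5 = 0.6
commission = len(trees_predicted - trees_correctly_predicted_IoU) / len(gt_trees)          # 1/5 = 0.2
omission = len(gt_trees - trees_predicted - trees_correctly_predicted_IoU) / len(gt_trees) # 2/5 = 0.4

Dividing all three counts by len(gt_trees) turns the raw counts of the old code into rates, so plots with different numbers of ground-truth trees become comparable; note that commission is normalized here by the ground-truth count rather than by the number of predictions.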
+import csv
import glob
import os
import argparse
@@ -125,12 +126,13 @@ class InstanceSegmentationMetricsInFolder():
if self.output_folder_path is not None:
# create the output folder path
-save_to_csv_path = os.path.join(self.output_folder_path, 'mean_f1_score.csv')
-# save the mean f1 score to a csv file
-with open(save_to_csv_path, 'w') as f:
-    f.write('mean_f1_score\n')
-    f.write(str(mean_f1_score))
+save_to_csv_path = os.path.join(self.output_folder_path, 'summary_metrics_all_plots.csv')
+# save the mean metrics to a csv file
+with open(save_to_csv_path, 'w') as csv_file:
+    writer = csv.writer(csv_file)
+    for key, value in mean_metrics.items():
+        writer.writerow([key, value])
if self.verbose:
print('Mean F1 Score: {}'.format(mean_f1_score))
# print the mean metrics
...
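To make the new output format concrete, a minimal sketch of the key/value CSV layout the loop above produces, together with one way to read it back. The file name matches the diff, but the metric names and values are invented for illustration:

import csv

# Hypothetical example (not from this commit): one "key,value" row per metric.
mean_metrics = {'precision': 0.91, 'recall': 0.87, 'f1_score': 0.89, 'IoU': 0.80}

# newline='' avoids blank rows on Windows; the commit itself omits it
with open('summary_metrics_all_plots.csv', 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)
    for key, value in mean_metrics.items():
        writer.writerow([key, value])

# read the summary back into a dict
with open('summary_metrics_all_plots.csv', newline='') as csv_file:
    restored = {key: float(value) for key, value in csv.reader(csv_file)}

print(restored)  # {'precision': 0.91, 'recall': 0.87, 'f1_score': 0.89, 'IoU': 0.8}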