diff --git a/metrics/instance_segmentation_metrics.py b/metrics/instance_segmentation_metrics.py
index 51f88533b750e4b72d085514fc71ec9deccce6ce..a7b891b47f6b507e1695b9c979012fb9940fb563 100644
--- a/metrics/instance_segmentation_metrics.py
+++ b/metrics/instance_segmentation_metrics.py
@@ -276,8 +276,8 @@ class InstanceSegmentationMetrics:
                 # find hight of the tree in the prediction
                 hight_of_tree_pred = (self.instance_segmented_las[self.Y_labels == label].z).max() - (self.instance_segmented_las[self.Y_labels == label].z).min()
                
-                # get resiudal of the hight of the tree in the prediction
-                residual_hight_of_tree_pred = hight_of_tree_gt - hight_of_tree_pred
+                # get the absolute residual of the tree height in the prediction
+                residual_hight_of_tree_pred = abs(hight_of_tree_gt - hight_of_tree_pred)
 
                 # create tmp dict
                 tmp_dict = {
@@ -299,7 +299,7 @@ class InstanceSegmentationMetrics:
                 metric_dict[str(label)] = tmp_dict
             
         # list of interesting metrics 
-        interesting_parameters = ['true_positive', 'false_positive', 'false_negative', 'true_negative', 'precision', 'recall', 'f1_score', 'IoU']
+        interesting_parameters = ['precision', 'recall', 'f1_score', 'IoU', 'residual_hight(gt_minus_pred)']
 
         # weight the metrics by tree hight
         metric_dict_weighted_by_tree_hight = {}
@@ -350,11 +350,15 @@
             trees_correctly_predicted_IoU = set(trees_correctly_predicted_IoU)
 
             tree_level_metric = {
-                'true_positve (detection rate)': len(trees_correctly_predicted_IoU), 
-                'false_positve (commission)': len(trees_predicted - trees_correctly_predicted_IoU), 
-                'false_negative (omissions)': len(gt_trees - trees_predicted - trees_correctly_predicted_IoU), 
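+                # tree-level detection, commission and omission expressed as fractions of the ground-truth tree count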
+                'true_positive (detection rate)': len(trees_correctly_predicted_IoU) / len(gt_trees),
+                'false_positive (commission)': len(trees_predicted - trees_correctly_predicted_IoU) / len(gt_trees),
+                'false_negative (omissions)': len(gt_trees - trees_predicted - trees_correctly_predicted_IoU) / len(gt_trees),
                 'gt': len(gt_trees)}
 
+            # add tree level metrics to the metric_dict_mean
+            metric_dict_mean.update(tree_level_metric)
+
             if self.verbose:
                 print('Tree level metrics:')    
                 print(f'Trees in the ground truth: {gt_trees}')
diff --git a/metrics/instance_segmentation_metrics_in_folder.py b/metrics/instance_segmentation_metrics_in_folder.py
index cf6082566b6cacb0045a0907ecc91019614db5c3..f299e50f59cdd5794c00eb8b048f2da62b9d917e 100644
--- a/metrics/instance_segmentation_metrics_in_folder.py
+++ b/metrics/instance_segmentation_metrics_in_folder.py
@@ -1,3 +1,4 @@
+import csv
 import glob
 import os
 import argparse
@@ -125,12 +126,14 @@ class InstanceSegmentationMetricsInFolder():
    
         if self.output_folder_path is not None:
             # create the output folder path
-            save_to_csv_path = os.path.join(self.output_folder_path, 'mean_f1_score.csv')
-            # save the mean f1 score to a csv file
-            with open(save_to_csv_path, 'w') as f:
-                f.write('mean_f1_score\n')
-                f.write(str(mean_f1_score))
-
+            save_to_csv_path = os.path.join(self.output_folder_path, 'summary_metrics_all_plots.csv')
+            # save the mean metrics to a csv file
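+            # write one row per metric: name, mean value across all plots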
+            with open(save_to_csv_path, 'w', newline='') as csv_file:
+                writer = csv.writer(csv_file)
+                for key, value in mean_metrics.items():
+                    writer.writerow([key, value])
+
         if self.verbose:
             print('Mean F1 Score: {}'.format(mean_f1_score))
             # print the mean metrics