diff --git a/bash_helper_scripts/get_austrian_after_sem_seg.sh b/bash_helper_scripts/get_austrian_after_sem_seg.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2c7f4b7a0395881508fcf8aa733a8d7eecf4b0ff
--- /dev/null
+++ b/bash_helper_scripts/get_austrian_after_sem_seg.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+TARGET_FOLDER=/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/maciek_optimized
+# clean the folder
+rm -rf $TARGET_FOLDER/*
+
+cp -r /home/nibio/mutable-outside-world/data/austrian_data_after_seg/* $TARGET_FOLDER
+
diff --git a/bash_helper_scripts/get_austrian_after_sem_seg_sean.sh b/bash_helper_scripts/get_austrian_after_sem_seg_sean.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d581635befa02af19ea040064f5787bd4b1bcd1d
--- /dev/null
+++ b/bash_helper_scripts/get_austrian_after_sem_seg_sean.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+TARGET_FOLDER=/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
+# clean the folder
+rm -rf $TARGET_FOLDER/*
+
+cp -r /home/nibio/mutable-outside-world/data/austrian_data_after_seg_sean/* $TARGET_FOLDER
+
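Note on the two helper scripts above: `rm -rf $TARGET_FOLDER/*` expands an unquoted, unvalidated variable. A safer spelling is `rm -rf "${TARGET_FOLDER:?}"/*`, which quotes the expansion and makes bash abort if `TARGET_FOLDER` is ever unset or empty.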
diff --git a/bash_helper_scripts/get_small_data_for_playground.sh b/bash_helper_scripts/get_small_data_for_playground.sh
index 3e096fb1a845bd2a80de14bade678c0e873668c7..a0c5e0f2b06007b807018a42cf8a9201507e01eb 100755
--- a/bash_helper_scripts/get_small_data_for_playground.sh
+++ b/bash_helper_scripts/get_small_data_for_playground.sh
@@ -6,7 +6,7 @@ cp /home/nibio/mutable-outside-world/data/small_file_pipeline_test/small_file_pi
 # change name of the file to first.las
 mv $TARGET_FOLDER/small_file_pipeline_test.las $TARGET_FOLDER/first.las
 
-# # make a copy of the file
+# make a copy of the file
 # cp $TARGET_FOLDER/first.las $TARGET_FOLDER/second.las
 
 # # make a copy of the file
diff --git a/fsct/model/model_best_agnostic.pth b/fsct/model/model_best_agnostic.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6856d2508310be4354ca9428b0307a405d84fee4
Binary files /dev/null and b/fsct/model/model_best_agnostic.pth differ
diff --git a/helpers/how_many_points_in_class.py b/helpers/how_many_points_in_class.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9b1e84d70a9fd60801333017a6e4688eb7b0443
--- /dev/null
+++ b/helpers/how_many_points_in_class.py
@@ -0,0 +1,51 @@
+import os
+from collections import Counter
+import laspy
+
+
+def main(folder_path):
+    # Define the path to the folder containing the LAS files
+    # folder_path = "/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground"
+
+    # Define the class codes you want to count
+    class_codes = [0, 1, 2, 3, 4]
+
+    # Define a dictionary to hold the counts for each class
+    class_counts = {class_code: 0 for class_code in class_codes}
+
+    # Iterate through the files in the folder
+    for file_name in os.listdir(folder_path):
+        if file_name.endswith(".las"):
+            # Open the LAS file using laspy
+            las_file = laspy.read(os.path.join(folder_path, file_name))
+
+            # Count the points in each class and update the class_counts dictionary
+            point_classes = Counter(las_file.label)
+            for class_code in class_codes:
+                class_counts[class_code] += point_classes[class_code]
+
+    # Define the names of the classes
+    names = ["ignore", "terrain", "vegetation", "CWD", "stem"]
+
+    # Print the class counts
+    print("Class counts:")
+    for class_code in class_codes:
+        print(f"Class {names[class_code]}: {class_counts[class_code]}")
+
+    # print it in percentages
+    print("Class counts in percentages:")
+    for class_code in class_codes:
+        print(f"Class {names[class_code]}: {class_counts[class_code] / sum(class_counts.values()) * 100:.2f} %")
+
+
+if __name__ == "__main__":
+    # use argparse to parse the command line arguments
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--folder_path", help="Path to the folder containing the LAS files")
+
+    args = parser.parse_args()
+
+    main(args.folder_path)
+
+
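For reference, the helper is invoked as `python helpers/how_many_points_in_class.py --folder_path <dir>`. Below is a minimal sketch of the same per-class tally done with NumPy instead of `Counter`, assuming, as the script does, that each file carries an integer `label` extra dimension; `count_labels` is a hypothetical name, not part of this diff:

```python
import glob
import os

import laspy
import numpy as np

def count_labels(folder_path, num_classes=5):
    """Tally points per class across all .las files in a folder."""
    counts = np.zeros(num_classes, dtype=np.int64)
    for path in glob.glob(os.path.join(folder_path, "*.las")):
        labels = np.asarray(laspy.read(path).label, dtype=np.int64)
        # bincount is linear in the number of points; the slice guards
        # against stray labels above the expected range
        counts += np.bincount(labels, minlength=num_classes)[:num_classes]
    return counts
```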
diff --git a/helpers/remove_points_of_class_from_pc.py b/helpers/remove_points_of_class_from_pc.py
new file mode 100644
index 0000000000000000000000000000000000000000..6008c955196ebffefc86d6cdc578933fb12f3232
--- /dev/null
+++ b/helpers/remove_points_of_class_from_pc.py
@@ -0,0 +1,84 @@
+import argparse
+import glob
+import os
+import laspy
+import numpy as np
+
+class RemovePointsOfClassFromPC:
+    def __init__(self, folder_name, class_name, class_value, verbose=False):
+        self.folder_name = folder_name
+        self.class_name = class_name
+        self.class_value = class_value
+        self.verbose = verbose
+
+    def get_paths_of_files(self, folder_name):
+        # use glob to get all the paths of the files in the folder
+        paths = glob.glob(os.path.join(folder_name, "*.las"), recursive=False)
+
+        # check if the folder is empty
+        if len(paths) == 0:
+            raise Exception("The folder is empty")
+
+        if self.verbose:
+            print("The number of files in the folder {} is {}".format(folder_name, len(paths)))
+
+        return paths
+
+    def read_one_las_file_and_remove_points(self, file_path, class_name, class_value):
+        # read the las file
+        las_file = laspy.read(file_path)
+
+        # skip files that contain no points of the given class
+        if np.sum(las_file[class_name] == class_value) == 0:
+            if self.verbose:
+                print("No points with the class value {} in the file {}".format(class_value, file_path))
+                print("No points removed from the file {}".format(file_path))
+            return
+
+        # create a new las file; reusing the original header keeps the scales and offsets intact
+        new_file = laspy.LasData(header=las_file.header)
+
+        # keep every point except those of the given class
+        new_file.points = las_file.points[las_file[class_name] != class_value]
+
+        # overwrite the original las file
+        new_file.write(file_path)
+
+    def remove_points_of_class_from_pc(self, folder_name, class_name, class_value):
+        # get the paths of all the files in the folder
+        paths = self.get_paths_of_files(folder_name)
+
+        # read all the files and remove the points of the class
+        for path in paths:
+            if self.verbose:
+                print("Removing points from the file {}".format(path))
+            self.read_one_las_file_and_remove_points(path, class_name, class_value)
+
+        if self.verbose:
+            print("Done")
+
+    def __call__(self):
+        self.remove_points_of_class_from_pc(self.folder_name, self.class_name, self.class_value)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Remove points of a class from a point cloud")
+    parser.add_argument("-f", "--folder_name", type=str, help="The folder name where the point clouds are stored")
+    parser.add_argument("-n", "--class_name", type=str, help="The class name of the points to be removed")
+    parser.add_argument("-c", "--class_value", type=int, help="The class value of the points to be removed")
+    parser.add_argument("-v", "--verbose", action="store_true", help="Print the progress")
+
+    args = parser.parse_args()
+
+    # get the arguments
+    folder_name = args.folder_name
+    class_name = args.class_name
+    class_value = args.class_value
+    verbose = args.verbose
+
+    # remove the points of the class from the point cloud
+    remove_points_of_class_from_pc = RemovePointsOfClassFromPC(folder_name, class_name, class_value, verbose)
+    remove_points_of_class_from_pc()
+
+
+        
\ No newline at end of file
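Example usage of the new helper, equivalent to `python helpers/remove_points_of_class_from_pc.py -f sample_playground -n label -c 0 -v`; the folder and values here are hypothetical, and the import assumes the repository root is on `PYTHONPATH`:

```python
# strip class 0 ("ignore") from every .las file in the folder, in place
from helpers.remove_points_of_class_from_pc import RemovePointsOfClassFromPC

remover = RemovePointsOfClassFromPC("sample_playground", "label", 0, verbose=True)
remover()  # rewrites each file without the removed points
```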
diff --git a/metrics/instance_segmentation_metrics.py b/metrics/instance_segmentation_metrics.py
index bd7e9cfa721003829e354d1d351b8eb0af27e58c..e10ecee4c5447eb2c9e58ac504c3ce09b5e6adef 100644
--- a/metrics/instance_segmentation_metrics.py
+++ b/metrics/instance_segmentation_metrics.py
@@ -9,7 +9,7 @@ from sklearn.neighbors import KDTree
 logging.basicConfig(level=logging.INFO)
 
 class InstanceSegmentationMetrics:
-    GT_LABEL_NAME = 'treeID'  #GT_LABEL_NAME = 'StemID'
+    GT_LABEL_NAME = 'treeID'  # alternative: 'StemID'
     TARGET_LABEL_NAME = 'instance_nr'
     def __init__(
         self, 
@@ -290,7 +290,9 @@ class InstanceSegmentationMetrics:
                 hight_of_tree_pred = (self.instance_segmented_las[self.Y_labels == label].z).max() - (self.instance_segmented_las[self.Y_labels == label].z).min()
                
-                # get abs residual of the height of the tree in the prediction
-                residual_hight_of_tree_pred = abs(hight_of_tree_gt - hight_of_tree_pred)
+                # get the signed height residual of the tree (gt minus pred)
+                residual_hight_of_tree_pred = hight_of_tree_gt - hight_of_tree_pred
+
+                # squared residual; the square root is taken later, during aggregation (RMSE)
+                rmse_hight = np.square(residual_hight_of_tree_pred)
 
                 # create tmp dict
                 tmp_dict = {
@@ -299,6 +301,7 @@ class InstanceSegmentationMetrics:
                 'high_of_tree_gt': hight_of_tree_gt,
                 'high_of_tree_pred': hight_of_tree_pred,
                 'residual_hight(gt_minus_pred)': residual_hight_of_tree_pred,
+                'rmse_hight': rmse_hight,
                 'sum_all': sum_all,
                 'true_positive': true_positive, 
                 'false_positive': false_positive, 
@@ -312,7 +315,7 @@ class InstanceSegmentationMetrics:
                 metric_dict[str(label)] = tmp_dict
             
         # list of interesting metrics 
-        interesting_parameters = ['precision', 'recall', 'f1_score', 'IoU', 'residual_hight(gt_minus_pred)']
+        interesting_parameters = ['precision', 'recall', 'f1_score', 'IoU', 'residual_hight(gt_minus_pred)', 'rmse_hight']
 
         # weight the metrics by tree height
         metric_dict_weighted_by_tree_hight = {}
@@ -328,6 +331,9 @@ class InstanceSegmentationMetrics:
             # divide by the sum of the hights of the trees
             for parameter in interesting_parameters:
                 metric_dict_weighted_by_tree_hight[parameter] /= sum([metric_dict[label]['high_of_tree_gt'] for label in metric_dict.keys()])
+                if parameter == 'rmse_hight':
+                    # take the square root of the height-weighted mean of squared residuals (RMSE)
+                    metric_dict_weighted_by_tree_hight[parameter] = metric_dict_weighted_by_tree_hight[parameter] ** 0.5
 
         # compute the mean of the metrics
         metric_dict_mean = {}
@@ -342,6 +348,9 @@ class InstanceSegmentationMetrics:
 
             for parameter in interesting_parameters:
                 metric_dict_mean[parameter] = metric_dict_mean[parameter] / len(metric_dict)
+                if parameter == 'rmse_hight':
+                    # take the square root of the mean squared residual (RMSE)
+                    metric_dict_mean[parameter] = metric_dict_mean[parameter] ** 0.5
 
         # compute tree level metrics
         if metric_dict:
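The new `rmse_hight` entry stores the squared residual per tree; the square root is only applied after the weighted (or plain) mean is formed, which is what makes the aggregate an RMSE. A standalone sketch of the two aggregations, with illustrative numbers:

```python
import numpy as np

# illustrative values: residuals are gt minus pred, heights are gt tree heights
residuals = np.array([0.4, -1.2, 0.7])
heights_gt = np.array([18.0, 22.5, 15.3])

# per-tree entry stored in the metric dict: the squared residual
squared = residuals ** 2

# plain RMSE: mean the squares over the trees, then take the root
rmse_mean = np.sqrt(squared.sum() / len(squared))

# height-weighted RMSE: weight each square by the gt height before normalising
rmse_weighted = np.sqrt((squared * heights_gt).sum() / heights_gt.sum())
```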
diff --git a/metrics/instance_segmentation_metrics_in_folder.py b/metrics/instance_segmentation_metrics_in_folder.py
index e9b4eca42a34a3289e5396597dbe3b634cb4b1a5..f22b6b6dd7782a161d5865fbc7cbca3c9116cccd 100644
--- a/metrics/instance_segmentation_metrics_in_folder.py
+++ b/metrics/instance_segmentation_metrics_in_folder.py
@@ -9,7 +9,7 @@ from metrics.instance_segmentation_metrics import InstanceSegmentationMetrics
 from nibio_postprocessing.attach_labels_to_las_file_pred2gt import AttachLabelsToLasFilePred2Gt
 
 class InstanceSegmentationMetricsInFolder():
-    GT_LABEL_NAME = 'treeID'
+    GT_LABEL_NAME = 'treeID' # 'StemID' or 'treeID' 
     TARGET_LABEL_NAME = 'instance_nr'
 
     def __init__(
@@ -111,7 +111,16 @@ class InstanceSegmentationMetricsInFolder():
         #     f1_scores_weighted_list.append(f1_score_weighted)
 
         # calculate the mean f1 score of weighted f1 scores
-        mean_f1_score = sum(f1_scores_weighted_list) / len(f1_scores_weighted_list)
+        # mean_f1_score = sum(f1_scores_weighted_list) / len(f1_scores_weighted_list)
+
+        # dirty hack: divide by the total number of gt files so that point clouds
+        # broken by Phil's code count as zero instead of being skipped
+        mean_f1_score = sum(f1_scores_weighted_list) / len(gt_las_file_paths)
+
+        print('number of files: ' + str(len(gt_las_file_paths)))
+
         # calculate the mean metrics for all the elements in the metric_dict_list
         # create a mean_metrics dictionary and initialize it with zeros
         mean_metrics = {}
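The hack changes only the denominator: files that crashed out of the metric computation now drag the mean down instead of silently disappearing. In sketch form, with hypothetical scores:

```python
# scores from files that processed successfully; crashed files are simply absent
f1_scores_weighted_list = [0.82, 0.79, 0.88]
gt_las_file_paths = ["a.las", "b.las", "c.las", "d.las", "e.las"]  # all gt files

# old: mean over successful files only -> 0.83
# new: crashed files contribute zero   -> 0.498
mean_f1_score = sum(f1_scores_weighted_list) / len(gt_las_file_paths)
```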
diff --git a/optimization_pipeline/tls.sh b/optimization_pipeline/tls.sh
index 996c1fd826cc19598ee5e7858f550eb06efa567b..9f76038b7a1759d0301abd1c1e758c875813126c 100755
--- a/optimization_pipeline/tls.sh
+++ b/optimization_pipeline/tls.sh
@@ -201,8 +201,6 @@ for segmented_point_cloud_in_ply in $data_folder/results/segmented_point_clouds/
     --writers.las.extra_dims=all
 done
 
-python nibio_preprocessing/add_ground_to_inst_seg_folders.py --sem_seg_folder sample_playground/results/segmented_point_clouds/ --inst_seg_folder sample_playground/results/instance_segmented_point_clouds/ --output_folder sample_playground/instance_seg_with_ground --verbose
-
 # create the instance segmented point clouds with ground folder
 mkdir -p $data_folder/results/instance_segmented_point_clouds_with_ground
 
diff --git a/run_bash_scripts/sem_seg_sean.sh b/run_bash_scripts/sem_seg_sean.sh
index 37327d22e00e3e92e4e4d7ab97ae25c8db55a01c..33ac27667abb60cc4b72f733efdc017b63e51964 100755
--- a/run_bash_scripts/sem_seg_sean.sh
+++ b/run_bash_scripts/sem_seg_sean.sh
@@ -3,12 +3,12 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1  # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env" # conda environment for running the pipeline
+CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
 
 # Parameters for the semantic segmentation
 data_folder="" # path to the folder containing the data
-checkpoint_model_path="./fsct/model/model.pth"
-batch_size=5 # batch size for the inference
+checkpoint_model_path="./fsct/model/model.pth" # path to the checkpoint model (defaults to our basic model)
+batch_size=10 # batch size for the inference
 tile_size=10 # tile size in meters
 min_density=75 # minimum density of points in a tile(used for removing small tiles)
 remove_small_tiles=0 # 1: remove small tiles, 0: not remove small tiles
@@ -45,6 +45,11 @@ echo "d: data_folder"
 echo "      The values of the parameters:"
 echo "data_folder: $data_folder"
 echo "remove_small_tiles: $remove_small_tiles"
+echo "checkpoint_model_path: $checkpoint_model_path"
+echo "batch_size: $batch_size"
+echo "tile_size: $tile_size"
+echo "min_density: $min_density"
+
 
 # Do the environment setup
 # check if PYTHONPATH is set to the current directory
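With the extended echo block, the effective checkpoint path, batch size, tile size, and density threshold now all appear in the log before the run starts. Given the `d:` option documented above, a typical invocation is `bash run_bash_scripts/sem_seg_sean.sh -d /path/to/data`.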
diff --git a/run_bash_scripts/tls.sh b/run_bash_scripts/tls.sh
index 2e3a19a031dc8cc54051261c80875524b2045e3d..9f76038b7a1759d0301abd1c1e758c875813126c 100755
--- a/run_bash_scripts/tls.sh
+++ b/run_bash_scripts/tls.sh
@@ -3,7 +3,7 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1  # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env" # conda environment for running the pipeline
+CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
 
 # Tiling parameters
 data_folder="" # path to the folder containing the data
diff --git a/sean_sem_seg/inference.py b/sean_sem_seg/inference.py
index 6c245028abbc2aba4364de938abc147b02c8f110..0950023a426b9713ee70cbf0dce93ecb9a0f4359 100644
--- a/sean_sem_seg/inference.py
+++ b/sean_sem_seg/inference.py
@@ -19,6 +19,10 @@ from tools import load_file, save_file
 import shutil
 import sys
 
+from other_parameters import other_parameters
+
+NUM_CLASSES = other_parameters['num_classes']
+
 sys.setrecursionlimit(10**8)  # Can be necessary for dealing with large point clouds.
 
 
@@ -60,11 +64,12 @@ def choose_most_confident_label(point_cloud, original_point_cloud):
     )
     _, indices = neighbours.kneighbors(original_point_cloud[:, :3])
 
-    labels = np.zeros((original_point_cloud.shape[0], 5))
-    labels[:, :4] = np.median(point_cloud[indices][:, :, -4:], axis=1)
-    labels[:, 4] = np.argmax(labels[:, :4], axis=1)
+    labels = np.zeros((original_point_cloud.shape[0], NUM_CLASSES + 1))
+    labels[:, :NUM_CLASSES] = np.median(point_cloud[indices][:, :, -NUM_CLASSES:], axis=1)
+    labels[:, NUM_CLASSES] = np.argmax(labels[:, :NUM_CLASSES], axis=1)
 
-    original_point_cloud = np.hstack((original_point_cloud, labels[:, 4:]))
+    original_point_cloud = np.hstack((original_point_cloud, labels[:, NUM_CLASSES:]))
+
     return original_point_cloud
 
 class SemanticSegmentation:
@@ -99,7 +104,7 @@ class SemanticSegmentation:
 
         test_loader = DataLoader(test_dataset, batch_size=self.parameters["batch_size"], shuffle=False, num_workers=0)
 
-        model = Net(num_classes=4).to(self.device)
+        model = Net(num_classes=NUM_CLASSES).to(self.device)
         if self.parameters["use_CPU_only"]:
             model.load_state_dict(
                 torch.load(
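After this generalisation, `labels` holds one column of median scores per class plus a trailing argmax column, so the final `np.hstack` must slice from `NUM_CLASSES` rather than the hardcoded `4` (fixed above). A toy sketch of the layout, with synthetic numbers rather than real model output:

```python
import numpy as np

NUM_CLASSES = 4  # matches other_parameters['num_classes']

# synthetic per-point medians of the last NUM_CLASSES network outputs
labels = np.zeros((3, NUM_CLASSES + 1))
labels[:, :NUM_CLASSES] = np.array([
    [0.1, 0.7, 0.1, 0.1],
    [0.2, 0.1, 0.6, 0.1],
    [0.1, 0.1, 0.1, 0.7],
])

# the trailing column holds the winning class index for every point
labels[:, NUM_CLASSES] = np.argmax(labels[:, :NUM_CLASSES], axis=1)
print(labels[:, NUM_CLASSES:].ravel())  # -> [1. 2. 3.]
```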
diff --git a/sean_sem_seg/other_parameters.py b/sean_sem_seg/other_parameters.py
index 7c99795d204e8f4299f3cf2d6c37fe15927865a6..b1f86c14dea9935f119c208446e0cc5b176ae030 100644
--- a/sean_sem_seg/other_parameters.py
+++ b/sean_sem_seg/other_parameters.py
@@ -9,11 +9,14 @@ other_parameters = dict(
     box_overlap=[0.5, 0.5, 0.5],  # Overlap of the sliding box used for semantic segmentation.
     min_points_per_box=1000,  # Minimum number of points for input to the model. Too few points and it becomes near impossible to accurately label them (though assuming vegetation class is the safest bet here).
     max_points_per_box=20000,  # Maximum number of points for input to the model. The model may tolerate higher numbers if you decrease the batch size accordingly (to fit on the GPU), but this is not tested.
+    num_classes=4,  # Number of classes in the model. Don't change this unless you are changing the model.
     noise_class=0,  # Don't change
     terrain_class=1,  # Don't change
     vegetation_class=2,  # Don't change
     cwd_class=3,  # Don't change
     stem_class=4,  # Don't change
+    branch_class=5,  # Don't change
+    low_vegetation_class=6,  # Don't change
     grid_resolution=0.5,  # Resolution of the DTM.
     vegetation_coverage_resolution=0.2,
     num_neighbours=5,
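Note the interplay with the post-processing change below: `branch_class` and `low_vegetation_class` only come into play when `num_classes` is raised to 6; with the default of 4 set here, the two extra labels are inert.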
diff --git a/sean_sem_seg/post_segmentation_script.py b/sean_sem_seg/post_segmentation_script.py
index 380aa83fd1590193e8f31bcbf9611258213468ba..8a09ab5db9af1718e49c05e112d3aa4b0f06b1f2 100644
--- a/sean_sem_seg/post_segmentation_script.py
+++ b/sean_sem_seg/post_segmentation_script.py
@@ -26,6 +26,10 @@ from tools import load_file, save_file, subsample_point_cloud, get_heights_above
 from scipy.interpolate import griddata
 from fsct_exceptions import DataQualityError
 
+from other_parameters import other_parameters
+
+NUMBER_OF_CLASSES = other_parameters["num_classes"]
+
 warnings.filterwarnings("ignore", category=RuntimeWarning)
 
 
@@ -47,6 +51,8 @@ class PostProcessing:
         self.vegetation_class_label = parameters["vegetation_class"]
         self.cwd_class_label = parameters["cwd_class"]
         self.stem_class_label = parameters["stem_class"]
+        self.branch_class_label = parameters["branch_class"]
+        self.low_vegetation_class_label = parameters["low_vegetation_class"]
         print("Loading segmented point cloud...")
         self.point_cloud, self.headers_of_interest = load_file(
             self.output_dir + "segmented.las", headers_of_interest=["x", "y", "z", "red", "green", "blue", "label"]
@@ -106,6 +112,8 @@ class PostProcessing:
         return grid_points
 
     def process_point_cloud(self):
+        print("Processing point cloud...")
+
         self.terrain_points = self.point_cloud[
             self.point_cloud[:, self.label_index] == self.terrain_class_label
         ]  # -2 is now the class label as we added the height above DTM column.
@@ -126,6 +134,8 @@ class PostProcessing:
         self.point_cloud = get_heights_above_DTM(
             self.point_cloud, self.DTM
         )  # Add a height above DTM column to the point clouds.
+
+        # terrain points
         self.terrain_points = self.point_cloud[self.point_cloud[:, self.label_index] == self.terrain_class_label]
         self.terrain_points_rejected = np.vstack(
             (
@@ -146,6 +156,8 @@ class PostProcessing:
             headers_of_interest=self.headers_of_interest,
             silent=False,
         )
+
+        # stem points
         self.stem_points = self.point_cloud[self.point_cloud[:, self.label_index] == self.stem_class_label]
         self.terrain_points = np.vstack(
             (
@@ -167,6 +179,53 @@ class PostProcessing:
             silent=False,
         )
 
+
+        if NUMBER_OF_CLASSES == 6:
+            # branch points
+            self.branch_points = self.point_cloud[self.point_cloud[:, self.label_index] == self.branch_class_label]
+            self.terrain_points = np.vstack(
+                (
+                    self.terrain_points,
+                    self.branch_points[
+                        np.logical_and(
+                            self.branch_points[:, -1] >= -above_and_below_DTM_trim_dist,
+                            self.branch_points[:, -1] <= above_and_below_DTM_trim_dist,
+                        )
+                    ],
+                )
+            )
+            self.branch_points_rejected = self.branch_points[self.branch_points[:, -1] <= above_and_below_DTM_trim_dist]
+            self.branch_points = self.branch_points[self.branch_points[:, -1] > above_and_below_DTM_trim_dist]
+            save_file(
+                self.output_dir + "branch_points.las",
+                self.branch_points,
+                headers_of_interest=self.headers_of_interest,
+                silent=False,
+            )
+
+            # low vegetation points
+            self.low_vegetation_points = self.point_cloud[self.point_cloud[:, self.label_index] == self.low_vegetation_class_label]
+            low_vegetation_threshold = 0.1
+            self.terrain_points = np.vstack(
+                (
+                    self.terrain_points,
+                    self.low_vegetation_points[
+                        np.logical_and(
+                            self.low_vegetation_points[:, -1] >= -low_vegetation_threshold,
+                            self.low_vegetation_points[:, -1] <= low_vegetation_threshold,
+                        )
+                    ],
+                )
+            )
+            self.low_vegetation_points_rejected = self.low_vegetation_points[self.low_vegetation_points[:, -1] <= low_vegetation_threshold]
+            self.low_vegetation_points = self.low_vegetation_points[self.low_vegetation_points[:, -1] > low_vegetation_threshold]
+            save_file(
+                self.output_dir + "low_vegetation_points.las",
+                self.low_vegetation_points,
+                headers_of_interest=self.headers_of_interest,
+                silent=False,
+            )
+            
         self.vegetation_points = self.point_cloud[self.point_cloud[:, self.label_index] == self.vegetation_class_label]
         self.terrain_points = np.vstack(
             (
@@ -222,7 +281,19 @@ class PostProcessing:
         )
 
         self.terrain_points[:, self.label_index] = self.terrain_class_label
-        self.cleaned_pc = np.vstack((self.terrain_points, self.vegetation_points, self.cwd_points, self.stem_points))
+        
+        if NUMBER_OF_CLASSES == 6:
+            self.cleaned_pc = np.vstack((
+                self.terrain_points, 
+                self.vegetation_points,
+                self.cwd_points, 
+                self.stem_points, 
+                self.branch_points, 
+                self.low_vegetation_points
+                ))
+        else:
+            self.cleaned_pc = np.vstack((self.terrain_points, self.vegetation_points, self.cwd_points, self.stem_points))
+
         save_file(
             self.output_dir + "segmented_cleaned.las", self.cleaned_pc, headers_of_interest=self.headers_of_interest
         )
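Both new blocks follow the trim pattern already used for stems: points inside a band around the DTM are reassigned to terrain, points at or below the band top are counted as rejected, and points above it stay in the class (as in the diff, band points appear in both the terrain stack and the rejected set). A condensed sketch of that filter, with illustrative arrays where `h` stands for the height-above-DTM column:

```python
import numpy as np

t = 0.1  # trim band half-width, e.g. low_vegetation_threshold
h = np.array([-0.3, -0.05, 0.02, 0.08, 0.5])  # height above DTM per point

to_terrain = (h >= -t) & (h <= t)  # inside the band: reassigned to terrain
rejected = h <= t                  # at or below the band top: dropped from the class
kept = h > t                       # above the band: stays in the class

# to_terrain -> [False  True  True  True False]
# kept       -> [False False False False  True]
```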