diff --git a/sean_sem_seg/post_segmentation_script.py b/sean_sem_seg/post_segmentation_script.py
index 8a09ab5db9af1718e49c05e112d3aa4b0f06b1f2..cead3ae3e4d37d859fbf9daa278329bf0462e444 100644
--- a/sean_sem_seg/post_segmentation_script.py
+++ b/sean_sem_seg/post_segmentation_script.py
@@ -129,7 +129,7 @@ class PostProcessing:
         self.plot_area = self.convexhull.volume / 10000  # volume is area in 2d.
         print("Plot area is approximately", self.plot_area, "ha")
 
-        above_and_below_DTM_trim_dist = 0.2
+        above_and_below_DTM_trim_dist = 0.5  # meters (BEFORE: 0.2)
 
         self.point_cloud = get_heights_above_DTM(
             self.point_cloud, self.DTM
diff --git a/sean_sem_seg/run_single_file.py b/sean_sem_seg/run_single_file.py
index 7961f1acfe3ba5196306b68b7b72d3814d77223e..d0e851afabfc731a19dd65406325cf375c10f1c1 100644
--- a/sean_sem_seg/run_single_file.py
+++ b/sean_sem_seg/run_single_file.py
@@ -76,6 +76,7 @@ if __name__ == "__main__":
         delete_working_directory=True,  # Generally leave this on. Deletes the files used for segmentation after segmentation is finished.
         # You may wish to turn it off if you want to re-run/modify the segmentation code so you don't need to run pre-processing every time.
         minimise_output_size_mode=0,  # Will delete a number of non-essential outputs to reduce storage use.
+        grid_resolution=0.1,  # Resolution of the grid used for the DTM.
     )
 
     parameters.update(other_parameters)
@@ -88,12 +89,12 @@ if __name__ == "__main__":
         parameters=parameters,
         # Set below to 0 or 1 (or True/False). Each step requires the previous step to have been run already.
         # For standard use, just leave them all set to 1 except "clean_up_files".
-        preprocess=1,  # Preparation for semantic segmentation.
-        segmentation=1,  # Deep learning based semantic segmentation of the point cloud.
-        postprocessing=1,  # Creates the DTM and applies some simple rules to clean up the segmented point cloud.
-        measure_plot=0,  # The bulk of the plot measurement happens here.
-        make_report=0,  # Generates a plot report, plot map, and some other figures.
-        clean_up_files=0,
+        preprocess=True,  # Preparation for semantic segmentation.
+        segmentation=True,  # Deep learning based semantic segmentation of the point cloud.
+        postprocessing=True,  # Creates the DTM and applies some simple rules to clean up the segmented point cloud.
+        measure_plot=False,  # The bulk of the plot measurement happens here.
+        make_report=False,  # Generates a plot report, plot map, and some other figures.
+        clean_up_files=False,
     )  # Optionally deletes most of the large point cloud outputs to minimise storage requirements.
 
     # copy the output "segmented_cleaned.las" to the output directory
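
For context on the first hunk: above_and_below_DTM_trim_dist defines a symmetric band around the DTM, and points whose height above the DTM falls outside that band are rejected during post-processing. The snippet below is a minimal, hypothetical sketch of such a band filter, assuming the last column of the point cloud holds the height above the DTM (as the call to get_heights_above_DTM suggests); it illustrates the idea only and is not the code in post_segmentation_script.py.

import numpy as np

above_and_below_DTM_trim_dist = 0.5  # meters, matching the new value in this diff

# Hypothetical point cloud with columns: x, y, z, height_above_DTM.
point_cloud = np.array([
    [0.0, 0.0, 10.0, -0.8],  # far below the DTM -> rejected
    [1.0, 0.0, 10.3,  0.1],  # within +/- 0.5 m  -> kept
    [2.0, 0.0, 12.0,  1.7],  # far above the DTM -> rejected
])

heights = point_cloud[:, -1]
within_band = np.abs(heights) <= above_and_below_DTM_trim_dist
kept = point_cloud[within_band]
rejected = point_cloud[~within_band]

print(kept.shape[0], "points kept,", rejected.shape[0], "points rejected")

Widening the band from 0.2 m to 0.5 m keeps more points near the ground surface, which is consistent with the coarser 0.1 m DTM grid_resolution added in run_single_file.py.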