diff --git a/config/config.yaml b/config/config.yaml
index 3992d3efd51935f39ecd225bb7006f0551e47957..218457504d7caf8b9e3e145ac4d3afbcd61dfc65 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -10,6 +10,7 @@ label_formats:
 semantic_segmentation_params:
   sematic_segmentation_script: './run_bash_scripts/sem_seg_sean.sh'
   checkpoint_model_path: './fsct/model/model.pth'
+  batch_size: 5 # batch size for inference
   tile_size: 10 # tile size in meters
   min_density: 100 # minimum density of points in a tile(used for removing small tiles)
   remove_small_tiles: 1 # 1: remove small tiles, 0: not remove small tiles
diff --git a/run.py b/run.py
index a008ea3a94ff345e12e3dff0faf1a96aa400f541..41811f063d67425878f10c05503f1ead8e165657 100644
--- a/run.py
+++ b/run.py
@@ -36,6 +36,7 @@ def main(path_to_config_file):
     sem_seg_args.extend([
         "-d", str(config["general"]["input_folder"]),
         "-c", str(config["semantic_segmentation_params"]["checkpoint_model_path"]),
+        "-b", str(config["semantic_segmentation_params"]["batch_size"]),
         "-t", str(config["semantic_segmentation_params"]["tile_size"]),
         "-m", str(config["semantic_segmentation_params"]["min_density"]),
         "-z", str(config["semantic_segmentation_params"]["remove_small_tiles"])
diff --git a/run_bash_scripts/sem_seg_sean.sh b/run_bash_scripts/sem_seg_sean.sh
index 51c33952731db9f622605b4d74c91d0cd80e5f6d..37327d22e00e3e92e4e4d7ab97ae25c8db55a01c 100755
--- a/run_bash_scripts/sem_seg_sean.sh
+++ b/run_bash_scripts/sem_seg_sean.sh
@@ -3,11 +3,12 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1 # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 
 # Parameters for the semetnic segmentation
 data_folder="" # path to the folder containing the data
 checkpoint_model_path="./fsct/model/model.pth"
+batch_size=5 # batch size for the inference
 tile_size=10 # tile size in meters
 min_density=75 # minimum density of points in a tile(used for removing small tiles)
 remove_small_tiles=0 # 1: remove small tiles, 0: not remove small tiles
@@ -17,12 +18,14 @@ remove_small_tiles=0 # 1: remove small tiles, 0: not remove small tiles
 
 # extract tiling parameters as command line arguments with the same default values
 # add remove_small_tiles parameter
-while getopts "d:c:t:m:z:" opt; do
+while getopts "d:c:b:t:m:z:" opt; do
   case $opt in
     d) data_folder="$OPTARG"
     ;;
     c) checkpoint_model_path="$OPTARG"
     ;;
+    b) batch_size="$OPTARG"
+    ;;
     t) tile_size="$OPTARG"
     ;;
     m) min_density="$OPTARG"
@@ -145,7 +148,7 @@ for d in $data_folder/segmented_point_clouds/tiled/*/; do
     python sean_sem_seg/run_single_file.py \
         --model $checkpoint_model_path \
         --point-cloud $f \
-        --batch_size 10 \
+        --batch_size $batch_size \
         --odir $d \
         --verbose \
         # --tile-index $d/tile_index.dat \
diff --git a/run_oracle_wrapper.py b/run_oracle_wrapper.py
index 5f7f1b48f16c589ad4d4e9fbcdcfcc3959ee6a8d..5c6a063ee686284b73b98277bfbdfd6742c4607d 100644
--- a/run_oracle_wrapper.py
+++ b/run_oracle_wrapper.py
@@ -60,6 +60,20 @@ def run_oracle_wrapper(path_to_config_file):
     # run the main function
     main(path_to_config_file)
 
+    # get list of files in the output folder
+    list_of_files = os.listdir(config_flow_params['general']['output_folder'])
+
+    # save files to the output bucket 'bucket_lidar_data' in the subfolder 'output'
+    for file in list_of_files:
+        # get the full path of the file
+        path_to_file = config_flow_params['general']['output_folder'] + '/' + file
+
+        # get the file name
+        file_name = file
+
+        # upload the file to the bucket
+        client.put_object(namespace, bucket_name, 'output/' + file_name, io.open(path_to_file, 'rb'))
+
 if __name__ == '__main__':
     # use argparse to get the path to the config file
     parser = argparse.ArgumentParser()
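One note on the upload loop added to run_oracle_wrapper.py: `io.open(path_to_file, 'rb')` hands `put_object` a file handle that is never explicitly closed. Below is a minimal sketch of an equivalent loop that closes each handle via a context manager, assuming `client` is the `oci.object_storage.ObjectStorageClient` and that `namespace` and `bucket_name` are resolved earlier in the wrapper; the helper name `upload_output_folder` is hypothetical, not part of this PR.

```python
import os

def upload_output_folder(client, namespace, bucket_name, output_folder):
    # hypothetical helper mirroring the loop added in run_oracle_wrapper.py
    for file_name in os.listdir(output_folder):
        path_to_file = os.path.join(output_folder, file_name)
        # the context manager closes the handle once each upload finishes
        with open(path_to_file, 'rb') as f:
            client.put_object(namespace, bucket_name, 'output/' + file_name, f)
```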