diff --git a/build.sh b/build.sh
index 21f3b5c7b14fb257695c7a1d5ac45eeaa41c0269..b7d1484934c4e7754009e77ddb480731024eb7f6 100755
--- a/build.sh
+++ b/build.sh
@@ -1,3 +1,5 @@
-#/bin/sh
+#!/bin/sh
-
+# docker build -t nibio/pc-geoslam-oracle-inst-seg:latest .
+# docker build -t nibio/pc-geoslam-oracle-sem-seg:latest .
 docker build -t nibio/pc-geoslam-oracle:latest .
+
diff --git a/config/config.yaml b/config/config.yaml
index 2ef3fc019666164d439d6ef30b412777c8da15dd..3bba841b0edaadced1ad680241c8602771681a8e 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -1,9 +1,9 @@
 general:
   input_folder: './local_input_folder'
   output_folder: './local_output_folder'
-  clean_output_folder: true
+  clean_output_folder: false
   run_sematic_segmentation: true
-  run_instance_segmentation: true
+  run_instance_segmentation: false
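+  # with these settings only the semantic segmentation stage runs and
+  # existing outputs are kept (clean_output_folder: false)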
 label_formats:
   label_for_instances_in_gt: 'treeID'
   label_for_instances_in_predicted: 'instance_nr'
diff --git a/run_bash_scripts/sem_seg_sean.sh b/run_bash_scripts/sem_seg_sean.sh
index 33ac27667abb60cc4b72f733efdc017b63e51964..1b9d1fe4bee713c5f16ef80999460e4d2844e8c1 100755
--- a/run_bash_scripts/sem_seg_sean.sh
+++ b/run_bash_scripts/sem_seg_sean.sh
@@ -3,7 +3,7 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1  # 1: clear input folder, 0: do not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 
 # Parameters for the semantic segmentation
 data_folder="" # path to the folder containing the data
diff --git a/run_bash_scripts/tls.sh b/run_bash_scripts/tls.sh
index 9f76038b7a1759d0301abd1c1e758c875813126c..2e3a19a031dc8cc54051261c80875524b2045e3d 100755
--- a/run_bash_scripts/tls.sh
+++ b/run_bash_scripts/tls.sh
@@ -3,7 +3,7 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1  # 1: clear input folder, 0: do not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 
 # Tiling parameters
 data_folder="" # path to the folder containing the data
diff --git a/run_oracle_wrapper.py b/run_oracle_wrapper.py
index 550aabffe533773bb8b652440355cebd7bb4c729..097808ed7e144e2796f484b451a8a2b3b9ec26db 100644
--- a/run_oracle_wrapper.py
+++ b/run_oracle_wrapper.py
@@ -26,8 +26,15 @@ def run_oracle_wrapper(path_to_config_file):
     client = ObjectStorageClient(config)
 
     # read system environment variables
-    input_location = os.environ['OBJ_INPUT_LOCATION']
-    output_location = os.environ['OBJ_OUTPUT_LOCATION']
+    # input_location = os.environ['OBJ_INPUT_LOCATION']
+    # output_location = os.environ['OBJ_OUTPUT_LOCATION']
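+    # NOTE: the environment-variable lookup above is commented out; the
+    # locations below are hardcoded for a specific test run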
+
+    input_location = "oci://forestsens_temp@axqlz2potslu/acc_6/batch_274/original_las_files"
+    output_location = "oci://maciej-seg-test-out@axqlz2potslu"
 
     # handle the provided input location
     if input_location is not None:
@@ -37,6 +44,10 @@ def run_oracle_wrapper(path_to_config_file):
         input_bucket_name = parsed_url.netloc.split('@')[0]
         input_namespace = parsed_url.netloc.split('@')[1]
 
+        print("input_folder_in_bucket: ", input_folder_in_bucket)
+        print("input_bucket_name: ", input_bucket_name)
+        print("input_namespace: ", input_namespace)
+
     else:
         print('Taking the input from the default location')
         # get the input_namespace
@@ -69,7 +80,16 @@ def run_oracle_wrapper(path_to_config_file):
 
     # copy all files from the bucket to the input folder
     # get the list of objects in the bucket
-    objects = client.list_objects(input_namespace, input_bucket_name).data.objects
+    list_objects_response = client.list_objects(
+        namespace_name=input_namespace,
+        bucket_name=input_bucket_name,
+        prefix=input_folder_in_bucket,
+        limit=1000)
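+    # NOTE: list_objects returns at most one page of results (the service
+    # caps limit at 1000); for larger buckets, paginate by passing the
+    # response's next_start_with value back as the start parameter.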
+
+    objects = list_objects_response.data.objects
+    # keep only file objects; skip folder placeholder entries (names ending in '/')
+    objects = [item for item in objects if item.name[-1] != '/']
 
     # create the input folder if it does not exist
     if not os.path.exists(config_flow_params['general']['input_folder']):
@@ -77,25 +97,26 @@ def run_oracle_wrapper(path_to_config_file):
 
     # download the files from the bucket to the input folder
     for item in objects:
-        if item.name.split('/')[0] == input_folder_in_bucket:
-            if not (item.name.split('/')[1] == ''):
-                object_name = item.name.split('/')[1]
-
-                print('Downloading the file ' + object_name + ' from the bucket ' + input_bucket_name)
-                path_to_object = os.path.join(input_folder_in_bucket, object_name)
-                # get the object
-                file = client.get_object(input_namespace, input_bucket_name, path_to_object)
-
-                # write the object to a file
-                with open(object_name, 'wb') as f:
-                    for chunk in file.data.raw.stream(1024 * 1024, decode_content=False):
-                        f.write(chunk)
-
-                # check if the file already exists in the input folder and delete it if it does
-                if os.path.exists(config_flow_params['general']['input_folder'] + '/' + object_name):
-                    os.remove(config_flow_params['general']['input_folder'] + '/' + object_name)
-                # move the file to the input folder and overwrite if it already exists
-                shutil.move(object_name, config_flow_params['general']['input_folder'])
+        object_name = item.name
+        print("object name: ", object_name)
+
+        # Get the object's data
+        file = client.get_object(input_namespace, input_bucket_name, object_name)
+
+        # Use only the base name of the object for the local file
+        local_file_name = os.path.basename(object_name)
+
+        # stream the object into a local file in 1 MiB chunks
+        with open(local_file_name, 'wb') as f:
+            for chunk in file.data.raw.stream(1024 * 1024, decode_content=False):
+                f.write(chunk)
+
+        # remove any stale copy of the file from the input folder
+        if os.path.exists(os.path.join(config_flow_params['general']['input_folder'], local_file_name)):
+            os.remove(os.path.join(config_flow_params['general']['input_folder'], local_file_name))
+
+        # move the file to the input folder and overwrite if it already exists
+        shutil.move(local_file_name, config_flow_params['general']['input_folder'])
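+        # NOTE: objects in different subfolders that share a base name will
+        # overwrite one another in the input folder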
 
     from run import main
 
@@ -104,7 +125,7 @@ def run_oracle_wrapper(path_to_config_file):
 
     # instance segmentation is set to true
     if config_flow_params['general']['run_instance_segmentation']:
-        path_to_the_output_folder = os.path.join(config_flow_params['general']['output_folder'], 'instance_segmented_point_clouds')
+        path_to_the_output_folder = os.path.join(config_flow_params['general']['output_folder'], 'instance_segmented_point_clouds_with_ground')
     else:
         path_to_the_output_folder = config_flow_params['general']['output_folder']
 
@@ -114,16 +135,13 @@ def run_oracle_wrapper(path_to_config_file):
     # save files to the output bucket 'bucket_lidar_data' in the subfolder 'output'
     for file in list_of_files:
         # get the full path of the file
-        path_to_file = path_to_the_output_folder + '/' + file
-
-        # get the file name
-        file_name = file
+        path_to_file = os.path.join(path_to_the_output_folder, file)
 
         # upload the file to the bucket
         client.put_object(
             output_namespace, 
             output_bucket_name, 
-            os.path.join(output_folder_in_bucket, file_name), 
+            os.path.join(output_folder_in_bucket, file), 
             io.open(path_to_file, 'rb')
             )
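+
+        # NOTE: put_object accepts any readable binary stream; the handle
+        # opened with io.open above is not closed explicitly and is left to
+        # the garbage collector.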