diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..515b24ada4fc00440dbad4fb62b7e237b7a5f714
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+.git
+build.sh
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..23a4071edaa554ab608117eefc3f3c13f8b3dab5
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,49 @@
+FROM nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu20.04
+
+# install conda
+ARG UBUNTU_VER=20.04
+ARG CONDA_VER=latest
+ARG OS_TYPE=x86_64
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    curl \
+    sudo \
+    libglib2.0-0 \
+    libsm6 \
+    libxext6 \
+    libxrender-dev \
+    libsndfile1 \
+    libtiff5 \
+    && rm -rf /var/lib/apt/lists/* 
+
+
+RUN curl -LO "https://repo.anaconda.com/miniconda/Miniconda3-${CONDA_VER}-Linux-${OS_TYPE}.sh" && \
+    bash Miniconda3-${CONDA_VER}-Linux-${OS_TYPE}.sh -p /miniconda -b && \
+    rm Miniconda3-${CONDA_VER}-Linux-${OS_TYPE}.sh 
+
+RUN /miniconda/bin/conda update conda 
+
+RUN /miniconda/bin/conda init bash
+RUN /miniconda/bin/conda create --name pdal-env python=3.8.13
+
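+# run all subsequent RUN instructions (and the entrypoint below) inside the pdal-env conda environment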
+SHELL ["/miniconda/bin/conda", "run", "-n", "pdal-env", "/bin/bash", "-c"]
+
+RUN echo "conda activate pdal-env" >> ~/.bashrc
+
+RUN conda install -c conda-forge pdal==2.4.3 python-pdal==3.1.2
+
+RUN pip install parse oci ocifs
+
+COPY requirements.txt app/requirements.txt
+RUN pip install --no-cache-dir -r app/requirements.txt
+
+COPY . /app
+
+WORKDIR /app
+
+ENTRYPOINT ["/miniconda/bin/conda", "run", "-n", "pdal-env", "python", "/app/run_oracle_wrapper.py"]
+
+
+# CMD ["--help" ]
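+# any arguments given after the image name at `docker run` are appended to the entrypoint,
+# e.g. --path_to_config_file /app/current_config.yaml (see run.sh)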
+
+
diff --git a/build.sh b/build.sh
new file mode 100755
index 0000000000000000000000000000000000000000..21f3b5c7b14fb257695c7a1d5ac45eeaa41c0269
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+docker build -t nibio/pc-geoslam-oracle:latest .
diff --git a/config/config.yaml b/config/config.yaml
index fc9e44d3de6f0776dd5450698dc6d5626183b2a3..22812fdeb7b4caa6c725456964d0d03d5a599aa7 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -10,6 +10,7 @@ label_formats:
 semantic_segmentation_params:
   sematic_segmentation_script: './run_bash_scripts/sem_seg_sean.sh'
   checkpoint_model_path: './fsct/model/model.pth'
+  batch_size: 5 # batch size for inference
   tile_size: 10 # tile size in meters
   min_density: 100 # minimum density of points in a tile(used for removing small tiles)
   remove_small_tiles: 1 # 1: remove small tiles, 0: not remove small tiles
diff --git a/optimization_pipeline/tls.sh b/optimization_pipeline/tls.sh
index 996c1fd826cc19598ee5e7858f550eb06efa567b..2e3a19a031dc8cc54051261c80875524b2045e3d 100755
--- a/optimization_pipeline/tls.sh
+++ b/optimization_pipeline/tls.sh
@@ -3,7 +3,7 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1  # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 
 # Tiling parameters
 data_folder="" # path to the folder containing the data
@@ -201,8 +201,6 @@ for segmented_point_cloud_in_ply in $data_folder/results/segmented_point_clouds/
     --writers.las.extra_dims=all
 done
 
-python nibio_preprocessing/add_ground_to_inst_seg_folders.py --sem_seg_folder sample_playground/results/segmented_point_clouds/ --inst_seg_folder sample_playground/results/instance_segmented_point_clouds/ --output_folder sample_playground/instance_seg_with_ground --verbose
-
 # create the instance segmented point clouds with ground folder
 mkdir -p $data_folder/results/instance_segmented_point_clouds_with_ground
 
diff --git a/requirements.txt b/requirements.txt
index 5eee61352f1e6c101f7c30e1d72d92191974885f..9e761f979fd0727e61cbb3784f62c165ce705528 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/run.py b/run.py
index a008ea3a94ff345e12e3dff0faf1a96aa400f541..41811f063d67425878f10c05503f1ead8e165657 100644
--- a/run.py
+++ b/run.py
@@ -36,6 +36,7 @@ def main(path_to_config_file):
         sem_seg_args.extend([
             "-d", str(config["general"]["input_folder"]),
             "-c", str(config["semantic_segmentation_params"]["checkpoint_model_path"]),
+            "-b", str(config["semantic_segmentation_params"]["batch_size"]),
             "-t", str(config["semantic_segmentation_params"]["tile_size"]),
             "-m", str(config["semantic_segmentation_params"]["min_density"]),
             "-z", str(config["semantic_segmentation_params"]["remove_small_tiles"])
diff --git a/run.sh b/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..90fc3c2294a0670e9d86efe69ffde6563021e86a
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
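+# bind-mount the local config into the container and hand it to the entrypoint;
+# adjust the src path to wherever this repository is checked out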
+docker run --mount type=bind,src='/home/opc/git_repos/instance_segmentation_classic/config/config.yaml',dst='/app/current_config.yaml' --rm nibio/cuda-vscode-conda:latest --path_to_config_file /app/current_config.yaml
diff --git a/run_bash_scripts/sem_seg_sean.sh b/run_bash_scripts/sem_seg_sean.sh
index 51c33952731db9f622605b4d74c91d0cd80e5f6d..37327d22e00e3e92e4e4d7ab97ae25c8db55a01c 100755
--- a/run_bash_scripts/sem_seg_sean.sh
+++ b/run_bash_scripts/sem_seg_sean.sh
@@ -3,11 +3,12 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1  # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 
 # Parameters for the semantic segmentation
 data_folder="" # path to the folder containing the data
 checkpoint_model_path="./fsct/model/model.pth"
+batch_size=5 # batch size for the inference
 tile_size=10 # tile size in meters
 min_density=75 # minimum density of points in a tile(used for removing small tiles)
 remove_small_tiles=0 # 1: remove small tiles, 0: not remove small tiles
@@ -17,12 +18,14 @@ remove_small_tiles=0 # 1: remove small tiles, 0: not remove small tiles
 # extract tiling parameters as command line arguments with the same default values
 
 # add remove_small_tiles parameter
-while getopts "d:c:t:m:z:" opt; do
+while getopts "d:c:b:t:m:z:" opt; do
   case $opt in
     d) data_folder="$OPTARG"
     ;;
     c) checkpoint_model_path="$OPTARG"
     ;;
+    b) batch_size="$OPTARG"
+    ;;
     t) tile_size="$OPTARG"
     ;;
     m) min_density="$OPTARG"
@@ -145,7 +148,7 @@ for d in $data_folder/segmented_point_clouds/tiled/*/; do
         python sean_sem_seg/run_single_file.py \
         --model $checkpoint_model_path \
         --point-cloud $f \
-        --batch_size 10 \
+        --batch_size $batch_size \
         --odir $d \
         --verbose \
         # --tile-index $d/tile_index.dat \
diff --git a/run_bash_scripts/tls.sh b/run_bash_scripts/tls.sh
index 9f76038b7a1759d0301abd1c1e758c875813126c..2e3a19a031dc8cc54051261c80875524b2045e3d 100755
--- a/run_bash_scripts/tls.sh
+++ b/run_bash_scripts/tls.sh
@@ -3,7 +3,7 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1  # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 
 # Tiling parameters
 data_folder="" # path to the folder containing the data
diff --git a/run_oracle_wrapper.py b/run_oracle_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..550aabffe533773bb8b652440355cebd7bb4c729
--- /dev/null
+++ b/run_oracle_wrapper.py
@@ -0,0 +1,139 @@
+# This is the file to be run on the Oracle cloud
+
+import oci
+import argparse
+import os
+import io
+import sys
+import json
+import shutil
+import yaml
+from urllib.parse import urlparse
+from pathlib import Path
+from oci.config import validate_config
+from oci.object_storage import ObjectStorageClient
+
+
+def run_oracle_wrapper(path_to_config_file):
+    # read the config file with the credentials with json format
+    with open('login_oracle_config.json') as f:
+        config = json.load(f)
+
+    # validate the config file
+    validate_config(config)
+
+    # create the client
+    client = ObjectStorageClient(config)
+
+    # read the optional environment variables (None when not set, so the checks below work)
+    input_location = os.environ.get('OBJ_INPUT_LOCATION')
+    output_location = os.environ.get('OBJ_OUTPUT_LOCATION')
+
+    # resolve the input namespace, bucket and folder
+    if input_location is not None:
+        print('Taking the input from the location ' + input_location)
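+        # locations are expected in the form <scheme>://<bucket>@<namespace>/<folder>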
+        parsed_url = urlparse(input_location)
+        input_folder_in_bucket = parsed_url.path[1:]
+        input_bucket_name = parsed_url.netloc.split('@')[0]
+        input_namespace = parsed_url.netloc.split('@')[1]
+
+    else:
+        print('Taking the input from the default location')
+        # get the tenancy's object storage namespace
+        input_namespace = client.get_namespace().data
+        # get the bucket name
+        input_bucket_name = 'bucket_lidar_data'
+        # folder name inside the bucket
+        input_folder_in_bucket = 'geoslam'
+
+    # resolve the output namespace, bucket and folder
+    if output_location is not None:
+        print('Saving the output to the location ' + output_location)
+        parsed_url = urlparse(output_location)
+        output_folder_in_bucket = parsed_url.path[1:]
+        output_bucket_name = parsed_url.netloc.split('@')[0]
+        output_namespace = parsed_url.netloc.split('@')[1]
+
+    else:
+        print('Saving the output to the default location')
+        # get the tenancy's object storage namespace
+        output_namespace = client.get_namespace().data
+        # get the bucket name
+        output_bucket_name = 'bucket_lidar_data'
+        # folder name inside the bucket
+        output_folder_in_bucket = 'output'
+
+    # read the config file from config folder
+    with open(path_to_config_file) as f:
+        config_flow_params = yaml.load(f, Loader=yaml.FullLoader)
+
+    # copy all files from the bucket to the input folder
+    # get the list of objects in the bucket
+    objects = client.list_objects(input_namespace, input_bucket_name).data.objects
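+    # note: only the first page of the listing is used; pagination is not handled here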
+
+    # create the input folder if it does not exist
+    if not os.path.exists(config_flow_params['general']['input_folder']):
+        os.mkdir(config_flow_params['general']['input_folder'])
+
+    # download the files from the bucket to the input folder
+    for item in objects:
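+        # only objects directly under the input folder (one level deep) are downloaded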
+        if item.name.split('/')[0] == input_folder_in_bucket:
+            if item.name.split('/')[1] != '':
+                object_name = item.name.split('/')[1]
+
+                print('Downloading the file ' + object_name + ' from the bucket ' + input_bucket_name)
+                path_to_object = os.path.join(input_folder_in_bucket, object_name)
+                # get the object
+                file = client.get_object(input_namespace, input_bucket_name, path_to_object)
+
+                # write the object to a file
+                with open(object_name, 'wb') as f:
+                    for chunk in file.data.raw.stream(1024 * 1024, decode_content=False):
+                        f.write(chunk)
+
+                # check if the file already exists in the input folder and delete it if it does
+                if os.path.exists(config_flow_params['general']['input_folder'] + '/' + object_name):
+                    os.remove(config_flow_params['general']['input_folder'] + '/' + object_name)
+                # move the file to the input folder and overwrite if it already exists
+                shutil.move(object_name, config_flow_params['general']['input_folder'])
+
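+    # import the pipeline entry point only after the input data has been staged locally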
+    from run import main
+
+    # run the main function
+    main(path_to_config_file)
+
+    # if instance segmentation was run, the results are in the 'instance_segmented_point_clouds' subfolder
+    if config_flow_params['general']['run_instance_segmentation']:
+        path_to_the_output_folder = os.path.join(config_flow_params['general']['output_folder'], 'instance_segmented_point_clouds')
+    else:
+        path_to_the_output_folder = config_flow_params['general']['output_folder']
+
+    # get list of files in the output folder
+    list_of_files = os.listdir(path_to_the_output_folder)
+
+    # upload the result files to the output bucket and folder resolved above
+    for file_name in list_of_files:
+        # get the full path of the file
+        path_to_file = os.path.join(path_to_the_output_folder, file_name)
+
+        # upload the file to the bucket
+        with io.open(path_to_file, 'rb') as file_data:
+            client.put_object(
+                output_namespace,
+                output_bucket_name,
+                os.path.join(output_folder_in_bucket, file_name),
+                file_data)
+
+if __name__ == '__main__':
+    # use argparse to get the path to the config file
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--path_to_config_file", type=str, default="./config/config.yaml")
+    args = parser.parse_args()
+
+    # run the main function
+    print('Running the main function in run_oracle_wrapper.py')
+    run_oracle_wrapper(args.path_to_config_file)
+