Commit 2a958f0b authored by Maciej Wielgosz

Merge branch 'master' of gitlab.nibio.no:maciekwielgosz/instance_segmentation_classic

parents a9c4b746 34864d8b
.git
build.sh
FROM nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu20.04
# install conda
ARG UBUNTU_VER=20.04
ARG CONDA_VER=latest
ARG OS_TYPE=x86_64
RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
sudo \
libglib2.0-0 \
libsm6 \
libxext6 \
libxrender-dev \
libsndfile1 \
libtiff5 \
&& rm -rf /var/lib/apt/lists/*
RUN curl -LO "http://repo.continuum.io/miniconda/Miniconda3-${CONDA_VER}-Linux-${OS_TYPE}.sh" && \
bash Miniconda3-${CONDA_VER}-Linux-${OS_TYPE}.sh -p /miniconda -b && \
rm Miniconda3-${CONDA_VER}-Linux-${OS_TYPE}.sh
RUN /miniconda/bin/conda update conda
RUN /miniconda/bin/conda init bash
RUN /miniconda/bin/conda create --name pdal-env python=3.8.13
SHELL ["/miniconda/bin/conda", "run", "-n", "pdal-env", "/bin/bash", "-c"]
RUN echo "conda activate pdal-env" >> ~/.bashrc
RUN conda install -c conda-forge pdal==2.4.3 python-pdal==3.1.2
RUN pip install parse oci ocifs
COPY requirements.txt app/requirements.txt
RUN pip install --no-cache -r app/requirements.txt
COPY . /app
WORKDIR /app
ENTRYPOINT ["/miniconda/bin/conda", "run", "-n", "pdal-env", "python", "/app/run_oracle_wrapper.py"]
# CMD ["--help" ]
#!/bin/sh
docker build -t nibio/pc-geoslam-oracle:latest .
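With the Dockerfile above, build.sh produces the image; the commented-out CMD suggests --help as a quick smoke test. A minimal sketch (the image tag comes from build.sh, the rest is an assumption):
# build the image and print the wrapper's argparse help as a smoke test (sketch)
docker build -t nibio/pc-geoslam-oracle:latest .
docker run --rm nibio/pc-geoslam-oracle:latest --help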
@@ -10,6 +10,7 @@ label_formats:
 semantic_segmentation_params:
   sematic_segmentation_script: './run_bash_scripts/sem_seg_sean.sh'
   checkpoint_model_path: './fsct/model/model.pth'
+  batch_size: 5 # batch size for inference
   tile_size: 10 # tile size in meters
   min_density: 100 # minimum density of points in a tile (used for removing small tiles)
   remove_small_tiles: 1 # 1: remove small tiles, 0: not remove small tiles
......
@@ -3,7 +3,7 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1 # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 # Tiling parameters
 data_folder="" # path to the folder containing the data
@@ -201,8 +201,6 @@ for segmented_point_cloud_in_ply in $data_folder/results/segmented_point_clouds/
         --writers.las.extra_dims=all
 done
-python nibio_preprocessing/add_ground_to_inst_seg_folders.py --sem_seg_folder sample_playground/results/segmented_point_clouds/ --inst_seg_folder sample_playground/results/instance_segmented_point_clouds/ --output_folder sample_playground/instance_seg_with_ground --verbose
 # create the instance segmented point clouds with ground folder
 mkdir -p $data_folder/results/instance_segmented_point_clouds_with_ground
......
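For context, the call removed above hard-coded the sample_playground paths. A parameterised equivalent driven by $data_folder would look roughly like the sketch below (the flags are taken from the removed line; the paths are assumptions, not necessarily what the script does now):
# sketch only: same flags as the removed call, but using $data_folder
python nibio_preprocessing/add_ground_to_inst_seg_folders.py \
    --sem_seg_folder "$data_folder/results/segmented_point_clouds/" \
    --inst_seg_folder "$data_folder/results/instance_segmented_point_clouds/" \
    --output_folder "$data_folder/results/instance_seg_with_ground" \
    --verbose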
File suppressed by a .gitattributes entry or the file's encoding is unsupported.
@@ -36,6 +36,7 @@ def main(path_to_config_file):
     sem_seg_args.extend([
         "-d", str(config["general"]["input_folder"]),
         "-c", str(config["semantic_segmentation_params"]["checkpoint_model_path"]),
+        "-b", str(config["semantic_segmentation_params"]["batch_size"]),
         "-t", str(config["semantic_segmentation_params"]["tile_size"]),
         "-m", str(config["semantic_segmentation_params"]["min_density"]),
         "-z", str(config["semantic_segmentation_params"]["remove_small_tiles"])
......
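For reference, the argument list assembled above amounts to an invocation of the semantic-segmentation script along these lines (a sketch: the values are the defaults from config.yaml, and the data folder is illustrative):
# illustrative invocation; the actual values are read from config.yaml at run time
bash ./run_bash_scripts/sem_seg_sean.sh \
    -d ./sample_playground \
    -c ./fsct/model/model.pth \
    -b 5 \
    -t 10 \
    -m 100 \
    -z 1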
run.sh 0 → 100755
#!/bin/bash
docker run --mount type=bind,src='/home/opc/git_repos/instance_segmentation_classic/config/config.yaml',dst='/app/current_config.yaml' --rm nibio/cuda-vscode-conda:latest --path_to_config_file /app/current_config.yaml
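run_oracle_wrapper.py (below) also reads OBJ_INPUT_LOCATION and OBJ_OUTPUT_LOCATION from the environment, so a fuller invocation can pass them with -e. A sketch, assuming the image built by build.sh; the location format (bucket@namespace in the authority, folder in the path) is inferred from the urlparse handling in the wrapper, and <namespace> is a placeholder:
# sketch: pass the object-storage locations to the wrapper via the environment
docker run --rm \
    --mount type=bind,src="$PWD/config/config.yaml",dst=/app/current_config.yaml \
    -e OBJ_INPUT_LOCATION='oci://bucket_lidar_data@<namespace>/geoslam' \
    -e OBJ_OUTPUT_LOCATION='oci://bucket_lidar_data@<namespace>/output' \
    nibio/pc-geoslam-oracle:latest \
    --path_to_config_file /app/current_config.yaml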
@@ -3,11 +3,12 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1 # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 # Parameters for the semantic segmentation
 data_folder="" # path to the folder containing the data
 checkpoint_model_path="./fsct/model/model.pth"
+batch_size=5 # batch size for the inference
 tile_size=10 # tile size in meters
 min_density=75 # minimum density of points in a tile (used for removing small tiles)
 remove_small_tiles=0 # 1: remove small tiles, 0: not remove small tiles
@@ -17,12 +18,14 @@ remove_small_tiles=0 # 1: remove small tiles, 0: not remove small tiles
 # extract tiling parameters as command line arguments with the same default values
 # add remove_small_tiles parameter
-while getopts "d:c:t:m:z:" opt; do
+while getopts "d:c:b:t:m:z:" opt; do
   case $opt in
     d) data_folder="$OPTARG"
     ;;
     c) checkpoint_model_path="$OPTARG"
     ;;
+    b) batch_size="$OPTARG"
+    ;;
     t) tile_size="$OPTARG"
     ;;
     m) min_density="$OPTARG"
@@ -145,7 +148,7 @@ for d in $data_folder/segmented_point_clouds/tiled/*/; do
     python sean_sem_seg/run_single_file.py \
         --model $checkpoint_model_path \
         --point-cloud $f \
-        --batch_size 10 \
+        --batch_size $batch_size \
         --odir $d \
         --verbose \
         # --tile-index $d/tile_index.dat \
......
@@ -3,7 +3,7 @@
 ############################ parameters #################################################
 # General parameters
 CLEAR_INPUT_FOLDER=1 # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env-1" # conda environment for running the pipeline
+CONDA_ENV="pdal-env" # conda environment for running the pipeline
 # Tiling parameters
 data_folder="" # path to the folder containing the data
......
# This is the file to be run on the Oracle cloud
import oci
import argparse
import os
import io
import sys
import json
import shutil
import yaml
from urllib.parse import urlparse
from pathlib import Path
from oci.config import validate_config
from oci.object_storage import ObjectStorageClient
def run_oracle_wrapper(path_to_config_file):
    # read the JSON config file with the OCI credentials
    with open('login_oracle_config.json') as f:
        config = json.load(f)

    # validate the credentials
    validate_config(config)

    # create the object storage client
    client = ObjectStorageClient(config)

    # read the input/output locations from the environment
    # (.get() so a missing variable falls back to the defaults below instead of raising KeyError)
    input_location = os.environ.get('OBJ_INPUT_LOCATION')
    output_location = os.environ.get('OBJ_OUTPUT_LOCATION')

    # resolve the input location
    if input_location is not None:
        print('Taking the input from the location ' + input_location)
        parsed_url = urlparse(input_location)
        input_folder_in_bucket = parsed_url.path[1:]
        input_bucket_name = parsed_url.netloc.split('@')[0]
        input_namespace = parsed_url.netloc.split('@')[1]
    else:
        print('Taking the input from the default location')
        # get the object storage namespace of the tenancy
        input_namespace = client.get_namespace().data
        # default bucket name
        input_bucket_name = 'bucket_lidar_data'
        # default folder name inside the bucket
        input_folder_in_bucket = 'geoslam'

    # resolve the output location
    if output_location is not None:
        print('Saving the output to the location ' + output_location)
        parsed_url = urlparse(output_location)
        output_folder_in_bucket = parsed_url.path[1:]
        output_bucket_name = parsed_url.netloc.split('@')[0]
        output_namespace = parsed_url.netloc.split('@')[1]
    else:
        print('Saving the output to the default location')
        # get the object storage namespace of the tenancy
        output_namespace = client.get_namespace().data
        # default bucket name
        output_bucket_name = 'bucket_lidar_data'
        # default folder name inside the bucket
        output_folder_in_bucket = 'output'

    # read the pipeline config file from the config folder
    with open(path_to_config_file) as f:
        config_flow_params = yaml.load(f, Loader=yaml.FullLoader)

    # copy all files from the bucket to the input folder
    # get the list of objects in the bucket
    objects = client.list_objects(input_namespace, input_bucket_name).data.objects

    # create the input folder if it does not exist
    if not os.path.exists(config_flow_params['general']['input_folder']):
        os.mkdir(config_flow_params['general']['input_folder'])

    # download the files from the bucket to the input folder
    for item in objects:
        if item.name.split('/')[0] == input_folder_in_bucket:
            if not (item.name.split('/')[1] == ''):
                object_name = item.name.split('/')[1]
                print('Downloading the file ' + object_name + ' from the bucket ' + input_bucket_name)
                path_to_object = os.path.join(input_folder_in_bucket, object_name)
                # get the object
                file = client.get_object(input_namespace, input_bucket_name, path_to_object)
                # stream the object to a local file
                with open(object_name, 'wb') as f:
                    for chunk in file.data.raw.stream(1024 * 1024, decode_content=False):
                        f.write(chunk)
                # if the file already exists in the input folder, delete it first
                if os.path.exists(config_flow_params['general']['input_folder'] + '/' + object_name):
                    os.remove(config_flow_params['general']['input_folder'] + '/' + object_name)
                # move the file to the input folder
                shutil.move(object_name, config_flow_params['general']['input_folder'])

    # import here so the pipeline code is only loaded once the data is in place
    from run import main

    # run the main pipeline
    main(path_to_config_file)

    # pick the folder to upload depending on whether instance segmentation was run
    if config_flow_params['general']['run_instance_segmentation']:
        path_to_the_output_folder = os.path.join(config_flow_params['general']['output_folder'], 'instance_segmented_point_clouds')
    else:
        path_to_the_output_folder = config_flow_params['general']['output_folder']

    # get the list of files in the output folder
    list_of_files = os.listdir(path_to_the_output_folder)

    # save the files to the output bucket in the configured subfolder
    for file in list_of_files:
        # full path of the file
        path_to_file = path_to_the_output_folder + '/' + file
        # the file name is reused as the object name inside the output folder
        file_name = file
        # upload the file to the bucket
        client.put_object(
            output_namespace,
            output_bucket_name,
            os.path.join(output_folder_in_bucket, file_name),
            io.open(path_to_file, 'rb')
        )
if __name__ == '__main__':
    # use argparse to get the path to the config file
    parser = argparse.ArgumentParser()
    parser.add_argument("--path_to_config_file", type=str, default="./config/config.yaml")
    args = parser.parse_args()

    # run the main function
    print('Running the main function in run_oracle_wrapper.py')
    run_oracle_wrapper(args.path_to_config_file)