Commit fa0fff84 authored by Maciej Wielgosz

instance seg. metrics progress, chamfer

parent f21da9f7
files_formats:
label_for_instances_in_gt: 'treeID'
label_for_instances_in_predicted:
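The predicted-instance label is left empty in this config fragment. Elsewhere in this commit the predicted clouds carry the 'instance_nr' label (see las_pred.instance_nr and the --instance_label 'instance_nr' invocations below), so a filled-in version would presumably read:

files_formats:
  label_for_instances_in_gt: 'treeID'
  label_for_instances_in_predicted: 'instance_nr'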
# remove all the files in the source folder
SOURCE_FOLDER=/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
rm -rf $SOURCE_FOLDER/*
cp /home/nibio/mutable-outside-world/data/raw_for_pipeline_test/Plot69_2022-06-15_09-08-53_9pct_time.las \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
# change name of the file to first.las
mv /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/Plot69_2022-06-15_09-08-53_9pct_time.las \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/first.las
# copy first.las to second.las
cp /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/first.las \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/second.las
\ No newline at end of file
# remove all the files in the source folder
SOURCE_FOLDER=/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
rm -rf $SOURCE_FOLDER/*
cp /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/playground_data/* \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
\ No newline at end of file
SOURCE_FOLDER=/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
rm -rf $SOURCE_FOLDER/*
cp /home/nibio/mutable-outside-world/data/small_file_pipeline_test/small_file_pipeline_test.las \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
# change name of the file to first.las
mv /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/small_file_pipeline_test.las \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/first.las
# copy first.las to second.las
# cp /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/first.las \
# /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/second.las
\ No newline at end of file
# remove all the files in the source folder
SOURCE_FOLDER=/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
rm -rf $SOURCE_FOLDER/*
cp /home/nibio/mutable-outside-world/data/strange_shape_cloud_for_pipeline_test/2022-08-05_11-03-31_9pct_time_scan.laz \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
# change name of the file to first.laz
mv /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/2022-08-05_11-03-31_9pct_time_scan.laz \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/first.laz
# copy first.laz to second.laz
cp /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/first.laz \
/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/second.laz
\ No newline at end of file
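The four staging scripts above differ only in which cloud they copy into the playground; a minimal parameterized sketch of the shared pattern (stage_input.sh is a hypothetical helper, not part of this commit):

#!/bin/bash
# Usage: ./stage_input.sh /path/to/cloud.las
set -euo pipefail
INPUT_FILE="$1"
SOURCE_FOLDER=/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground
EXT="${INPUT_FILE##*.}"  # keep the original extension (las or laz)
# remove all the files in the source folder, then stage the cloud twice
rm -rf "$SOURCE_FOLDER"/*
cp "$INPUT_FILE" "$SOURCE_FOLDER/first.$EXT"
cp "$SOURCE_FOLDER/first.$EXT" "$SOURCE_FOLDER/second.$EXT"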
import pdal
import json
import os
import argparse
import pandas as pd
class FindMatchesUsingChamfer:
    def __init__(self, gt_folder, pred_folder, verbose=False):
        self.gt_folder = gt_folder
        self.pred_folder = pred_folder
        self.verbose = verbose
    def compare_gt_pred(self, las_gt, las_pred):
        # run pdal chamfer
@@ -16,9 +18,54 @@ class FindMatchesUsingChamfer:
        output = json.loads(output)
        return output['chamfer']
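The hunk above elides how compare_gt_pred actually calls PDAL; the visible json.loads(output) and output['chamfer'] lines suggest it shells out to the pdal chamfer kernel and parses its JSON output. A minimal sketch of what the elided body presumably does (the committed code may differ):

import subprocess  # needed only for this sketch

def compare_gt_pred_sketch(las_gt, las_pred):
    # pdal chamfer prints a JSON object with a 'chamfer' key to stdout
    output = subprocess.check_output(['pdal', 'chamfer', las_gt, las_pred])
    output = json.loads(output)
    return output['chamfer']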
    def run_in_folders(self):
        # get all las files in gt and pred folder
        gt_files = [os.path.join(self.gt_folder, file) for file in os.listdir(self.gt_folder) if file.endswith(".las")]
        pred_files = [os.path.join(self.pred_folder, file) for file in os.listdir(self.pred_folder) if file.endswith(".las")]

        # compare all gt and pred files and store the results in a dictionary
        results = {}
        for gt_file in gt_files:
            for pred_file in pred_files:
                # compute the chamfer distance once per pair (each call runs pdal)
                chamfer = self.compare_gt_pred(gt_file, pred_file)
                if self.verbose:
                    print(gt_file, pred_file, chamfer)
                results[(gt_file, pred_file)] = chamfer

        # sort the results in ascending order of chamfer distance
        results = {k: v for k, v in sorted(results.items(), key=lambda item: item[1])}

        # print the first 10 results
        for i, result in enumerate(results):
            if i < 10:
                print(result, results[result])
            else:
                break

        # save the dictionary as csv using pandas
        df = pd.DataFrame.from_dict(results, orient='index')
        df.to_csv('results_chamfer.csv')
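For reference, the quantity used for matching here is the chamfer distance between two point sets. The textbook two-sided form sums squared nearest-neighbor distances in both directions; this scipy sketch shows only that definition, not PDAL's exact kernel:

import numpy as np
from scipy.spatial import cKDTree

def chamfer_distance(a, b):
    # a: (N, 3) and b: (M, 3) arrays of point coordinates
    d_ab, _ = cKDTree(b).query(a)  # distance from each point of a to its nearest neighbor in b
    d_ba, _ = cKDTree(a).query(b)  # and vice versa
    return np.sum(d_ab ** 2) + np.sum(d_ba ** 2)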
if __name__ == '__main__':
    # use argparse to parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--gt_folder', type=str, required=True, help='Path to the ground truth folder.')
    parser.add_argument('--pred_folder', type=str, required=True, help='Path to the predicted folder.')
    parser.add_argument('--verbose', action='store_true', help='Print the output of pdal chamfer.')
    args = parser.parse_args()
    # run the class
    find_matches = FindMatchesUsingChamfer(args.gt_folder, args.pred_folder, args.verbose)
    find_matches.run_in_folders()
    # gt_file = '/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/input_data/output/0.las'
    # pred_file = '/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/instance_segmented_point_clouds_with_ground/output/0.las'
    # FindMatchesUsingChamfer('', '').compare_gt_pred(gt_file, pred_file)
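A usage example (the script's filename is not shown in this diff, so find_matches_using_chamfer.py is assumed; the folders are the ones used elsewhere in this commit):

python find_matches_using_chamfer.py \
    --gt_folder sample_playground/results/input_data/output \
    --pred_folder sample_playground/results/instance_segmented_point_clouds_with_ground/output \
    --verbose

Note that run_in_folders invokes pdal once per (gt, pred) pair, so runtime grows with the product of the two folder sizes.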
@@ -2,6 +2,7 @@
import glob
import argparse
import os
import joblib
import laspy
import logging
@@ -22,8 +23,6 @@ class InstanceSegmentationMetrics():
        # get different classes from gt and pred
        gt_classes = np.unique(las_gt.treeID)
-       # remove 0 from gt_classes as it is the background class
-       gt_classes = gt_classes[gt_classes != 0]
        pred_classes = np.unique(las_pred.instance_nr)
        # put x, y, z, for different classes in a dictionary
@@ -61,30 +60,24 @@ class InstanceSegmentationMetrics():
        sorted_overlap = sorted(overlap.items(), key=lambda x: x[1], reverse=True)
        sorted_overlap_points = sorted(overlap_points.items(), key=lambda x: x[1], reverse=True)
-        # # print the number of classes in gt and pred
-        # logging.info('Number of classes in gt: {}'.format(len(gt_classes)))
-        # logging.info('Number of classes in pred: {}'.format(len(pred_classes)))
-        # # print the number of points in gt and pred
-        # logging.info('Number of points in gt: {}'.format(sum(gt_dict_points.values())))
-        # logging.info('Number of points in pred: {}'.format(sum(pred_dict_points.values())))
-        # # print the number of points in gt and pred per class
-        # logging.info('Number of points in gt per class: {}'.format(gt_dict_points))
-        # logging.info('Number of points in pred per class: {}'.format(pred_dict_points))
-        # # print the number of overlapping points per class
-        # logging.info('Number of overlapping points per class: {}'.format(overlap_points))
-        # # print sorted overlap
-        # logging.info('Sorted overlap: {}'.format(sorted_overlap))
-        # find overlap between gt 39 and pred 6
-        logging.info('Overlap between gt 39 and pred 6: {}'.format(overlap[(39, 6)]))
-        # find overlap between gt 39 and pred 2
-        logging.info('Overlap between gt 39 and pred 2: {}'.format(overlap[(39, 2)]))
-        # find overlap between gt 9 and other pred classes using subsampling and voxel size 0.1
-        for i in range(1, len(pred_dict)):
-            logging.info('Overlap between gt 9 and pred {} using subsampling and voxel size 0.1: {}'.format(i, self.get_overlap(gt_dict[9], pred_dict[i], subsample=True, voxel_size=0.1)))
+        # # find overlap between gt 0 and pred 0 using subsampling and voxel size 0.5
+        # logging.info('Overlap between gt 0 and pred 0 using subsampling and voxel size 0.5: {}'.format(self.get_overlap(gt_dict[0], pred_dict[0], subsample=True, voxel_size=0.5)))
+        # # find overlap between gt 0 and pred 0 using subsampling and voxel size 0.05
+        # logging.info('Overlap between gt 0 and pred 0 using subsampling and voxel size 0.05: {}'.format(self.get_overlap(gt_dict[0], pred_dict[0], subsample=True, voxel_size=0.05)))
+        # # find overlap between gt 0 and pred 0 without subsampling
+        # logging.info('Overlap between gt 0 and pred 0 without subsampling: {}'.format(self.get_overlap(gt_dict[0], pred_dict[0], subsample=False)))
+        # find overlap between gt 0 and other pred classes using subsampling and voxel size 0.5
+        for i in range(1, len(pred_dict)):
+            logging.info('Overlap between gt 0 and pred {} using subsampling and voxel size 0.5: {}'.format(i, self.get_overlap(gt_dict[0], pred_dict[i], subsample=True, voxel_size=0.5)))
        # print sorted overlap for first 10 classes
        # logging.info('Sorted overlap for classes: {}'.format(sorted_overlap))
@@ -92,16 +85,51 @@ class InstanceSegmentationMetrics():
        # # print best match for classes along with overlap
        # logging.info('Best match for classes: {}'.format(best_match))
-    def get_overlap(self, gt, pred):
-        # compute overlap between gt and pred
+    def get_overlap(self, gt, pred, subsample=False, voxel_size=0.1):
+        # compute overlap between gt and pred
+        # if subsample is True, subsample the point clouds first
+        if subsample:
+            gt = self.subsample(gt, voxel_size)
+            pred = self.subsample(pred, voxel_size)
+        # count the common values in gt and pred
        overlap = np.intersect1d(gt, pred).shape[0]
        # overlap = np.sum(np.all(gt[:, None] == pred, axis=-1), axis=0)
        return overlap
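One caveat: np.intersect1d flattens its inputs, so the overlap above counts shared scalar coordinate values rather than shared 3-D points. A small demonstration:

import numpy as np

gt = np.array([[0.0, 1.0, 2.0]])    # one point
pred = np.array([[2.0, 0.0, 1.0]])  # a different point built from the same values
print(np.intersect1d(gt, pred).shape[0])  # prints 3, although no 3-D point is shared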
    def subsample(self, tensor_x_y_z, voxel_size=0.1):
        logging.info('Subsampling...')
        # voxelize: one integer voxel key per point, plus per-voxel point counts
        non_empty_voxel_keys, inverse, nb_pts_per_voxel = np.unique(
            ((tensor_x_y_z - np.min(tensor_x_y_z, axis=0)) // voxel_size).astype(int),
            axis=0, return_inverse=True, return_counts=True)
        idx_pts_vox_sorted = np.argsort(inverse)
        # start offset of each voxel's block of points within idx_pts_vox_sorted
        voxel_starts = np.concatenate(([0], np.cumsum(nb_pts_per_voxel)[:-1]))

        def grid_subsampling(voxel_indices):
            # for each voxel, keep the point closest to the voxel barycenter
            candidates = []
            for idx in voxel_indices:
                pts = tensor_x_y_z[idx_pts_vox_sorted[voxel_starts[idx]:voxel_starts[idx] + nb_pts_per_voxel[idx]]]
                barycenter = np.mean(pts, axis=0)
                candidates.append(pts[np.linalg.norm(pts - barycenter, axis=1).argmin()])
            return np.array(candidates)

        # use joblib to parallelize the loop over disjoint chunks of voxel indices
        chunks = [c for c in np.array_split(np.arange(len(non_empty_voxel_keys)), 12) if len(c)]
        grid_candidate_center = joblib.Parallel(n_jobs=12)(
            joblib.delayed(grid_subsampling)(chunk) for chunk in chunks)
        # merge the per-chunk results and return the points transposed, shape (3, n_voxels)
        grid_candidate_center = np.concatenate(grid_candidate_center, axis=0)
        new_points = grid_candidate_center.transpose()
        return new_points
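A quick sanity check of the voxel subsampling on synthetic data (metrics is assumed to be an already-constructed InstanceSegmentationMetrics instance):

import numpy as np

points = np.random.rand(10000, 3) * 10.0  # random points in a 10 m cube
sub = metrics.subsample(points, voxel_size=0.5)
# one representative point per occupied voxel, returned transposed as (3, n_voxels)
print(points.shape, sub.shape)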
    def get_metrics_for_all_point_clouds(self):
        # get all las files in gt and pred folders using glob
        las_gt = glob.glob(os.path.join(self.gt_folder, '*.las'))
@@ -116,11 +144,17 @@ class InstanceSegmentationMetrics():
            self.get_metrics_for_single_point_cloud(las_gt[i], las_pred[i])
if __name__ == '__main__':
    # parser = argparse.ArgumentParser()
    # parser.add_argument('--gt_folder', type=str, required=True, help='Path to the folder containing ground truth point clouds.')
    # parser.add_argument('--pred_folder', type=str, required=True, help='Path to the folder containing predicted point clouds.')
    # args = parser.parse_args()
    # # create an instance of InstanceSegmentationMetrics class
    # instance_segmentation_metrics = InstanceSegmentationMetrics(args.gt_folder, args.pred_folder)
    input_file_path = '/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/input_data'
    instance_segmented_file_path = '/home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/instance_segmented_point_clouds_with_ground'
    # create an instance of InstanceSegmentationMetrics class with hard-coded test paths
    instance_segmentation_metrics = InstanceSegmentationMetrics(input_file_path, instance_segmented_file_path)
    instance_segmentation_metrics.get_metrics_for_all_point_clouds()
@@ -5,6 +5,8 @@ import os
from unicodedata import name
import laspy
import numpy as np
import pandas as pd
import pickle

class GetInstancesSideBySide():
@@ -41,8 +43,20 @@ class GetInstancesSideBySide():
        # process each file
        # create a new csv file using pandas
        df = pd.DataFrame(columns=[
            'file_name',
            'instance_label',
            'min_x',
            'max_x',
            'min_y',
            'max_y',
            'min_z',
            'max_z',
            'number_of_points'
        ])

        # create a dictionary to store the points for each instance
        low_ground_points_dict = {}
        for file in files:
@@ -66,6 +80,14 @@ class GetInstancesSideBySide():
            max_y = np.max(points[:, 1])
            max_z = np.max(points[:, 2])

            # allocate an array of the same shape as the original points
            points_low_ground = np.zeros_like(points)
            # shift z so the lowest point sits at 0; x and y stay zero
            points_low_ground[:, 2] = points[:, 2] - min_z
            # keep only the points lower than 0.3 meter
            points_low_ground = points_low_ground[points_low_ground[:, 2] < 0.3]
            # the filtered points are stored in the dictionary below
            # add parameters to the dataframe
            df = df.append(
                {
@@ -80,7 +102,9 @@ class GetInstancesSideBySide():
                    'number_of_points': len(points)
                }, ignore_index=True)

            # add the points to the dictionary
            low_ground_points_dict[str(instance_label)] = points_low_ground

            # zero the coordinates
            points[:, 0] = points[:, 0] - min_x
            points[:, 1] = points[:, 1] - min_y
@@ -100,6 +124,11 @@ class GetInstancesSideBySide():
            las.write(os.path.join(self.output_folder, str(instance_label) + '.las'))

        # write csv file
        df.to_csv(self.stats_file, index=False)

        # save dictionary to pickle file
        with open(os.path.join(self.output_folder, 'low_ground_points_dict.pickle'), 'wb') as handle:
            pickle.dump(low_ground_points_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)

        if self.verbose:
            # print the number of instances which were saved
            print("Saved {} instances".format(len(instance_points)))
@@ -3,7 +3,7 @@
############################ parameters #################################################
# General parameters
CLEAR_INPUT_FOLDER=1 # 1: clear input folder, 0: not clear input folder
-CONDA_ENV="pdal-env" # conda environment for running the pipeline
+CONDA_ENV="pdal-env-1" # conda environment for running the pipeline

# Tiling parameters
N_TILES=3
#!/bin/bash
pdal chamfer sample_playground/results/input_data/output/0.las sample_playground/results/instance_segmented_point_clouds_with_ground/output/3.las
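pdal chamfer prints a small JSON object to stdout, roughly of this shape (values illustrative; only the 'chamfer' key is read by the Python code above):

{
  "filenames": [
    "sample_playground/results/input_data/output/0.las",
    "sample_playground/results/instance_segmented_point_clouds_with_ground/output/3.las"
  ],
  "chamfer": 1234.56,
  "pdal_version": "2.4.3"
}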
#!/bin/bash
python nibio_postprocessing/get_instances_side_by_side.py \
--input_folder /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/instance_segmented_point_clouds/ \
--output_folder /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/instance_segmented_point_clouds/output \
--instance_label 'instance_nr' \
--verbose
# --merge
python nibio_postprocessing/get_instances_side_by_side.py \
--input_folder /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/instance_segmented_point_clouds_with_ground/ \
--output_folder /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/instance_segmented_point_clouds_with_ground/output \
--instance_label 'instance_nr' \
--verbose
# --merge
python nibio_postprocessing/get_instances_side_by_side.py \
--input_folder /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/input_data/ \
--output_folder /home/nibio/mutable-outside-world/code/gitlab_fsct/instance_segmentation_classic/sample_playground/results/input_data/output \
--instance_label 'treeID' \
--verbose
# --merge