Commit b889264d authored by Maciej Wielgosz's avatar Maciej Wielgosz

train forest and visualize ShapeNet data

parent 49379a4b
%% Cell type:code id: tags:
``` python
from dataset import PartNormalDataset
from torch.utils.data import DataLoader

root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

# Quick smoke test: load the trainval split with only 4 points per shape.
train_dataset = PartNormalDataset(root=root, npoints=4, split='trainval', normal_channel=True)
sample = train_dataset[0]  # equivalent to train_dataset.__getitem__(0)
print(sample)
```
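%% Cell type:markdown id: tags:

With `normal_channel=True`, each item should come back as a `(point_set, cls, seg)` triple (see the `PartNormalDataset.__getitem__` diff further down): an `npoints x 6` array of xyz coordinates plus normals, the shape's category index, and a per-point part label.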
%% Cell type:code id: tags:
``` python
from pointnet_util import index_points, square_distance
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class TransformerBlock(nn.Module):
    def __init__(self, d_points=32, d_model=512, k=16) -> None:
        # d_points: feature dimension of the input points
        # d_model: internal feature dimension of the attention layers
        # k: number of nearest neighbors attended to
        super().__init__()
        self.fc1 = nn.Linear(d_points, d_model)
        self.fc2 = nn.Linear(d_model, d_points)
        self.fc_delta = nn.Sequential(
            nn.Linear(3, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model)
        )
        self.fc_gamma = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model)
        )
        self.w_qs = nn.Linear(d_model, d_model, bias=False)
        self.w_ks = nn.Linear(d_model, d_model, bias=False)
        self.w_vs = nn.Linear(d_model, d_model, bias=False)
        self.k = k

    # xyz: b x n x 3, features: b x n x f
    def forward(self, xyz, features):
        dists = square_distance(xyz, xyz)
        knn_idx = dists.argsort()[:, :, :self.k]  # b x n x k
        knn_xyz = index_points(xyz, knn_idx)

        pre = features
        x = self.fc1(features)
        q, k, v = self.w_qs(x), index_points(self.w_ks(x), knn_idx), index_points(self.w_vs(x), knn_idx)

        # relative positional encoding delta(p_i - p_j)
        pos_enc = self.fc_delta(xyz[:, :, None] - knn_xyz)  # b x n x k x f

        # vector attention: one weight per feature channel, normalized over the k neighbors
        attn = self.fc_gamma(q[:, :, None] - k + pos_enc)
        attn = F.softmax(attn / np.sqrt(k.size(-1)), dim=-2)  # b x n x k x f

        res = torch.einsum('bmnf,bmnf->bmf', attn, v + pos_enc)
        res = self.fc2(res) + pre  # residual connection back to the input features
        return res, attn


# Sample input: one batch of 3 points; d_points must match the last dimension
# of `features`, and xyz and features must share the same number of points.
xyz = torch.tensor([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]])
features = torch.tensor([[[0.2, 0.3, 0.4], [0.6, 0.7, 0.8], [1.0, 1.1, 1.2]]])

# Create a TransformerBlock instance and run a forward pass
transformer_block = TransformerBlock(d_points=3, d_model=4, k=2)
output, attn = transformer_block(xyz, features)

print("Output shape:", output.shape)    # 1 x 3 x 3
print("Attention shape:", attn.shape)   # 1 x 3 x 2 x 4
```
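%% Cell type:markdown id: tags:

For reference, the block above appears to follow the Point Transformer vector-attention formulation. Up to the $1/\sqrt{d}$ scaling and the `fc1`/`fc2` projections, `forward` computes (with $\varphi$ = `w_qs`, $\psi$ = `w_ks`, $\alpha$ = `w_vs`, $\gamma$ = `fc_gamma`, $\theta$ = `fc_delta`):

$$
y_i = \sum_{x_j \in \mathcal{N}(x_i)} \operatorname{softmax}_j\!\big(\gamma(\varphi(x_i) - \psi(x_j) + \delta_{ij})\big) \odot \big(\alpha(x_j) + \delta_{ij}\big),
\qquad \delta_{ij} = \theta(p_i - p_j),
$$

where $\mathcal{N}(x_i)$ is the set of $k$ nearest neighbors of point $i$ and the softmax normalizes over those neighbors.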
%% Cell type:code id: tags:
``` python
from nibio_transformer_semantic.dataset import Dataset
import torch

dataset = Dataset(
    root='data/forest_txt/validation_txt/',
    npoints=1024,
    normal_channel=True
)

trainDataLoader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=10, drop_last=True)

# print (points, labels) shapes for the first batches, stopping after at most 24
for i, data in enumerate(trainDataLoader):
    if i == 24:
        break
    print(data[0].shape, data[1].shape)
```
%% Output
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
torch.Size([1, 1024, 6]) torch.Size([1, 1024])
%% Cell type:code id: tags:
``` python
import torch

def to_categorical(y, num_classes):
    """ 1-hot encodes a tensor of class indices """
    print("num_classes: ", num_classes)
    print("y: ", y)
    new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
    if y.is_cuda:
        return new_y.cuda()
    return new_y

y = torch.tensor([0, 1, 2, 3, 4])
print(to_categorical(y, 10))
```
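%% Cell type:markdown id: tags:

Since `y = [0, 1, 2, 3, 4]` and `num_classes = 10`, the call returns a `5 x 10` tensor whose rows are the first five rows of the 10 x 10 identity matrix, one row per class index in `y`.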
%% Cell type:code id: tags:
``` python
from dataset import PartNormalDataset
import torch
root = '/home/nibio/mutable-outside-world/code/oracle_gpu_runs/data/shapenetcore_partanno_segmentation_benchmark_v0_normal'
TEST_DATASET = PartNormalDataset(root=root, npoints=2, split='test', normal_channel=True)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=1, shuffle=False, num_workers=1)
TEST_DATASET.datapath[3][1]
```
%% Output
'/home/nibio/mutable-outside-world/code/oracle_gpu_runs/data/shapenetcore_partanno_segmentation_benchmark_v0_normal/02691156/14cd2f1de7f68bf3ab550998f901c8e1.txt'
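%% Cell type:markdown id: tags:

Each `datapath` entry appears to be a `(category, filepath)` tuple, so `datapath[3][1]` returns the source `.txt` file of the fourth test sample.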
@@ -163,9 +163,6 @@ class PartNormalDataset(Dataset):
             point_set = point_set[choice, :]
             seg = seg[choice]
-            print("cls", cls)
-            print("seg", seg)
             return point_set, cls, seg

     def __len__(self):
@@ -47,6 +47,9 @@ class Las2TextMapper:
         # put all together to pandas dataframe
         points = pd.DataFrame(points, columns=['x', 'y', 'z', 'red', 'green', 'blue', 'label', 'treeID'])

+        # reduce label to 0, 1, 2, 3
+        points['label'] = points['label'] - 1
+
         return points

     def process_single_file(self, filepath):

@@ -75,9 +78,11 @@ class Las2TextMapper:
         """
         # read all las files in the folder data_dir using glob
         list_of_files = glob.glob(self.data_dir + "/*.las", recursive=False)

-        Parallel(n_jobs=8)(delayed(self.process_single_file)(filepath) for filepath in list_of_files)
+        Parallel(n_jobs=-1)(
+            delayed(self.process_single_file)(filepath) for filepath in list_of_files
+        )

         if self.verbose:
             print("Done processing the folder")
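The switch from `n_jobs=8` to `n_jobs=-1` asks joblib to use one worker per available CPU core instead of a fixed pool of eight. A minimal standalone sketch of the same pattern (the `square` worker is a hypothetical stand-in for `process_single_file`):

```python
from joblib import Parallel, delayed

def square(x):
    # stand-in for a per-file worker such as process_single_file
    return x * x

# n_jobs=-1 -> one worker per available CPU core
results = Parallel(n_jobs=-1)(delayed(square)(x) for x in range(8))
print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]
```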
@@ -137,6 +137,14 @@ def main(args):
             points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
             optimizer.zero_grad()

+            # print("target shape ..: ", target.shape)
+            # print("points shape ..: ", points.shape)
+            # print("label shape ...: ", label.shape)
+            # print(" points.shape[1] : ", points.shape[1])
+            # print("to_categorical(label, num_category): ", to_categorical(label, num_category).shape)
+            # print("to_categorical(label, num_category).repeat(1, points.shape[1], 1): ", to_categorical(label, num_category).repeat(1, points.shape[1], 1).shape)
+
             seg_pred = classifier(torch.cat([points, to_categorical(label, num_category).repeat(1, points.shape[1], 1)], -1))
             seg_pred = seg_pred.contiguous().view(-1, num_part)
             target = target.view(-1, 1)[:, 0]
@@ -16,14 +16,12 @@ import numpy as np
 from pathlib import Path
 from tqdm import tqdm
 from dataset import PartNormalDataset
+from nibio_transformer_semantic.dataset import Dataset
 import hydra
 import omegaconf

-seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
-               'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37],
-               'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49],
-               'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
+seg_classes = {'tree': [0, 1, 2, 4]}

 seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
 for cat in seg_classes.keys():
     for label in seg_classes[cat]:
@@ -57,17 +55,18 @@ def main(args):
     # use pretty print to print the config
-    root = hydra.utils.to_absolute_path('data/shapenetcore_partanno_segmentation_benchmark_v0_normal/')
+    train_dataset = hydra.utils.to_absolute_path('data/forest_txt/validation_txt/')
+    test_dataset = hydra.utils.to_absolute_path('data/forest_txt/validation_txt/')

-    TRAIN_DATASET = PartNormalDataset(root=root, npoints=args.num_point, split='trainval', normal_channel=args.normal)
+    TRAIN_DATASET = Dataset(root=train_dataset, npoints=args.num_point, normal_channel=args.normal)
     trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
-    TEST_DATASET = PartNormalDataset(root=root, npoints=args.num_point, split='test', normal_channel=args.normal)
+    TEST_DATASET = Dataset(root=test_dataset, npoints=args.num_point, normal_channel=args.normal)
     testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=10)

     '''MODEL LOADING'''
     args.input_dim = (6 if args.normal else 3) + 16
-    args.num_class = 50
-    num_category = 16
+    args.num_class = 4
+    num_category = 1
     num_part = args.num_class
     shutil.copy(hydra.utils.to_absolute_path('models/{}/model.py'.format(args.model.name)), '.')
@@ -128,18 +127,30 @@ def main(args):
         classifier = classifier.train()

         '''learning one epoch'''
-        for i, (points, label, target) in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):
+        for i, (points, label) in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):
             points = points.data.numpy()
             points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
             points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
             points = torch.Tensor(points)
-            points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
+            points, label = points.float().cuda(), label.long().cuda()
             optimizer.zero_grad()

-            seg_pred = classifier(torch.cat([points, to_categorical(label, num_category).repeat(1, points.shape[1], 1)], -1))
+            # print("points shape ..: ", points.shape)
+            # print("label shape ...: ", label.shape)
+            # print(" points.shape[1] : ", points.shape[1])
+            # print("to_categorical(label, num_category): ", to_categorical(torch.tensor(1).unsqueeze(dim=0).unsqueeze(dim=0).cuda(), num_category).shape)
+            # print("to_categorical(label, num_category).repeat(1, points.shape[1], 1): ", to_categorical(torch.tensor(1).cuda(), num_category).repeat(1, points.shape[1], 1).shape)
+            # print("input shape: ", torch.cat([points, to_categorical(torch.tensor(1).unsqueeze(dim=0).unsqueeze(dim=0).cuda(), num_category).repeat(1, points.shape[1], 1)], -1).shape)
+            seg_pred = classifier(torch.cat([points, to_categorical(torch.tensor(1).unsqueeze(dim=0).unsqueeze(dim=0).cuda(), 16).repeat(1, points.shape[1], 1)], -1))
+            # seg_pred = classifier(torch.cat([points, to_categorical(label, num_category)], -1))
             seg_pred = seg_pred.contiguous().view(-1, num_part)
-            target = target.view(-1, 1)[:, 0]
+            target = label.view(-1, 1)[:, 0]
             pred_choice = seg_pred.data.max(1)[1]
             correct = pred_choice.eq(target.data).cpu().sum()
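The training input is still padded with a 16-way one-hot (matching `args.input_dim = (6 if args.normal else 3) + 16`) even though the forest data has a single category, so the constant `torch.tensor(1)` label just selects a fixed one-hot row. A minimal CPU-only shape check of that concatenation (`B` and `N` are illustrative sizes, not values from the config):

```python
import torch

B, N = 2, 1024
points = torch.rand(B, N, 6)  # xyz + normals

# constant category id 1 -> a 1 x 1 x 16 one-hot, tiled to B x N x 16
onehot = torch.eye(16)[torch.tensor([[1]])]
onehot = onehot.repeat(B, N, 1)

net_in = torch.cat([points, onehot], dim=-1)
print(net_in.shape)  # torch.Size([2, 1024, 22]), i.e. 6 + 16 channels
```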
@@ -166,83 +177,85 @@ def main(args):
             classifier = classifier.eval()

-            for batch_id, (points, label, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
+            for batch_id, (points, label) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
                 cur_batch_size, NUM_POINT, _ = points.size()
-                points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
-                seg_pred = classifier(torch.cat([points, to_categorical(label, num_category).repeat(1, points.shape[1], 1)], -1))
+                points, label = points.float().cuda(), label.long().cuda()
+                seg_pred = classifier(torch.cat([points, to_categorical(torch.tensor(1).unsqueeze(dim=0).unsqueeze(dim=0).cuda(), 16).repeat(1, points.shape[1], 1)], -1))
                 cur_pred_val = seg_pred.cpu().data.numpy()
                 cur_pred_val_logits = cur_pred_val
                 cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
-                target = target.cpu().data.numpy()
+                target = label.cpu().data.numpy()

-                for i in range(cur_batch_size):
-                    cat = seg_label_to_cat[target[i, 0]]
-                    logits = cur_pred_val_logits[i, :, :]
-                    cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]
+                # for i in range(cur_batch_size):
+                #     cat = seg_label_to_cat[target[i, 0]]
+                #     logits = cur_pred_val_logits[i, :, :]
+                #     cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]

                 correct = np.sum(cur_pred_val == target)
                 total_correct += correct
                 total_seen += (cur_batch_size * NUM_POINT)

-                for l in range(num_part):
-                    total_seen_class[l] += np.sum(target == l)
-                    total_correct_class[l] += (np.sum((cur_pred_val == l) & (target == l)))
-
-                for i in range(cur_batch_size):
-                    segp = cur_pred_val[i, :]
-                    segl = target[i, :]
-                    cat = seg_label_to_cat[segl[0]]
-                    part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
-                    for l in seg_classes[cat]:
-                        if (np.sum(segl == l) == 0) and (
-                                np.sum(segp == l) == 0):  # part is not present, no prediction as well
-                            part_ious[l - seg_classes[cat][0]] = 1.0
-                        else:
-                            part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
-                                np.sum((segl == l) | (segp == l)))
-                    shape_ious[cat].append(np.mean(part_ious))
-
-            all_shape_ious = []
-            for cat in shape_ious.keys():
-                for iou in shape_ious[cat]:
-                    all_shape_ious.append(iou)
-                shape_ious[cat] = np.mean(shape_ious[cat])
-            mean_shape_ious = np.mean(list(shape_ious.values()))
+                # for l in range(num_part):
+                #     total_seen_class[l] += np.sum(target == l)
+                #     total_correct_class[l] += (np.sum((cur_pred_val == l) & (target == l)))
+
+                # for i in range(cur_batch_size):
+                #     segp = cur_pred_val[i, :]
+                #     segl = target[i, :]
+                #     cat = seg_label_to_cat[segl[0]]
+                #     part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
+                #     for l in seg_classes[cat]:
+                #         if (np.sum(segl == l) == 0) and (
+                #                 np.sum(segp == l) == 0):  # part is not present, no prediction as well
+                #             part_ious[l - seg_classes[cat][0]] = 1.0
+                #         else:
+                #             part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
+                #                 np.sum((segl == l) | (segp == l)))
+                #     shape_ious[cat].append(np.mean(part_ious))
+
+            # all_shape_ious = []
+            # for cat in shape_ious.keys():
+            #     for iou in shape_ious[cat]:
+            #         all_shape_ious.append(iou)
+            #     shape_ious[cat] = np.mean(shape_ious[cat])
+            # mean_shape_ious = np.mean(list(shape_ious.values()))

             test_metrics['accuracy'] = total_correct / float(total_seen)
-            test_metrics['class_avg_accuracy'] = np.mean(
-                np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float32))
-            for cat in sorted(shape_ious.keys()):
-                logger.info('eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
-            test_metrics['class_avg_iou'] = mean_shape_ious
-            test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)
-
-            logger.info('Epoch %d test Accuracy: %f  Class avg mIOU: %f  Inctance avg mIOU: %f' % (
-                epoch + 1, test_metrics['accuracy'], test_metrics['class_avg_iou'], test_metrics['inctance_avg_iou']))
-            if (test_metrics['inctance_avg_iou'] >= best_inctance_avg_iou):
-                logger.info('Save model...')
-                savepath = 'best_model.pth'
-                logger.info('Saving at %s' % savepath)
-                state = {
-                    'epoch': epoch,
-                    'train_acc': train_instance_acc,
-                    'test_acc': test_metrics['accuracy'],
-                    'class_avg_iou': test_metrics['class_avg_iou'],
-                    'inctance_avg_iou': test_metrics['inctance_avg_iou'],
-                    'model_state_dict': classifier.state_dict(),
-                    'optimizer_state_dict': optimizer.state_dict(),
-                }
-                torch.save(state, savepath)
-                logger.info('Saving model....')
-
-            if test_metrics['accuracy'] > best_acc:
-                best_acc = test_metrics['accuracy']
-            if test_metrics['class_avg_iou'] > best_class_avg_iou:
-                best_class_avg_iou = test_metrics['class_avg_iou']
-            if test_metrics['inctance_avg_iou'] > best_inctance_avg_iou:
-                best_inctance_avg_iou = test_metrics['inctance_avg_iou']
-            logger.info('Best accuracy is: %.5f' % best_acc)
-            logger.info('Best class avg mIOU is: %.5f' % best_class_avg_iou)
-            logger.info('Best inctance avg mIOU is: %.5f' % best_inctance_avg_iou)
+            # test_metrics['class_avg_accuracy'] = np.mean(
+            #     np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float32))
+            print("test_metrics['accuracy']: ", test_metrics['accuracy'])
+            # for cat in sorted(shape_ious.keys()):
+            #     logger.info('eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
+            # test_metrics['class_avg_iou'] = mean_shape_ious
+            # test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)

+            # logger.info('Epoch %d test Accuracy: %f  Class avg mIOU: %f  Inctance avg mIOU: %f' % (
+            #     epoch + 1, test_metrics['accuracy'], test_metrics['class_avg_iou'], test_metrics['inctance_avg_iou']))
+            # if (test_metrics['inctance_avg_iou'] >= best_inctance_avg_iou):
+            #     logger.info('Save model...')
+            #     savepath = 'best_model.pth'
+            #     logger.info('Saving at %s' % savepath)
+            #     state = {
+            #         'epoch': epoch,
+            #         'train_acc': train_instance_acc,
+            #         'test_acc': test_metrics['accuracy'],
+            #         'class_avg_iou': test_metrics['class_avg_iou'],
+            #         'inctance_avg_iou': test_metrics['inctance_avg_iou'],
+            #         'model_state_dict': classifier.state_dict(),
+            #         'optimizer_state_dict': optimizer.state_dict(),
+            #     }
+            #     torch.save(state, savepath)
+            #     logger.info('Saving model....')

+            # if test_metrics['accuracy'] > best_acc:
+            #     best_acc = test_metrics['accuracy']
+            # if test_metrics['class_avg_iou'] > best_class_avg_iou:
+            #     best_class_avg_iou = test_metrics['class_avg_iou']
+            # if test_metrics['inctance_avg_iou'] > best_inctance_avg_iou:
+            #     best_inctance_avg_iou = test_metrics['inctance_avg_iou']
+            # logger.info('Best accuracy is: %.5f' % best_acc)
+            # logger.info('Best class avg mIOU is: %.5f' % best_class_avg_iou)
+            # logger.info('Best inctance avg mIOU is: %.5f' % best_inctance_avg_iou)
             global_epoch += 1
"""
Author: Benny
Date: Nov 2019
"""
import argparse
import os
import torch
import datetime
import logging
import sys
import importlib
import shutil
import provider
import numpy as np
from pathlib import Path
from tqdm import tqdm
from dataset import PartNormalDataset
import hydra
import omegaconf
seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37],
'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49],
'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
def inplace_relu(m):
classname = m.__class__.__name__
if classname.find('ReLU') != -1:
m.inplace=True
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
if (y.is_cuda):
return new_y.cuda()
return new_y
@hydra.main(config_path='config', config_name='partseg')
def main(args):
omegaconf.OmegaConf.set_struct(args, False)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
# print('GPU available: {}'.format(torch.cuda.is_available()))
logger = logging.getLogger(__name__)
# print(args.pretty())
# use pretty print to print the config
root = hydra.utils.to_absolute_path('data/shapenetcore_partanno_segmentation_benchmark_v0_normal/')
TEST_DATASET = PartNormalDataset(root=root, npoints=args.num_point, split='test', normal_channel=args.normal)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=10)
'''MODEL LOADING'''
args.input_dim = (6 if args.normal else 3) + 16
args.num_class = 50
num_category = 16
num_part = args.num_class
shutil.copy(hydra.utils.to_absolute_path('models/{}/model.py'.format(args.model.name)), '.')
# print if gpu is available
logger.info('GPU available: {}'.format(torch.cuda.is_available()))
classifier = getattr(importlib.import_module('models.{}.model'.format(args.model.name)), 'PointTransformerSeg')(args).cuda()
# load pretrain model
checkpoint = torch.load('best_model.pth')
classifier.load_state_dict(checkpoint['model_state_dict'])
logger.info('Use pretrain model')
results_dir = hydra.utils.to_absolute_path('results')
# create folder to save the results las files
if not os.path.exists(results_dir):
os.mkdir(results_dir)
with torch.no_grad():
test_metrics = {}
total_correct = 0
total_seen = 0
total_seen_class = [0 for _ in range(num_part)]
total_correct_class = [0 for _ in range(num_part)]
shape_ious = {cat: [] for cat in seg_classes.keys()}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
classifier = classifier.eval()
for batch_id, (points, label, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
cur_batch_size, NUM_POINT, _ = points.size()
points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
seg_pred = classifier(torch.cat([points, to_categorical(label, num_category).repeat(1, points.shape[1], 1)], -1))
cur_pred_val = seg_pred.cpu().data.numpy()
cur_pred_val_logits = cur_pred_val
cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
target = target.cpu().data.numpy()
for i in range(cur_batch_size):
cat = seg_label_to_cat[target[i, 0]]
logits = cur_pred_val_logits[i, :, :]
cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]
# get x,y,z coordinates of points
points = points.cpu().data.numpy()
points = points[:, :, 0:3]
# add predicted labels to points
points_pd = np.concatenate([points, np.expand_dims(cur_pred_val, axis=2)], axis=2)
points_gt = np.concatenate([points, np.expand_dims(target, axis=2)], axis=2)
# save points as text files in the results folder and preserve the same name as the original txt file
for i in range(cur_batch_size):
os.makedirs(os.path.join(results_dir,TEST_DATASET.datapath[batch_id * args.batch_size + i][1].split('/')[-2]), exist_ok=True)
np.savetxt(os.path.join(
os.path.join(results_dir,TEST_DATASET.datapath[batch_id * args.batch_size + i][1].split('/')[-2]),
TEST_DATASET.datapath[batch_id * args.batch_size + i][1].split('/')[-1].replace('.txt', '_pred.txt')
),
points_pd[i, :, :], fmt='%f %f %f %d')
# copy original txt file to the results folder
shutil.copy(TEST_DATASET.datapath[batch_id * args.batch_size + i][1], os.path.join(
os.path.join(results_dir,TEST_DATASET.datapath[batch_id * args.batch_size + i][1].split('/')[-2]),
TEST_DATASET.datapath[batch_id * args.batch_size + i][1].split('/')[-1]
))
# save ground truth labels as text files in the results folder and preserve the same name as the original txt file
np.savetxt(os.path.join(
os.path.join(results_dir,TEST_DATASET.datapath[batch_id * args.batch_size + i][1].split('/')[-2]),
TEST_DATASET.datapath[batch_id * args.batch_size + i][1].split('/')[-1].replace('.txt', '_gt.txt')
),
points_gt[i, :, :], fmt='%f %f %f %d')
correct = np.sum(cur_pred_val == target)
total_correct += correct
total_seen += (cur_batch_size * NUM_POINT)
for l in range(num_part):
total_seen_class[l] += np.sum(target == l)
total_correct_class[l] += (np.sum((cur_pred_val == l) & (target == l)))
for i in range(cur_batch_size):
segp = cur_pred_val[i, :]
segl = target[i, :]
cat = seg_label_to_cat[segl[0]]
part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
for l in seg_classes[cat]:
if (np.sum(segl == l) == 0) and (
np.sum(segp == l) == 0): # part is not present, no prediction as well
part_ious[l - seg_classes[cat][0]] = 1.0
else:
part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
np.sum((segl == l) | (segp == l)))
shape_ious[cat].append(np.mean(part_ious))
all_shape_ious = []
for cat in shape_ious.keys():
for iou in shape_ious[cat]:
all_shape_ious.append(iou)
shape_ious[cat] = np.mean(shape_ious[cat])
mean_shape_ious = np.mean(list(shape_ious.values()))
test_metrics['accuracy'] = total_correct / float(total_seen)
test_metrics['class_avg_accuracy'] = np.mean(
np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float32))
for cat in sorted(shape_ious.keys()):
logger.info('eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
test_metrics['class_avg_iou'] = mean_shape_ious
test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)
logger.info('eval accuracy: %f' % (test_metrics['accuracy']))
logger.info('eval avg class acc: %f' % (test_metrics['class_avg_accuracy']))
logger.info('eval avg class IoU: %f' % (test_metrics['class_avg_iou']))
logger.info('eval avg instance IoU: %f' % (test_metrics['inctance_avg_iou']))
if __name__ == '__main__':
main()
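The part-IoU convention used above (a part absent from both prediction and ground truth counts as IoU 1.0) is easy to verify on toy data; a small self-contained sketch with made-up label arrays:

```python
import numpy as np

# hypothetical example: 8 points, one category with parts {0, 1, 2}
segl = np.array([0, 0, 1, 1, 1, 0, 0, 1])  # ground truth
segp = np.array([0, 0, 1, 1, 0, 0, 0, 1])  # prediction

part_ious = []
for l in [0, 1, 2]:
    if np.sum(segl == l) == 0 and np.sum(segp == l) == 0:
        part_ious.append(1.0)  # absent from both -> perfect by convention
    else:
        inter = np.sum((segl == l) & (segp == l))
        union = np.sum((segl == l) | (segp == l))
        part_ious.append(inter / float(union))

print(part_ious)           # [0.8, 0.75, 1.0]
print(np.mean(part_ious))  # 0.85 -> this shape's instance IoU
```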