diff --git a/.gitignore b/.gitignore
index 2bace18d74207d7f93466b1c31b253c6d731a79e..29dc5f63fdf56545bca96f32d2f97aa75d6ab191 100644
--- a/.gitignore
+++ b/.gitignore
@@ -54,6 +54,7 @@ pip_egg_info/
 *~
 ._*
 .DS_Store
+*.pt
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -127,4 +128,7 @@ tensorboard/
 wandb/
 data/
 shapenet_part_seg_hdf5_data/
+ShapeNet/
+maciek_data
+
 ```
diff --git a/PyG_implementation/my_data_loader.py b/PyG_implementation/my_data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb1c7846ce5e441fe35ff8dd8d951e49eebe11ee
--- /dev/null
+++ b/PyG_implementation/my_data_loader.py
@@ -0,0 +1,99 @@
+import os
+import os.path as osp
+import random
+from typing import Callable, Optional
+
+import torch
+
+from torch_geometric.data import (
+    Data,
+    InMemoryDataset
+)
+from torch_geometric.io import read_txt_array
+
+
+class MyData(InMemoryDataset):
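+    """In-memory point-cloud segmentation dataset built from plain-text files.
+
+    Every file in ``raw/`` is read as one point cloud; the file list is shuffled and
+    split into train/val/test according to ``split_ratio``. All samples belong to a
+    single object category with four segmentation parts (``seg_classes``).
+    """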
+    seg_classes = [0, 1, 2, 3]  # segmentation part ids used in this dataset
+
+    def __init__(self,
+                 root: str,
+                 include_normals: bool = True,
+                 split: str = 'trainval',
+                 split_ratio: tuple = (0.7, 0.15, 0.15),
+                 transform: Optional[Callable] = None,
+                 pre_transform: Optional[Callable] = None,
+                 pre_filter: Optional[Callable] = None):
+        self.split_ratio = split_ratio
+        super().__init__(root, transform, pre_transform, pre_filter)
+
+        if split == 'train':
+            path = self.processed_paths[0]
+        elif split == 'val':
+            path = self.processed_paths[1]
+        elif split == 'test':
+            path = self.processed_paths[2]
+        elif split == 'trainval':
+            path = self.processed_paths[3]
+        else:
+            raise ValueError((f'Split {split} found, but expected either '
+                              'train, val, trainval or test'))
+
+        self.data, self.slices = torch.load(path)
+        self.data.x = self.data.x if include_normals else None
+
+    @property
+    def raw_file_names(self):
+        return os.listdir(self.raw_dir)
+    
+    @property
+    def raw_dir(self):
+        return os.path.join(self.root, 'raw')
+    
+    @property
+    def processed_dir(self):
+        return os.path.join(self.root, 'processed')
+
+    @property
+    def processed_file_names(self):
+        return ['data_{}.pt'.format(name) for name in ['train', 'val', 'test', 'trainval']]
+
+    def process(self):
+        # Read files in raw directory
+        filenames = self.raw_file_names
+
+        # Shuffle filenames for random splitting
+        random.shuffle(filenames)
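+        # NOTE: the split depends on the state of Python's `random` module; seed it before
+        # the first instantiation for reproducible splits (process() runs only once and
+        # its result is cached under processed/)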
+
+        # Calculate split sizes
+        total_size = len(filenames)
+        train_size = int(total_size * self.split_ratio[0])
+        val_size = int(total_size * self.split_ratio[1])
+
+        # Split filenames
+        train_filenames = filenames[:train_size]
+        val_filenames = filenames[train_size:train_size+val_size]
+        test_filenames = filenames[train_size+val_size:]
+
+        for i, split_filenames in enumerate([train_filenames, val_filenames, test_filenames, filenames]):
+            data_list = self.process_filenames(split_filenames)
+            torch.save(self.collate(data_list), self.processed_paths[i])
+
+    def process_filenames(self, filenames):
+        data_list = []
+
+        for name in filenames:
+            data = read_txt_array(osp.join(self.raw_dir, name))
+            pos = data[:, :3]                 # columns 0-2: xyz coordinates
+            x = data[:, 3:6]                  # columns 3-5: per-point features (e.g. normals)
+            y = data[:, -1].type(torch.long)  # last column: part label
+            category = torch.tensor(0, dtype=torch.long)  # single-category dataset, so every sample is category 0
+            data = Data(pos=pos, x=x, y=y, category=category)
+            if self.pre_filter is not None and not self.pre_filter(data):
+                continue
+            if self.pre_transform is not None:
+                data = self.pre_transform(data)
+            data_list.append(data)
+
+        return data_list
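+
+
+# Minimal usage sketch (the root path is a placeholder; `root` is expected to
+# contain a `raw/` folder with one .txt point cloud per file):
+#
+#   dataset = MyData(root='path/to/plane_data', split='train')
+#   print(len(dataset), dataset.num_classes)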
diff --git a/PyG_implementation/pyg_implementaion_main.py b/PyG_implementation/pyg_implementaion_main.py
index f63c00cbfa640b77b7910d9f4c3c7cf85aef20d7..b6e35b1dba2812889e3d298e293a9c8077fd2af9 100644
--- a/PyG_implementation/pyg_implementaion_main.py
+++ b/PyG_implementation/pyg_implementaion_main.py
@@ -35,7 +35,7 @@ random.seed(config.seed)
 torch.manual_seed(config.seed)
 device = torch.device(config.device)
 
-config.category = 'Car' #@param ["Bag", "Cap", "Car", "Chair", "Earphone", "Guitar", "Knife", "Lamp", "Laptop", "Motorbike", "Mug", "Pistol", "Rocket", "Skateboard", "Table"] {type:"raw"}
+config.category = 'Airplane' #@param ["Airplane", "Bag", "Cap", "Car", "Chair", "Earphone", "Guitar", "Knife", "Lamp", "Laptop", "Motorbike", "Mug", "Pistol", "Rocket", "Skateboard", "Table"] {type:"raw"}
 config.random_jitter_translation = 1e-2
 config.random_rotation_interval_x = 15
 config.random_rotation_interval_y = 15
@@ -65,12 +65,12 @@ pre_transform = T.NormalizeScale()
 
 dataset_path = os.path.join('ShapeNet', config.category)
 
+
 train_val_dataset = ShapeNet(
     dataset_path, config.category, split='trainval',
     transform=transform, pre_transform=pre_transform
 )
 
-
 segmentation_class_frequency = {}
 for idx in tqdm(range(len(train_val_dataset))):
     pc_viz = train_val_dataset[idx].pos.numpy().tolist()
diff --git a/PyG_implementation/pyg_implementaion_main_my_data_loader.py b/PyG_implementation/pyg_implementaion_main_my_data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..63aed1992df033ba93bc4d5e6952551919c12322
--- /dev/null
+++ b/PyG_implementation/pyg_implementaion_main_my_data_loader.py
@@ -0,0 +1,361 @@
+import os
+import wandb
+import random
+import numpy as np
+from tqdm.auto import tqdm
+
+import torch
+import torch.nn.functional as F
+
+from torch_scatter import scatter
+from torchmetrics.functional import jaccard_index
+
+import torch_geometric.transforms as T
+from torch_geometric.datasets import ShapeNet
+from torch_geometric.loader import DataLoader
+from torch_geometric.nn import MLP, DynamicEdgeConv
+
+
+wandb_project = "pyg-point-cloud" #@param {"type": "string"}
+wandb_run_name = "train-dgcnn" #@param {"type": "string"}
+
+wandb.init(
+    entity="maciej-wielgosz-nibio",
+    project=wandb_project, 
+    name=wandb_run_name, 
+    job_type="train"
+    )
+
+config = wandb.config
+
+config.seed = 42
+config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+random.seed(config.seed)
+torch.manual_seed(config.seed)
+device = torch.device(config.device)
+
+config.category = 'Car' #@param ["Bag", "Cap", "Car", "Chair", "Earphone", "Guitar", "Knife", "Lamp", "Laptop", "Motorbike", "Mug", "Pistol", "Rocket", "Skateboard", "Table"] {type:"raw"}
+config.random_jitter_translation = 1e-2
+config.random_rotation_interval_x = 15
+config.random_rotation_interval_y = 15
+config.random_rotation_interval_z = 15
+config.validation_split = 0.2
+config.batch_size = 4
+config.num_workers = 6
+
+config.num_nearest_neighbours = 30
+config.aggregation_operator = "max"
+config.dropout = 0.5
+config.initial_lr = 1e-3
+config.lr_scheduler_step_size = 5
+config.gamma = 0.8
+
+config.epochs = 1
+
+
+transform = T.Compose([
+    T.RandomJitter(config.random_jitter_translation),
+    T.RandomRotate(config.random_rotation_interval_x, axis=0),
+    T.RandomRotate(config.random_rotation_interval_y, axis=1),
+    T.RandomRotate(config.random_rotation_interval_z, axis=2)
+])
+pre_transform = T.NormalizeScale()
+
+
+# dataset_path = os.path.join('ShapeNet', config.category)
+
+
+# train_val_dataset = ShapeNet(
+#     dataset_path, config.category, split='trainval',
+#     transform=transform, pre_transform=pre_transform
+# )
+
+from my_data_loader import MyData
+
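+# NOTE: absolute path to the locally prepared point-cloud data; adjust for your environment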
+dataset_path = "/home/nibio/mutable-outside-world/code/nibio_graph_sem_seg/maciek_data/plane_maciek"
+
+# MyData takes no category argument (the custom dataset has a single category),
+# so only the root path and split are passed
+train_val_dataset = MyData(
+    dataset_path, split='trainval',
+    transform=transform, pre_transform=pre_transform
+)
+
+
+segmentation_class_frequency = {}
+for idx in tqdm(range(len(train_val_dataset))):
+    pc_viz = train_val_dataset[idx].pos.numpy().tolist()
+    segmentation_label = train_val_dataset[idx].y.numpy().tolist()
+    for label in set(segmentation_label):
+        segmentation_class_frequency[label] = segmentation_label.count(label)
+class_offset = min(list(segmentation_class_frequency.keys()))
+print("Class Offset:", class_offset)
+
+for idx in range(len(train_val_dataset)):
+    train_val_dataset[idx].y -= class_offset
+
+num_train_examples = int((1 - config.validation_split) * len(train_val_dataset))
+train_dataset = train_val_dataset[:num_train_examples]
+val_dataset = train_val_dataset[num_train_examples:]
+
+train_loader = DataLoader(
+    train_dataset, batch_size=config.batch_size,
+    shuffle=True, num_workers=config.num_workers
+)
+val_loader = DataLoader(
+    val_dataset, batch_size=config.batch_size,
+    shuffle=False, num_workers=config.num_workers
+)
+visualization_loader = DataLoader(
+    val_dataset[:10], batch_size=1,
+    shuffle=False, num_workers=config.num_workers
+)
+
+
+
+class DGCNN(torch.nn.Module):
+    def __init__(self, out_channels, k=30, aggr='max'):
+        super().__init__()
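+        # DynamicEdgeConv rebuilds a k-NN graph at every forward pass and applies its MLP
+        # to the edge features [x_i, x_j - x_i], hence the 2 * F input width below;
+        # the first layer sees 6 features per point (3 normals in data.x + 3 coordinates)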
+        self.conv1 = DynamicEdgeConv(
+            MLP([2 * 6, 64, 64]), k, aggr
+        )
+        self.conv2 = DynamicEdgeConv(
+            MLP([2 * 64, 64, 64]), k, aggr
+        )
+        self.conv3 = DynamicEdgeConv(
+            MLP([2 * 64, 64, 64]), k, aggr
+        )
+        self.mlp = MLP(
+            [3 * 64, 1024, 256, 128, out_channels],
+            dropout=0.5, norm=None
+        )
+
+    def forward(self, data):
+        x, pos, batch = data.x, data.pos, data.batch
+        x0 = torch.cat([x, pos], dim=-1)
+        x1 = self.conv1(x0, batch)
+        x2 = self.conv2(x1, batch)
+        x3 = self.conv3(x2, batch)
+        out = self.mlp(torch.cat([x1, x2, x3], dim=1))
+        return F.log_softmax(out, dim=1)
+    
+
+config.num_classes = train_dataset.num_classes
+
+model = DGCNN(
+    out_channels=train_dataset.num_classes,
+    k=config.num_nearest_neighbours,
+    aggr=config.aggregation_operator
+).to(device)
+optimizer = torch.optim.Adam(model.parameters(), lr=config.initial_lr)
+scheduler = torch.optim.lr_scheduler.StepLR(
+    optimizer, step_size=config.lr_scheduler_step_size, gamma=config.gamma
+)
+
+def train_step(epoch):
+    model.train()
+    
+    ious, categories = [], []
+    total_loss = correct_nodes = total_nodes = 0
+    y_map = torch.empty(
+        train_loader.dataset.num_classes, device=device
+    ).long()
+    num_train_examples = len(train_loader)
+    
+    progress_bar = tqdm(
+        train_loader, desc=f"Training Epoch {epoch}/{config.epochs}"
+    )
+    
+    for data in progress_bar:
+        data = data.to(device)
+        
+        optimizer.zero_grad()
+        outs = model(data)
+        loss = F.nll_loss(outs, data.y)
+        loss.backward()
+        optimizer.step()
+        
+        total_loss += loss.item()
+        
+        correct_nodes += outs.argmax(dim=1).eq(data.y).sum().item()
+        total_nodes += data.num_nodes
+        
+        sizes = (data.ptr[1:] - data.ptr[:-1]).tolist()
+        for out, y in zip(outs.split(sizes), data.y.split(sizes)):
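+            # map the dataset's part ids onto a contiguous 0..K-1 range so the label
+            # indices match the columns selected from `out` for the IoU computation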
+            part = MyData.seg_classes
+            part = torch.tensor(part, device=device)
+
+            y_map[part] = torch.arange(part.size(0), device=device)
+
+            iou = jaccard_index(
+                out[:, part].argmax(dim=-1), y_map[y],
+                task="multiclass", num_classes=part.size(0)
+            )
+            ious.append(iou)
+
+        categories.append(data.category)
+        
+    iou = torch.tensor(ious, device=device)
+    category = torch.cat(categories, dim=0)
+
+    print("iou shape:", iou.shape)
+    print("category shape:", category.shape)
+
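+    # average per-shape IoUs within each category id via scatter-mean, then average
+    # over categories; with a single category this equals the plain mean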
+    mean_iou = float(scatter(iou, category, reduce='mean').mean())
+
+    
+    return {
+        "Train/Loss": total_loss / num_train_examples,
+        "Train/Accuracy": correct_nodes / total_nodes,
+        "Train/IoU": mean_iou
+    }
+
+
+@torch.no_grad()
+def val_step(epoch):
+    model.eval()
+
+    ious, categories = [], []
+    total_loss = correct_nodes = total_nodes = 0
+    y_map = torch.empty(
+        val_loader.dataset.num_classes, device=device
+    ).long()
+    num_val_examples = len(val_loader)
+    
+    progress_bar = tqdm(
+        val_loader, desc=f"Validating Epoch {epoch}/{config.epochs}"
+    )
+    
+    for data in progress_bar:
+        data = data.to(device)
+        outs = model(data)
+        
+        loss = F.nll_loss(outs, data.y)
+        total_loss += loss.item()
+        
+        correct_nodes += outs.argmax(dim=1).eq(data.y).sum().item()
+        total_nodes += data.num_nodes
+
+        sizes = (data.ptr[1:] - data.ptr[:-1]).tolist()
+        for out, y in zip(outs.split(sizes), data.y.split(sizes)):
+            # use the part labels of the custom dataset, as in train_step
+            part = torch.tensor(MyData.seg_classes, device=device)
+
+            y_map[part] = torch.arange(part.size(0), device=device)
+
+            iou = jaccard_index(
+                out[:, part].argmax(dim=-1), y_map[y],
+                task="multiclass", num_classes=part.size(0)
+            )
+            ious.append(iou)
+
+        categories.append(data.category)
+
+    iou = torch.tensor(ious, device=device)
+    category = torch.cat(categories, dim=0)
+    mean_iou = float(scatter(iou, category, reduce='mean').mean())
+    
+    return {
+        "Validation/Loss": total_loss / num_val_examples,
+        "Validation/Accuracy": correct_nodes / total_nodes,
+        "Validation/IoU": mean_iou
+    }
+
+
+@torch.no_grad()
+def visualization_step(epoch, table):
+    model.eval()
+    for data in tqdm(visualization_loader):
+        data = data.to(device)
+        outs = model(data)
+
+        predicted_labels = outs.argmax(dim=1)
+        accuracy = predicted_labels.eq(data.y).sum().item() / data.num_nodes
+
+        sizes = (data.ptr[1:] - data.ptr[:-1]).tolist()
+        ious, categories = [], []
+        y_map = torch.empty(
+            visualization_loader.dataset.num_classes, device=device
+        ).long()
+        for out, y in zip(outs.split(sizes), data.y.split(sizes)):
+            # use the part labels of the custom dataset, as in train_step
+            part = torch.tensor(MyData.seg_classes, device=device)
+            y_map[part] = torch.arange(part.size(0), device=device)
+            iou = jaccard_index(
+                out[:, part].argmax(dim=-1), y_map[y],
+                task="multiclass", num_classes=part.size(0)
+            )
+            ious.append(iou)
+        categories.append(data.category)
+        iou = torch.tensor(ious, device=device)
+        category = torch.cat(categories, dim=0)
+        mean_iou = float(scatter(iou, category, reduce='mean').mean())
+
+        gt_pc_viz = data.pos.cpu().numpy().tolist()
+        segmentation_label = data.y.cpu().numpy().tolist()
+        frequency_dict = {key: 0 for key in segmentation_class_frequency.keys()}
+        for label in set(segmentation_label):
+            frequency_dict[label] = segmentation_label.count(label)
+        for j in range(len(gt_pc_viz)):
+            # gt_pc_viz[j] += [segmentation_label[j] + 1 - class_offset]
+            gt_pc_viz[j] += [segmentation_label[j] + 1]
+
+        predicted_pc_viz = data.pos.cpu().numpy().tolist()
+        # colour the prediction cloud with the predicted labels, not the ground truth
+        predicted_segmentation_label = predicted_labels.cpu().numpy().tolist()
+        frequency_dict = {key: 0 for key in segmentation_class_frequency.keys()}
+        for label in set(predicted_segmentation_label):
+            frequency_dict[label] = predicted_segmentation_label.count(label)
+        for j in range(len(predicted_pc_viz)):
+            # predicted_pc_viz[j] += [predicted_segmentation_label[j] + 1 - class_offset]
+            predicted_pc_viz[j] += [predicted_segmentation_label[j] + 1]
+
+        table.add_data(
+            epoch, wandb.Object3D(np.array(gt_pc_viz)),
+            wandb.Object3D(np.array(predicted_pc_viz)),
+            accuracy, mean_iou
+        )
+    
+    return table
+
+
+def save_checkpoint(epoch):
+    """Save model checkpoints as Weights & Biases artifacts"""
+    torch.save({
+        'epoch': epoch,
+        'model_state_dict': model.state_dict(),
+        'optimizer_state_dict': optimizer.state_dict()
+    }, "checkpoint.pt")
+    
+    artifact_name = wandb.util.make_artifact_name_safe(
+        f"{wandb.run.name}-{wandb.run.id}-checkpoint"
+    )
+    
+    checkpoint_artifact = wandb.Artifact(artifact_name, type="checkpoint")
+    checkpoint_artifact.add_file("checkpoint.pt")
+    wandb.log_artifact(
+        checkpoint_artifact, aliases=["latest", f"epoch-{epoch}"]
+    )
+
+
+table = wandb.Table(columns=["Epoch", "Ground-Truth", "Prediction", "Accuracy", "IoU"])
+
+for epoch in range(1, config.epochs + 1):
+    train_metrics = train_step(epoch)
+    val_metrics = val_step(epoch)
+    
+    metrics = {**train_metrics, **val_metrics}
+    metrics["learning_rate"] = scheduler.get_last_lr()[-1]
+    wandb.log(metrics)
+    
+    table = visualization_step(epoch, table)
+    
+    scheduler.step()
+    save_checkpoint(epoch)
+
+wandb.log({"Evaluation": table})
+
+wandb.finish()
\ No newline at end of file
diff --git a/PyG_implementation/test_data_loader.ipynb b/PyG_implementation/test_data_loader.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..0e0a8793906b38eee7cd30a8e923c37d7c696ff3
--- /dev/null
+++ b/PyG_implementation/test_data_loader.ipynb
@@ -0,0 +1,338 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/nibio/.local/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "tensor([[ 0.0481, -0.0568, -0.6002],\n",
+      "        [-0.1801, -0.0686,  0.0642],\n",
+      "        [-0.1216, -0.0620,  0.0746],\n",
+      "        [-0.5081,  0.0408,  0.0671],\n",
+      "        [ 0.3576, -0.0771, -0.4065],\n",
+      "        [-0.7646,  0.0484,  0.1994],\n",
+      "        [-0.7131,  0.0448,  0.1149],\n",
+      "        [-0.7428,  0.0474,  0.1869],\n",
+      "        [-0.6872,  0.0435,  0.0926],\n",
+      "        [ 0.0944, -0.0845,  0.6770]])\n",
+      "tensor([[-1.4150e-01,  2.2580e-01,  9.6380e-01],\n",
+      "        [ 1.2900e-04, -7.4390e-01,  6.6830e-01],\n",
+      "        [-7.4760e-02, -3.4310e-01,  9.3630e-01],\n",
+      "        [-6.1190e-02,  2.9500e-01,  9.5350e-01],\n",
+      "        [ 9.9950e-01,  2.3410e-02, -2.2080e-02],\n",
+      "        [-1.8040e-01,  9.8150e-01, -6.4260e-02],\n",
+      "        [-3.1830e-02, -9.9920e-01,  2.2440e-02],\n",
+      "        [-2.5430e-02, -9.9910e-01,  3.4190e-02],\n",
+      "        [-3.1820e-02, -9.9920e-01,  2.2450e-02],\n",
+      "        [-4.3480e-02, -4.7540e-01,  8.7870e-01]])\n",
+      "tensor([3, 0, 0, 0, 3, 2, 2, 2, 2, 3])\n",
+      "tensor([[-0.7664,  0.1929, -0.0083],\n",
+      "        [-0.3521, -0.0478, -0.0628],\n",
+      "        [-0.4089, -0.0751,  0.0172],\n",
+      "        [-0.2523,  0.0568, -0.8086],\n",
+      "        [-0.0043,  0.0197,  0.4197],\n",
+      "        [ 0.0110,  0.0176,  0.3963],\n",
+      "        [-0.0650,  0.0271,  0.5071],\n",
+      "        [-0.1042,  0.0317,  0.5619],\n",
+      "        [-0.0946, -0.0643,  0.0584],\n",
+      "        [ 0.0309, -0.0066,  0.3776]])\n",
+      "tensor([[-7.8560e-03,  2.3670e-02, -9.9970e-01],\n",
+      "        [ 8.0000e-06, -5.9780e-01, -8.0160e-01],\n",
+      "        [-5.5770e-02, -9.4810e-01,  3.1320e-01],\n",
+      "        [-7.4080e-02, -9.9030e-01, -1.1790e-01],\n",
+      "        [-7.0410e-02,  9.9010e-01, -1.2160e-01],\n",
+      "        [-2.8900e-04,  9.9410e-01, -1.0810e-01],\n",
+      "        [-5.9570e-02,  9.9030e-01, -1.2520e-01],\n",
+      "        [-4.3470e-02, -9.9250e-01,  1.1440e-01],\n",
+      "        [-2.0010e-01, -6.3780e-01,  7.4380e-01],\n",
+      "        [ 1.3440e-02, -9.8920e-01,  1.4600e-01]])\n",
+      "tensor([0, 0, 0, 1, 1, 1, 1, 1, 0, 1])\n",
+      "tensor([[ 0.3761, -0.0980, -0.3470],\n",
+      "        [ 0.7416,  0.0947, -0.0407],\n",
+      "        [ 0.8828, -0.0196, -0.0612],\n",
+      "        [ 0.2774, -0.0191, -0.2942],\n",
+      "        [-0.7771,  0.1881,  0.0032],\n",
+      "        [ 0.3628,  0.1071,  0.0045],\n",
+      "        [-0.1776,  0.1073,  0.0020],\n",
+      "        [ 0.3345,  0.1074,  0.0012],\n",
+      "        [-0.1651,  0.1061,  0.0144],\n",
+      "        [ 0.4357,  0.1071,  0.0040]])\n",
+      "tensor([[ 1.5920e-01, -2.2520e-01, -9.6120e-01],\n",
+      "        [ 9.2470e-02,  9.2720e-01, -3.6310e-01],\n",
+      "        [ 2.8110e-01, -2.2570e-01, -9.3280e-01],\n",
+      "        [-1.7000e-05,  1.0000e+00,  1.6200e-04],\n",
+      "        [-8.4170e-02,  1.7300e-03,  9.9640e-01],\n",
+      "        [-1.5000e-05,  9.6180e-01,  2.7380e-01],\n",
+      "        [ 8.7820e-03,  9.9990e-01, -9.6050e-03],\n",
+      "        [ 3.3400e-04,  9.9990e-01,  1.1370e-02],\n",
+      "        [-1.2730e-02,  9.9430e-01,  1.0560e-01],\n",
+      "        [ 3.6120e-03,  9.9850e-01,  5.5120e-02]])\n",
+      "tensor([3, 0, 0, 3, 2, 0, 0, 0, 0, 0])\n",
+      "tensor([[ 0.2809,  0.0273,  0.0761],\n",
+      "        [ 0.1152,  0.0927,  0.6517],\n",
+      "        [ 0.1140, -0.1315, -0.5324],\n",
+      "        [ 0.1079, -0.1322,  0.0876],\n",
+      "        [ 0.1172, -0.1311, -0.6285],\n",
+      "        [ 0.1013, -0.1330, -0.4641],\n",
+      "        [ 0.1040,  0.1949,  0.9361],\n",
+      "        [ 0.1190,  0.0887, -0.6560],\n",
+      "        [-0.6309, -0.0683, -0.0366],\n",
+      "        [ 0.4285, -0.0589, -0.1052]])\n",
+      "tensor([[-0.0025,  0.9998,  0.0220],\n",
+      "        [ 0.7341,  0.1407, -0.6643],\n",
+      "        [ 0.1174, -0.9931,  0.0000],\n",
+      "        [ 0.0220, -0.2474,  0.9687],\n",
+      "        [ 0.1174, -0.9931,  0.0000],\n",
+      "        [ 0.1245, -0.9922,  0.0000],\n",
+      "        [-0.1405,  0.9901, -0.0024],\n",
+      "        [ 0.1922,  0.0316, -0.9808],\n",
+      "        [-0.2415,  0.6557, -0.7154],\n",
+      "        [ 0.0539,  0.0000, -0.9985]])\n",
+      "tensor([1, 1, 1, 1, 1, 1, 1, 1, 2, 0])\n",
+      "tensor([[ 0.0315, -0.1876,  0.2434],\n",
+      "        [-0.7607,  0.0910,  0.0332],\n",
+      "        [-0.0177, -0.0560, -0.2275],\n",
+      "        [-0.0016, -0.0736, -0.1388],\n",
+      "        [ 0.3045, -0.0446,  0.3330],\n",
+      "        [ 0.1719, -0.1019,  0.0248],\n",
+      "        [-0.7283,  0.0887, -0.2468],\n",
+      "        [-0.6753,  0.0768, -0.1422],\n",
+      "        [-0.6305,  0.0682, -0.0641],\n",
+      "        [-0.0140, -0.0600,  0.2102]])\n",
+      "tensor([[-0.9204, -0.1365,  0.3663],\n",
+      "        [ 0.1413, -0.4943, -0.8577],\n",
+      "        [-0.4340,  0.8827,  0.1802],\n",
+      "        [-0.0474,  0.9948,  0.0904],\n",
+      "        [ 0.1497,  0.9741, -0.1697],\n",
+      "        [ 0.0035, -0.9818,  0.1898],\n",
+      "        [-0.0625,  0.9897,  0.1289],\n",
+      "        [ 0.0335, -0.9909, -0.1301],\n",
+      "        [ 0.1704, -0.2261,  0.9591],\n",
+      "        [-0.1726,  0.9657, -0.1940]])\n",
+      "tensor([3, 0, 1, 1, 1, 0, 2, 2, 2, 1])\n",
+      "tensor([[ 7.3517e-01,  5.1309e-02,  6.1309e-02],\n",
+      "        [ 6.2617e-02, -2.6242e-02,  2.1035e-01],\n",
+      "        [ 4.9808e-02, -2.5158e-04,  3.1775e-01],\n",
+      "        [ 2.1079e-01, -5.1688e-02, -2.8077e-01],\n",
+      "        [ 2.4843e-01, -3.4576e-02, -2.7980e-01],\n",
+      "        [ 7.6359e-01,  5.3485e-02, -5.9962e-02],\n",
+      "        [ 1.6547e-02, -1.5852e-01, -1.6729e-01],\n",
+      "        [ 5.2288e-01,  8.2789e-02, -1.8368e-02],\n",
+      "        [ 1.4249e-01, -9.1106e-02,  2.2979e-01],\n",
+      "        [ 3.0719e-01, -7.7852e-02,  2.8854e-01]])\n",
+      "tensor([[ 0.0000e+00,  6.1360e-01,  7.8970e-01],\n",
+      "        [-3.9020e-02, -9.9480e-01,  9.4340e-02],\n",
+      "        [-2.2380e-02, -9.9590e-01,  8.7780e-02],\n",
+      "        [ 3.4950e-02, -8.0030e-01,  5.9850e-01],\n",
+      "        [ 4.9940e-03,  8.7570e-01, -4.8280e-01],\n",
+      "        [ 8.0000e-06,  5.7800e-01, -8.1610e-01],\n",
+      "        [ 7.7860e-01, -6.2610e-01,  4.1640e-02],\n",
+      "        [ 0.0000e+00,  9.1780e-01, -3.9700e-01],\n",
+      "        [-3.1400e-01, -7.4360e-01, -5.9040e-01],\n",
+      "        [ 2.4250e-02,  1.0800e-01, -9.9390e-01]])\n",
+      "tensor([0, 1, 1, 3, 3, 0, 3, 0, 3, 3])\n",
+      "tensor([[-0.5816,  0.1032,  0.0009],\n",
+      "        [ 0.6587, -0.0254, -0.0744],\n",
+      "        [ 0.5169, -0.0130,  0.0381],\n",
+      "        [-0.4170, -0.0322, -0.1046],\n",
+      "        [-0.6533,  0.1294, -0.0107],\n",
+      "        [ 0.3044, -0.0282,  0.0654],\n",
+      "        [ 0.1395, -0.0283,  0.0654],\n",
+      "        [ 0.2471, -0.0271,  0.0656],\n",
+      "        [ 0.6073,  0.0330, -0.0602],\n",
+      "        [-0.3998,  0.0159, -0.1297]])\n",
+      "tensor([[ 4.7040e-01,  7.7290e-01,  4.2590e-01],\n",
+      "        [ 9.0770e-03, -8.2630e-02, -9.9650e-01],\n",
+      "        [-1.4490e-02, -2.8270e-02,  9.9950e-01],\n",
+      "        [-2.1830e-02, -8.8620e-01,  4.6280e-01],\n",
+      "        [ 4.3200e-04,  7.8660e-03, -1.0000e+00],\n",
+      "        [-4.0800e-04, -1.7540e-01,  9.8450e-01],\n",
+      "        [ 1.7770e-02,  1.1870e-01,  9.9280e-01],\n",
+      "        [-5.0350e-02, -3.1960e-01,  9.4620e-01],\n",
+      "        [-2.0000e-05,  5.8870e-01, -8.0840e-01],\n",
+      "        [ 1.1790e-02,  9.0310e-01, -4.2920e-01]])\n",
+      "tensor([2, 0, 0, 3, 2, 0, 0, 0, 0, 3])\n",
+      "tensor([[-0.5343, -0.0267, -0.0338],\n",
+      "        [ 0.7813,  0.0034, -0.0305],\n",
+      "        [-0.5250, -0.0042,  0.0260],\n",
+      "        [-0.5671,  0.0025,  0.1981],\n",
+      "        [-0.4465, -0.0097, -0.1202],\n",
+      "        [-0.3994, -0.0588,  0.0613],\n",
+      "        [-0.3510, -0.0631,  0.0694],\n",
+      "        [-0.1868, -0.0051, -0.1227],\n",
+      "        [-0.1523, -0.0802, -0.0488],\n",
+      "        [ 0.1376, -0.0780, -0.0550]])\n",
+      "tensor([[-0.2884, -0.5583,  0.7779],\n",
+      "        [-0.1687, -0.9235,  0.3445],\n",
+      "        [-0.2122,  0.4531, -0.8659],\n",
+      "        [-0.2342, -0.9687, -0.0826],\n",
+      "        [-0.0410, -0.9232, -0.3820],\n",
+      "        [ 0.1174,  0.9904, -0.0724],\n",
+      "        [ 0.1052,  0.9891, -0.1035],\n",
+      "        [-0.0167, -0.1576, -0.9874],\n",
+      "        [-0.0429, -0.9857,  0.1629],\n",
+      "        [-0.7026, -0.1635,  0.6925]])\n",
+      "tensor([2, 0, 2, 2, 2, 0, 0, 0, 0, 0])\n",
+      "tensor([[-7.4200e-01,  1.0763e-01,  4.2520e-03],\n",
+      "        [-2.2678e-01, -5.2463e-02, -3.0582e-02],\n",
+      "        [ 1.1025e-01,  1.7271e-04, -4.3668e-01],\n",
+      "        [ 8.7156e-01, -6.0690e-02,  5.4731e-04],\n",
+      "        [ 1.6981e-01, -5.7563e-02, -3.0582e-02],\n",
+      "        [ 1.5682e-01, -5.7154e-02, -3.2795e-02],\n",
+      "        [ 3.1095e-01, -3.7981e-02,  6.7256e-02],\n",
+      "        [-4.5350e-02,  1.6964e-02, -5.7753e-01],\n",
+      "        [-7.3881e-02, -2.1454e-02, -3.1187e-01],\n",
+      "        [ 9.6300e-01, -3.5575e-02, -2.5578e-02]])\n",
+      "tensor([[-5.0000e-06,  4.9890e-02, -9.9880e-01],\n",
+      "        [-2.5930e-02, -8.5660e-01, -5.1530e-01],\n",
+      "        [ 2.4750e-02,  9.8000e-01,  1.9750e-01],\n",
+      "        [ 2.8090e-02, -7.5730e-01, -6.5250e-01],\n",
+      "        [ 2.0000e-06, -9.8470e-01, -1.7420e-01],\n",
+      "        [ 2.2000e-05, -9.8470e-01, -1.7430e-01],\n",
+      "        [ 6.1760e-01, -5.5510e-01,  5.5710e-01],\n",
+      "        [ 5.9840e-01, -6.2100e-01, -5.0620e-01],\n",
+      "        [-7.3700e-01, -5.8830e-01,  3.3260e-01],\n",
+      "        [ 2.0870e-01, -5.9710e-01, -7.7460e-01]])\n",
+      "tensor([2, 0, 1, 0, 0, 0, 0, 1, 1, 0])\n",
+      "tensor([[-3.6267e-01,  5.8576e-02,  8.5436e-02],\n",
+      "        [ 1.8464e-01, -6.0699e-02,  2.9162e-01],\n",
+      "        [-7.4789e-01,  5.4503e-02, -3.1234e-02],\n",
+      "        [-8.7393e-01,  5.1538e-02, -6.7420e-03],\n",
+      "        [-8.6076e-01,  3.6383e-02, -9.2909e-03],\n",
+      "        [-7.6338e-01,  4.9377e-02, -2.8214e-02],\n",
+      "        [ 6.2780e-01, -2.9967e-04, -6.4372e-03],\n",
+      "        [-2.3007e-01, -2.9967e-04, -1.1507e-02],\n",
+      "        [ 7.1493e-01, -2.9967e-04, -7.1575e-03],\n",
+      "        [ 3.0402e-01, -1.3656e-01, -2.6643e-01]])\n",
+      "tensor([[-9.6300e-04,  7.0790e-01,  7.0630e-01],\n",
+      "        [-5.4190e-01,  2.5280e-01, -8.0150e-01],\n",
+      "        [-1.6110e-01,  2.2720e-01, -9.6040e-01],\n",
+      "        [-1.9110e-01,  4.2300e-04, -9.8160e-01],\n",
+      "        [-1.9550e-01,  4.0750e-02, -9.7990e-01],\n",
+      "        [-2.0900e-01,  8.5360e-02, -9.7420e-01],\n",
+      "        [-3.9000e-05, -8.9660e-01, -4.4280e-01],\n",
+      "        [ 3.1000e-05, -4.2360e-01, -9.0590e-01],\n",
+      "        [ 3.4100e-02, -7.7930e-01, -6.2580e-01],\n",
+      "        [-4.9140e-02, -7.7470e-01,  6.3040e-01]])\n",
+      "tensor([0, 3, 0, 0, 0, 0, 0, 0, 0, 3])\n",
+      "tensor([[-0.0165, -0.1383,  0.1750],\n",
+      "        [-0.8350,  0.2320,  0.0029],\n",
+      "        [-0.8441,  0.2713,  0.0026],\n",
+      "        [ 0.2041,  0.0029, -0.0914],\n",
+      "        [ 0.7941,  0.0831,  0.0370],\n",
+      "        [ 0.6171, -0.0807,  0.0343],\n",
+      "        [ 0.4745, -0.0801,  0.0361],\n",
+      "        [ 0.8916,  0.0094, -0.0541],\n",
+      "        [ 0.4779, -0.0764, -0.0386],\n",
+      "        [ 0.4070, -0.0716, -0.0481]])\n",
+      "tensor([[ 1.0500e-01, -8.1290e-02,  9.9110e-01],\n",
+      "        [-5.8060e-01, -1.5950e-01,  7.9840e-01],\n",
+      "        [-1.2820e-01, -4.6770e-02, -9.9060e-01],\n",
+      "        [ 0.0000e+00,  1.2650e-02, -9.9990e-01],\n",
+      "        [ 1.5520e-01,  8.7940e-01,  4.5020e-01],\n",
+      "        [ 0.0000e+00, -9.2470e-01,  3.8070e-01],\n",
+      "        [ 0.0000e+00, -9.3980e-01,  3.4170e-01],\n",
+      "        [ 2.7990e-01,  2.7600e-01, -9.1950e-01],\n",
+      "        [ 2.1000e-05, -8.7990e-01, -4.7510e-01],\n",
+      "        [ 0.0000e+00, -8.9540e-01, -4.4530e-01]])\n",
+      "tensor([3, 2, 2, 0, 0, 0, 0, 0, 0, 0])\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/nibio/.local/lib/python3.8/site-packages/torch_geometric/deprecation.py:12: UserWarning: 'data.DataLoader' is deprecated, use 'loader.DataLoader' instead\n",
+      "  warnings.warn(out)\n"
+     ]
+    }
+   ],
+   "source": [
+    "import os.path as osp\n",
+    "from torch_geometric.transforms import NormalizeScale\n",
+    "from torch_geometric.data import DataLoader\n",
+    "from my_data_loader import MyData\n",
+    "import torch_geometric.transforms as T\n",
+    "\n",
+    "# Specify the path to your data\n",
+    "path = \"/home/nibio/mutable-outside-world/code/nibio_graph_sem_seg/maciek_data/plane_maciek\"\n",
+    "\n",
+    "random_jitter_translation = 1e-2\n",
+    "random_rotation_interval_x = 15\n",
+    "random_rotation_interval_y = 15\n",
+    "random_rotation_interval_z = 15\n",
+    "validation_split = 0.2\n",
+    "batch_size = 4\n",
+    "num_workers = 6\n",
+    "\n",
+    "transform = T.Compose([\n",
+    "    T.RandomJitter(random_jitter_translation),\n",
+    "    T.RandomRotate(random_rotation_interval_x, axis=0),\n",
+    "    T.RandomRotate(random_rotation_interval_y, axis=1),\n",
+    "    T.RandomRotate(random_rotation_interval_z, axis=2)\n",
+    "])\n",
+    "pre_transform = T.NormalizeScale()\n",
+    "\n",
+    "\n",
+    "\n",
+    "# Preprocessing: Normalize node features to have zero mean and unit variance\n",
+    "transform = NormalizeScale()\n",
+    "\n",
+    "# Load the dataset and apply the transformations\n",
+    "dataset = MyData(path, transform=transform, split='trainval', pre_transform=transform)\n",
+    "\n",
+    "# Load the datasets into PyTorch DataLoaders\n",
+    "train_loader = DataLoader(dataset, batch_size=2, shuffle=False)\n",
+    "\n",
+    "# Iterate over data in DataLoaders\n",
+    "couter = 0\n",
+    "for batch in train_loader:\n",
+    "    # Here `batch` is a batch of data points, where attributes like \n",
+    "    # `pos`, `x`, and `y` are all stacked and aligned in the batch dimension\n",
+    "    pos, x, y, category = batch.pos, batch.x, batch.y, batch.category\n",
+    "    # print just first 10 items of everyghing\n",
+    "    print(pos[:10])\n",
+    "    print(x[:10])\n",
+    "    print(y[:10])\n",
+    "    \n",
+    "    couter += 1\n",
+    "    if couter > 10:\n",
+    "        break\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}