Made compatible with torchsparse 1.4 #63

Open · wants to merge 1 commit into base: main

LeReS/Minist_Test/lib/spvcnn_classsification.py (5 changes: 3 additions & 2 deletions)

@@ -1,6 +1,7 @@
 import torch.nn as nn
 import torchsparse.nn as spnn
-from torchsparse.point_tensor import PointTensor
+from torchsparse.tensor import PointTensor
+
 
 from lib.spvcnn_utils import *
 __all__ = ['SPVCNN_CLASSIFICATION']

@@ -114,7 +115,7 @@ def __init__(self, **kwargs):
             ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1),
             ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
         )
-        self.avg_pool = spnn.GlobalAveragePooling()
+        self.avg_pool = spnn.GlobalAvgPool()
         self.classifier = nn.Sequential(nn.Linear(cs[4], kwargs['num_classes']))
         self.point_transforms = nn.ModuleList([
             nn.Sequential(
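Side note on the two renames in this file, since they are the whole change: in torchsparse 1.4 `PointTensor` is imported from `torchsparse.tensor`, and the global pooling layer is exposed as `spnn.GlobalAvgPool` (formerly `spnn.GlobalAveragePooling`), which averages a SparseTensor's per-voxel features into one dense vector per batch item. A minimal sketch of the updated head, assuming a torchsparse 1.4 install; the channel count, class count, and `classify` helper are illustrative, not taken from the PR:

```python
import torch
import torch.nn as nn
import torchsparse.nn as spnn
from torchsparse import SparseTensor
from torchsparse.tensor import PointTensor  # new import path (was torchsparse.point_tensor)

num_channels, num_classes = 96, 10  # illustrative sizes

# GlobalAvgPool pools all voxels of each batch item into a single feature
# vector, so the result can feed a plain nn.Linear classifier.
avg_pool = spnn.GlobalAvgPool()          # was spnn.GlobalAveragePooling()
classifier = nn.Linear(num_channels, num_classes)

def classify(x: SparseTensor) -> torch.Tensor:
    pooled = avg_pool(x)        # dense tensor, shape [batch_size, num_channels]
    return classifier(pooled)   # logits, shape [batch_size, num_classes]
```

The rest of SPVCNN_CLASSIFICATION is untouched; only the layer name and the import path change.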
LeReS/Minist_Test/lib/spvcnn_utils.py (37 changes: 18 additions & 19 deletions)

@@ -1,7 +1,8 @@
-import torchsparse.nn.functional as spf
-from torchsparse.point_tensor import PointTensor
-from torchsparse.utils.kernel_region import *
-from torchsparse.utils.helpers import *
+import torchsparse.nn.functional as spf#
+import torch
+from torchsparse.tensor import PointTensor
+from torchsparse.tensor import SparseTensor
+from torchsparse.nn.utils import get_kernel_offsets
 
 
 __all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point']

@@ -24,7 +25,6 @@ def initial_voxelize(z, init_res, after_res):
     inserted_feat = spf.spvoxelize(z.F, idx_query, counts)
 
     new_tensor = SparseTensor(inserted_feat, inserted_coords, 1)
-    new_tensor.check()
     z.additional_features['idx_query'][1] = idx_query
     z.additional_features['counts'][1] = counts
     z.C = new_float_coord

@@ -34,49 +34,48 @@

 # x: SparseTensor, z: PointTensor
 # return: SparseTensor
-def point_to_voxel(x, z):
+def point_to_voxel(x: SparseTensor, z: PointTensor):
     if z.additional_features is None or z.additional_features.get('idx_query') is None\
-        or z.additional_features['idx_query'].get(x.s) is None:
+        or z.additional_features['idx_query'].get(x.s[0]) is None:
         #pc_hash = hash_gpu(torch.floor(z.C).int())
         pc_hash = spf.sphash(
             torch.cat([
-                torch.floor(z.C[:, :3] / x.s).int() * x.s,
+                torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
                 z.C[:, -1].int().view(-1, 1)
             ], 1))
         sparse_hash = spf.sphash(x.C)
         idx_query = spf.sphashquery(pc_hash, sparse_hash)
         counts = spf.spcount(idx_query.int(), x.C.shape[0])
-        z.additional_features['idx_query'][x.s] = idx_query
-        z.additional_features['counts'][x.s] = counts
+        z.additional_features['idx_query'][x.s[0]] = idx_query
+        z.additional_features['counts'][x.s[0]] = counts
     else:
-        idx_query = z.additional_features['idx_query'][x.s]
-        counts = z.additional_features['counts'][x.s]
+        idx_query = z.additional_features['idx_query'][x.s[0]]
+        counts = z.additional_features['counts'][x.s[0]]
 
     inserted_feat = spf.spvoxelize(z.F, idx_query, counts)
     new_tensor = SparseTensor(inserted_feat, x.C, x.s)
-    new_tensor.coord_maps = x.coord_maps
-    new_tensor.kernel_maps = x.kernel_maps
+    new_tensor.cmaps = x.cmaps
+    new_tensor.kmaps = x.kmaps
 
     return new_tensor
 
 
 # x: SparseTensor, z: PointTensor
 # return: PointTensor
-def voxel_to_point(x, z, nearest=False):
+def voxel_to_point(x: SparseTensor, z: PointTensor, nearest=False):
     if z.idx_query is None or z.weights is None or z.idx_query.get(
             x.s) is None or z.weights.get(x.s) is None:
-        kr = KernelRegion(2, x.s, 1)
-        off = kr.get_kernel_offset().to(z.F.device)
+        off = get_kernel_offsets(2, x.s, 1, z.F.device)
         #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)
         old_hash = spf.sphash(
             torch.cat([
-                torch.floor(z.C[:, :3] / x.s).int() * x.s,
+                torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
                 z.C[:, -1].int().view(-1, 1)
             ], 1), off)
         pc_hash = spf.sphash(x.C.to(z.F.device))
         idx_query = spf.sphashquery(old_hash, pc_hash)
         weights = spf.calc_ti_weights(z.C, idx_query,
-                                      scale=x.s).transpose(0, 1).contiguous()
+                                      scale=x.s[0]).transpose(0, 1).contiguous()
         idx_query = idx_query.transpose(0, 1).contiguous()
         if nearest:
             weights[:, 1:] = 0.
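Most of the edits in this file follow one pattern: in torchsparse 1.4 a SparseTensor stores its stride as a tuple (hence `x.s[0]` wherever a scalar is needed), the cached coordinate/kernel maps are renamed `cmaps`/`kmaps`, and `get_kernel_offsets` from `torchsparse.nn.utils` replaces the old `KernelRegion`/`get_kernel_offset` pair. A small self-contained sketch of those points, assuming torchsparse 1.4; the toy coordinates and feature width are made up:

```python
import torch
from torchsparse import SparseTensor
from torchsparse.nn.utils import get_kernel_offsets

# Toy input: 4 voxels with (x, y, z, batch) integer coordinates and 8-dim features.
coords = torch.randint(0, 10, (4, 4), dtype=torch.int)
coords[:, -1] = 0                  # single batch item
x = SparseTensor(torch.randn(4, 8), coords)

# The stride is a tuple in 1.4, so scalar arithmetic uses x.s[0] rather than
# x.s -- the reason for the x.s -> x.s[0] edits above.
print(x.s, x.s[0])                 # e.g. (1, 1, 1) and 1

# get_kernel_offsets(size, stride, dilation, device) returns the relative
# offsets of a size^3 kernel at the given stride; it replaces the removed
# KernelRegion(...).get_kernel_offset() call.
off = get_kernel_offsets(2, x.s, 1, x.F.device)
print(off.shape)                   # 8 offsets in 3-D for a size-2 kernel
```

Note the diff keeps the full tuple `x.s` where a stride argument is accepted as-is (e.g. `SparseTensor(inserted_feat, x.C, x.s)` and `get_kernel_offsets(2, x.s, 1, ...)`) and switches to `x.s[0]` only where a scalar is required.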
LeReS/Minist_Test/lib/test_utils.py (32 changes: 18 additions & 14 deletions)

@@ -2,8 +2,10 @@
 import numpy as np
 import torch
 from torchsparse import SparseTensor
-from torchsparse.utils import sparse_collate_fn, sparse_quantize
+from torchsparse.utils.collate import sparse_collate_fn
+from torchsparse.utils.quantize import sparse_quantize
 from plyfile import PlyData, PlyElement
+import matplotlib.pyplot as plt
 
 
 def init_image_coor(height, width, u0=None, v0=None):

@@ -35,20 +37,21 @@ def pcd_to_sparsetensor(pcd, mask_valid, voxel_size=0.01, num_points=100000):
     block_ = pcd_valid
     block = np.zeros_like(block_)
     block[:, :3] = block_[:, :3]
 
-    pc_ = np.round(block_[:, :3] / voxel_size)
+    pc_ = block_
     pc_ -= pc_.min(0, keepdims=1)
     feat_ = block
 
+    # print(pc_.shape)
+
     # transfer point cloud to voxels
-    inds = sparse_quantize(pc_,
-                           feat_,
+    pc, inds = sparse_quantize(pc_,
+                           voxel_size,
                            return_index=True,
-                           return_invs=False)
+                           return_inverse=False)
     if len(inds) > num_points:
         inds = np.random.choice(inds, num_points, replace=False)
 
-    pc = pc_[inds]
 
     feat = feat_[inds]
     lidar = SparseTensor(feat, pc)
     feed_dict = [{'lidar': lidar}]

@@ -67,19 +70,20 @@ def pcd_uv_to_sparsetensor(pcd, u_u0, v_v0, mask_valid, f= 500.0, voxel_size=0.0
     block[:, :] = block_[:, :]
 
 
-    pc_ = np.round(block_[:, :3] / voxel_size)
+    pc_ = block_[:, :3]
     pc_ -= pc_.min(0, keepdims=1)
     feat_ = block
 
     # transfer point cloud to voxels
-    inds = sparse_quantize(pc_,
-                           feat_,
+    pc, inds = sparse_quantize(pc_,
+                           voxel_size,
                            return_index=True,
-                           return_invs=False)
+                           return_inverse=False)
     if len(inds) > num_points:
         inds = np.random.choice(inds, num_points, replace=False)
 
-    pc = pc_[inds]
 
 
     feat = feat_[inds]
     lidar = SparseTensor(feat, pc)
     feed_dict = [{'lidar': lidar}]
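For reference, both `pcd_to_sparsetensor` and `pcd_uv_to_sparsetensor` above are being migrated to the same call pattern: in torchsparse 1.4, `sparse_quantize` is given the raw coordinates plus a voxel size (instead of pre-scaled coordinates and a feature array), `return_invs` is renamed `return_inverse`, and with `return_index=True` it returns the quantized integer coordinates together with the indices of the points that survive voxelization. A minimal sketch of that pattern, assuming torchsparse 1.4; the random point cloud and the 0.01 voxel size are illustrative:

```python
import numpy as np
import torch
from torchsparse import SparseTensor
from torchsparse.utils.quantize import sparse_quantize

# Toy point cloud: 1000 random 3-D points, reused as their own features.
pts = np.random.rand(1000, 3).astype(np.float32)
pts -= pts.min(0, keepdims=True)

# New API: raw coordinates + voxel size in, unique quantized coordinates and
# the index of one surviving point per voxel out.
coords, inds = sparse_quantize(pts, 0.01, return_index=True)
feats = pts[inds]               # one feature vector per occupied voxel

lidar = SparseTensor(torch.from_numpy(feats), torch.from_numpy(coords))
print(coords.shape, feats.shape)
```

The matching import move at the top of the file (`sparse_collate_fn` from `torchsparse.utils.collate`, `sparse_quantize` from `torchsparse.utils.quantize`) is what makes this call pattern available.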