Commit 2da1fd8

Code push

Akash-Kumbar committed Oct 7, 2023
1 parent d31447e commit 2da1fd8
Showing 49 changed files with 2,714 additions and 0 deletions.
6 changes: 6 additions & 0 deletions .polyscope.ini
@@ -0,0 +1,6 @@
{
"windowHeight": 914,
"windowPosX": 1882,
"windowPosY": 60,
"windowWidth": 1405
}
Binary file added __pycache__/pcutils.cpython-38.pyc
8 changes: 8 additions & 0 deletions convert_ply.py
@@ -0,0 +1,8 @@
import open3d as o3d
import numpy as np

# Input path was left blank in the original commit; point this at a .txt of XYZ coordinates.
xyz = np.loadtxt('')
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)

# Output path was likewise left blank; supply a target .ply filename.
o3d.io.write_point_cloud(".ply", pcd)
22 changes: 22 additions & 0 deletions denoising_utils/score_based.py
@@ -0,0 +1,22 @@
import torch
from score_denoise.utils.misc import *
from score_denoise.utils.denoise import *
from score_denoise.models.denoise import *


def denoise(ogPoints, patches, ld_step_size=0.2, ld_num_steps=30, patch_size=256, seed_k=3, denoise_knn=4, step_decay=0.95, get_traj=False):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the pretrained score-based denoising model.
    ckpt = torch.load('score_denoise/pretrained/ckpt.pt', map_location=device)
    model = DenoiseNet(ckpt['args']).to(device)
    model.load_state_dict(ckpt['state_dict'])

    with torch.no_grad():
        model.eval()
        patches = patches.to(torch.float32)
        # Denoise each patch by Langevin dynamics on the learned score field.
        patches_denoised, traj = model.denoise_langevin_dynamics(patches, step_size=ld_step_size, denoise_knn=denoise_knn, step_decay=step_decay, num_steps=ld_num_steps)

    # Merge the denoised patches and downsample back to the original point count.
    pcl_denoised, fps_idx = farthest_point_sampling(patches_denoised.view(1, -1, 3), ogPoints)
    pcl_denoised = pcl_denoised[0]

    if get_traj:
        return pcl_denoised, traj
    return pcl_denoised
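
# Usage sketch (added for illustration; not part of the original commit).
# Assumes the pcutils helpers from this commit, a CUDA device, and the
# pretrained checkpoint loaded above; 'model.off' is a hypothetical file.
if __name__ == '__main__':
    from pcutils import convertToPatchKNN, readOff

    xyz = torch.from_numpy(readOff('model.off', 10000)).cuda()
    patches = convertToPatchKNN(xyz, patch_size=256, seed_k=3)   # (num_patches, 256, 3)
    clean = denoise(ogPoints=xyz.shape[0], patches=patches)
    print(clean.shape)                                           # expected: (10000, 3)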
25 changes: 25 additions & 0 deletions imgui.ini
@@ -0,0 +1,25 @@
[Window][Debug##Default]
Pos=60,60
Size=400,400
Collapsed=0

[Window][Polyscope]
Pos=10,10
Size=305,156
Collapsed=0

[Window][Structures]
Pos=10,186
Size=305,718
Collapsed=0

[Window][Controls]
Pos=325,10
Size=32,32
Collapsed=0

[Window][Selection]
Pos=895,30
Size=500,128
Collapsed=0

Binary file added noising_utils/__pycache__/noise.cpython-38.pyc
Binary file added noising_utils/__pycache__/noise.cpython-39.pyc
76 changes: 76 additions & 0 deletions noising_utils/noise.py
@@ -0,0 +1,76 @@
import numpy as np
import torch

def genGaussianNoise(xyz, std=0.015):
    # Isotropic Gaussian perturbation with standard deviation `std`.
    noise = torch.randn_like(xyz) * std
    return xyz + noise

def genLaplacianNoise(xyz, std=0.01):
    # Laplace-distributed perturbation; match the dtype/device of `xyz`
    # (np.random.laplace returns a float64 CPU array).
    noise = torch.from_numpy(np.random.laplace(0, std, size=xyz.shape)).to(xyz)
    return xyz + noise

def genDiscreteNoise(xyz, std=0.01):
    # Offset a fraction `prob` of the points along each of six axis-aligned
    # directions; points with uni_rand >= 6 * prob receive no noise.
    scale = std
    prob = 0.1
    template = np.array([
        [scale, 0, 0],
        [-scale, 0, 0],
        [0, scale, 0],
        [0, -scale, 0],
        [0, 0, scale],
        [0, 0, -scale],
    ], dtype=np.float32)
    num_points = xyz.shape[0]
    uni_rand = np.random.uniform(size=num_points)
    noise = np.zeros([num_points, 3])
    for i in range(template.shape[0]):
        idx = np.logical_and(prob * i <= uni_rand, uni_rand < prob * (i + 1))
        noise[idx] = template[i].reshape(1, 3)
    noise = torch.FloatTensor(noise).to(xyz)
    return xyz + noise

def genUniformBallNoise(xyz, std):
    # Sample noise uniformly inside a ball of radius `std`: pick a uniform
    # direction via (theta, phi) and a radius r = std * u^(1/3), which makes
    # the samples uniform in volume.
    scale = std
    N = xyz.shape[0]
    phi = np.random.uniform(0, 2*np.pi, size=N)
    costheta = np.random.uniform(-1, 1, size=N)
    u = np.random.uniform(0, 1, size=N)
    theta = np.arccos(costheta)
    r = scale * u ** (1/3)
noise = np.zeros([N, 3])
noise[:, 0] = r * np.sin(theta) * np.cos(phi)
noise[:, 1] = r * np.sin(theta) * np.sin(phi)
noise[:, 2] = r * np.cos(theta)
noise = torch.FloatTensor(noise).to(xyz)
return xyz + noise

def genCovNoise(xyz, std):
    # Anisotropic noise drawn from the empirical covariance of the cloud
    # itself, scaled by `std`.
    num_points = xyz.shape[0]
    cov = np.cov(xyz.cpu().numpy().T)
    noise = np.random.multivariate_normal(np.zeros(3), cov, num_points)
    noise = torch.FloatTensor(noise).to(xyz)
    return xyz + noise * std


def addNoiseToPC(xyz, std, noise_type='Gaussian'):
    # Returns the original points concatenated with a noised copy,
    # doubling the point count.
    if noise_type == 'Gaussian':
        noise = genGaussianNoise(xyz, std)
    elif noise_type == 'Laplacian':
        noise = genLaplacianNoise(xyz, std)
    elif noise_type == 'Discrete':
        noise = genDiscreteNoise(xyz, std)
    elif noise_type == 'UniformBall':
        noise = genUniformBallNoise(xyz, std)
    elif noise_type == 'Covariance':
        noise = genCovNoise(xyz, std)
    else:
        raise ValueError(f'Unknown noise_type: {noise_type}')
    return torch.cat([xyz, noise], dim=0)
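
# Usage sketch (added for illustration; not part of the original commit).
if __name__ == '__main__':
    pts = torch.rand(1000, 3).cuda()
    noisy = addNoiseToPC(pts, std=0.01, noise_type='Laplacian')
    print(noisy.shape)   # expected: torch.Size([2000, 3]) -- original + noised copy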
251 changes: 251 additions & 0 deletions pcutils.py
@@ -0,0 +1,251 @@
import torch
import numpy as np
from torch_cluster import fps
from pytorch3d.ops import knn_points, ball_query
import open3d as o3d
import polyscope as ps
ps.init()

def visPC(xyz, color=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if color is not None:
pcd.colors = o3d.utility.Vector3dVector(color)
o3d.visualization.draw_geometries([pcd])

### Normalize point cloud
def normalize_point_cloud(input):
    """
    input: pc of shape [P, 3] or [N, P, 3]
    output: pc, centroid, furthest_distance
    """
if len(input.shape) == 2:
axis = 0
elif len(input.shape) == 3:
axis = 1
centroid = np.mean(input, axis=axis, keepdims=True)
input = input - centroid
furthest_distance = np.amax(
np.sqrt(np.sum(input ** 2, axis=-1, keepdims=True)), axis=axis, keepdims=True)
input = input / furthest_distance
return input, centroid, furthest_distance

def farthest_point_sampling(pcls, num_pnts):
    """
    Args:
        pcls: A batch of point clouds, (B, N, 3).
        num_pnts: Target number of points.
    Returns:
        sampled: (B, num_pnts, 3) sampled points.
        indices: list of per-cloud index tensors.
    """
    # Over-sample slightly via the ratio, then truncate to exactly num_pnts.
    ratio = 0.01 + num_pnts / pcls.size(1)
sampled = []
indices = []
for i in range(pcls.size(0)):
idx = fps(pcls[i], ratio=ratio, random_start=False)[:num_pnts]
sampled.append(pcls[i:i+1, idx, :])
indices.append(idx)
sampled = torch.cat(sampled, dim=0)
return sampled, indices
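
# Illustrative usage (added; not in the original commit):
#   pcls = torch.rand(2, 2048, 3)
#   sampled, idx = farthest_point_sampling(pcls, 512)   # sampled: (2, 512, 3)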

def Minkowski_distance(src, dst, p):
"""
Calculate Minkowski distance between each two points.
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point Minkowski distance, [B, N, M]
"""
return torch.cdist(src,dst,p=p)

def gather_idx(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(
device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points


def get_dist(src, dst):
    """
    Calculate the squared Euclidean distance between each point pair in two point clouds.
    Inputs:
        src[B, N, 3]: point cloud 1
        dst[B, M, 3]: point cloud 2
    Return:
        dist[B, N, M]: squared distance matrix
    """
    B, N, _ = src.shape
    _, M, _ = dst.shape
    # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, computed batch-wise.
    dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
    dist += torch.sum(src ** 2, -1).view(B, N, 1)
    dist += torch.sum(dst ** 2, -1).view(B, 1, M)
    return dist

def dilated_ball_queryOG(dist, h, base_radius, max_radius):
'''
Density-dilated ball query
Inputs:
dist[B, M, N]: distance matrix
h(float): bandwidth
base_radius(float): minimum search radius
max_radius(float): maximum search radius
Returns:
radius[B, M, 1]: search radius of point
'''

# kernel density estimation (Eq. 8)
sigma = 1
gauss = torch.exp(-(dist)/(2*(h**2)*(sigma**2))) # K(x-x_i/h), [B, M, N]
kd_dist = torch.sum(gauss, dim=-1).unsqueeze(-1) # kernel distance, [B, M, 1]

# normalization
kd_score = kd_dist / (torch.max(kd_dist, dim=1)[0].unsqueeze(-1) + 1e-9) # [B, M, 1]
radius = base_radius + (max_radius - base_radius)*kd_score # kd_score -> max, base_radius -> max_radius

return radius

def dilated_ball_query(dist, h, base_radius, max_radius):
'''
Density-dilated ball query
Inputs:
dist[B, M, N]: distance matrix
h(float): bandwidth
base_radius(float): minimum search radius
max_radius(float): maximum search radius
Returns:
radius[B, M, 1]: search radius of point
'''

    # density weight: a smooth sigmoid-like kernel, replacing the Gaussian
    # KDE of Eq. 8 used in dilated_ball_queryOG
    gauss = 0.5 + ((0.5 * torch.sgn(dist)) * (1 - torch.exp(-dist/h)))
kd_dist = torch.sum(gauss, dim=-1).unsqueeze(-1) # kernel distance, [B, M, 1]

# normalization
kd_score = kd_dist / (torch.max(kd_dist, dim=1)[0].unsqueeze(-1) + 1e-9) # [B, M, 1]
radius = base_radius + (max_radius - base_radius)*kd_score # kd_score -> max, base_radius -> max_radius

return radius

def density_aware_knn(x, min_k, max_k, seed_k=3):
base_index = min_k
max_index = max_k
N = x.shape[1]
approx_patch_size = int((min_k + max_k) // 2)
ncentroids = int(seed_k*N/approx_patch_size)
centroid,_ = farthest_point_sampling(x, ncentroids)
dist = get_dist(centroid, x)
    sigma = 1
    h = 0.1
    gauss = torch.exp(-(dist)/(2*(h**2)*(sigma**2)))   # K(x-x_i/h), [B, M, N]
    kd_dist = torch.sum(gauss, dim=-1).unsqueeze(-1)   # kernel distance, [B, M, 1]
    kd_score = kd_dist / (torch.max(kd_dist, dim=1)[0].unsqueeze(-1) + 1e-9)   # [B, M, 1]
    # Per-centroid neighbourhood size, interpolated between min_k and max_k by density.
    ks = torch.ceil(base_index + (max_index - base_index) * kd_score).to(torch.int).squeeze(0).squeeze(1)
    patches = []
    indices = []
    for i in range(ks.shape[0]):
        # Note: knn_points recomputes neighbours for all centroids at each K;
        # only row i is kept.
        _, idx, points = knn_points(centroid, x, K=ks[i].item(), return_nn=True)
        idx = idx.squeeze(0)
        points = points.squeeze(0)
        patches.append(points[i])
        indices.append(idx[i])

    return patches, indices


### Convert point cloud to patches
def convertToPatchKNN(xyz, patch_size=256, seed_k=3):
N, d = xyz.size()
xyz = xyz.unsqueeze(0)
seed_pnts, _ = farthest_point_sampling(xyz, int(seed_k * N / patch_size))
_, _, patches = knn_points(seed_pnts, xyz, K=patch_size, return_nn=True)
patches = patches[0]
return patches

def convertToPatchBQ(xyz, radius=0.2, max_patch_size=128, seed_k=4):
N, d = xyz.size()
xyz = xyz.unsqueeze(0)
ncentroids = int(seed_k*N/max_patch_size)
seed_pnts, _ = farthest_point_sampling(xyz, ncentroids)
    _, _, patches = ball_query(seed_pnts, xyz, K=max_patch_size, radius=radius, return_nn=True)
patches = patches[0]
return patches

def convertToPatchDilatedBQ(xyz, base_radius=0.05, seed_k=3):
N, d = xyz.size()
approx_patch_size = 256
xyz = xyz.unsqueeze(0)
ncentroids = int(seed_k*N/approx_patch_size)
centroid,_ = farthest_point_sampling(xyz, ncentroids)
    # Note: get_dist returns squared distances, so base_radius/max_radius are
    # effectively compared against squared lengths here.
    dist = get_dist(centroid, xyz)
    radius = dilated_ball_query(dist, h=0.1, base_radius=base_radius, max_radius=base_radius*3)
    mask = (dist < radius).float().squeeze(0)
xyz = xyz.squeeze(0)
patches = []
for i in range(ncentroids):
indices = (mask[i] == 1).nonzero(as_tuple=True)[0]
patch = xyz[indices]
patches.append(patch)
return patches

def convertToPatchDAKNN(xyz, min_k=64, max_k=512, seed_k=3):
    xyz = xyz.unsqueeze(0)
    return density_aware_knn(xyz, min_k, max_k, seed_k)

def readOff(path, n):
meshD = o3d.io.read_triangle_mesh(path)
pcd = meshD.sample_points_uniformly(n)
xyz = np.array(pcd.points, dtype=np.float32)

return xyz

def polyRenderPC(xyz, colors, radius = 0.005):
ps_cloud = ps.register_point_cloud("my points", xyz, enabled=True, radius=radius)
# basic color visualization
ps_cloud.add_color_quantity("color_meant", colors)
ps.show()

if __name__ == '__main__':
    temp = torch.rand((1, 10000, 3)).cuda()
    patches, indices = density_aware_knn(temp, 64, 512)
    for p in patches:
        print(p.shape)
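    # Illustrative addition (not in the original commit): patch extraction via
    # the other helpers in this file.
    cloud = temp[0]                                    # (10000, 3)
    knn_patches = convertToPatchKNN(cloud, patch_size=256, seed_k=3)
    print(knn_patches.shape)                           # (ncentroids, 256, 3)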
