diff --git a/.gitignore b/.gitignore
index 00433c2..efedb60 100644
--- a/.gitignore
+++ b/.gitignore
@@ -137,14 +137,20 @@ dmypy.json
.pyre/
# Datasets
+data_by*
data/
+data_cube_2/
logs/
results/
+results_by*
+geometry/
output/
dataset/
!BundleTrack/XMem/inference/data
run_used.yml
cn_traj/
+frames_annotated*
+frames_to_annotate*
# IDE
.vscode
@@ -152,4 +158,6 @@ cn_traj/
# Data files
*.npy
*.png
-*.obj
\ No newline at end of file
+*.obj
+!media/*
+!assets/**/*
diff --git a/BundleTrack/.gitignore b/BundleTrack/.gitignore
index 3ec97a8..eaca6b0 100644
--- a/BundleTrack/.gitignore
+++ b/BundleTrack/.gitignore
@@ -27,4 +27,6 @@ lf-net-release/release
masks/
*.tar.gz
*.pth
-!SwiftNet/swiftnet_resnet18_old.pth
\ No newline at end of file
+!SwiftNet/swiftnet_resnet18_old.pth
+
+LoFTR/weights
\ No newline at end of file
diff --git a/BundleTrack/LoFTR/demo/demo_loftr.py b/BundleTrack/LoFTR/demo/demo_loftr.py
index 60974d9..d1f5b44 100644
--- a/BundleTrack/LoFTR/demo/demo_loftr.py
+++ b/BundleTrack/LoFTR/demo/demo_loftr.py
@@ -15,7 +15,7 @@
import numpy as np
import matplotlib.cm as cm
-os.sys.path.append("../") # Add the project directory
+os.sys.path.append("../") # Add the BundleTrack project directory
from src.loftr import LoFTR, default_cfg
from src.config.default import get_cfg_defaults
try:
diff --git a/BundleTrack/config_behave.yml b/BundleTrack/config_behave.yml
index 85840cb..9ff952d 100644
--- a/BundleTrack/config_behave.yml
+++ b/BundleTrack/config_behave.yml
@@ -1,4 +1,10 @@
-data_dir: /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/behave/BOWEN_ADDON/Date03_Sub03_boxmedium.2.color
+# NOTE: This is NOT the BundleSDF configuration file for the BundleSDF/PLL
+# collaboration project. For that project, make edits in
+# BundleTrack/config_toss.yml instead.
+
+bundlesdf_run_id: null # To be filled in for an experiment.
+
+video_dir: /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/behave/BOWEN_ADDON/Date03_Sub03_boxmedium.2.color
model_name: textured_simple
model_dir: ""
debug_dir: /home/bowen/debug/BundleTrack/
diff --git a/BundleTrack/config_ho3d.yml b/BundleTrack/config_ho3d.yml
index 29f114c..7a115b9 100644
--- a/BundleTrack/config_ho3d.yml
+++ b/BundleTrack/config_ho3d.yml
@@ -1,4 +1,10 @@
-data_dir: /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3/evaluation/SM1
+# NOTE: This is NOT the BundleSDF configuration file for the BundleSDF/PLL
+# collaboration project. For that project, make edits in
+# BundleTrack/config_toss.yml instead.
+
+bundlesdf_run_id: null # To be filled in for an experiment.
+
+video_dir: /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3/evaluation/SM1
model_name: textured_simple
model_dir: ""
debug_dir: /home/bowen/debug/BundleTrack/
diff --git a/BundleTrack/config_toss.yml b/BundleTrack/config_toss.yml
new file mode 100644
index 0000000..0cea8cf
--- /dev/null
+++ b/BundleTrack/config_toss.yml
@@ -0,0 +1,121 @@
+# BundleSDF configuration file for the BundleSDF/PLL collaboration project.
+
+bundlesdf_run_id: null # To be filled in for an experiment.
+
+video_dir: /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3/evaluation/SM1
+model_name: textured_simple
+model_dir: ""
+debug_dir: /home/bowen/debug/BundleTrack/
+init_pose_dir: ""
+SPDLOG: 2
+port: '5555'
+seg_port: '1111'
+nerf_port: "9999"
+
+toss_frames: []
+
+visible_angle: 70 # A point is regarded as visible if the angle (deg) between its
+                  # normal and the ray to the camera origin is within this value.
+erode_mask: 3
+
+segmentation:
+ ob_scales: [0.3,0.3,0.3]
+ tolerance: 0.03
+
+depth_processing:
+ zfar: 2.0 # 1 is too small for cube dataset
+ erode:
+ radius: 1
+ diff: 0.001
+    ratio: 0.8 # If the ratio is larger than this, the depth is set to 0
+ bilateral_filter:
+ radius: 2
+ sigma_D: 2
+ sigma_R: 100000
+ outlier_removal:
+ num: 30
+ std_mul: 3
+ edge_normal_thres: 10 #deg between normal and ray
+ denoise_cloud: False
+ percentile: 95
+
+bundle:
+ num_iter_outter: 7
+ num_iter_inner: 5
+ window_size: 5 #exclude keyframes, include new frame
+ max_BA_frames: 10
+ subset_selection_method: normal_orientation_nearest
+ depth_association_radius: 5 # 0: findDenseCorr;
+ # 1: findDenseCorrNearestNeighbor3D
+ non_neighbor_max_rot: 90
+ non_neighbor_min_visible: 0.1 # ratio of pixel visible
+  icp_pose_rot_thres: 60 # Rotations larger than this value (deg) are ignored for ICP
+ w_rpi: 0
+ w_p2p: 1 # Used in loss.cpp
+ w_fm: 1
+ w_sdf: 0
+ w_pm: 0
+ robust_delta: 0.005
+ min_fm_edges_newframe: 15
+ image_downscale: [4]
+ feature_edge_dist_thres: 0.01
+ feature_edge_normal_thres: 30 # Normal angle should be within this range
+ max_optimized_feature_loss: 0.03
+
+keyframe:
+ min_interval: 1
+ min_feat_num: 0
+ min_trans: 0
+ min_rot: 10 # 5
+ min_visible: 1
+
+sift:
+ scales: [2,4,8]
+ max_match_per_query: 5
+ nOctaveLayers: 3
+ contrastThreshold: 0.01
+ edgeThreshold: 50
+ sigma: 1.6
+
+feature_corres:
+ mutual: True
+ map_points: True
+ max_dist_no_neighbor: 0.02 # 0.01
+ max_normal_no_neighbor: 45 # 20
+ max_dist_neighbor: 0.03 # 0.02
+ max_normal_neighbor: 45 # 30
+ suppression_patch_size: 5
+ max_view_normal_angle: 180
+ min_match_with_ref: 5
+ resize: 400
+ rematch_after_nerf: True
+
+ransac:
+ max_iter: 2000
+ num_sample: 3
+ inlier_dist: 0.01
+ inlier_normal_angle: 20
+ desired_succ_rate: 0.99
+  max_trans_neighbor: 0.02 # The RANSAC-estimated pose shouldn't move too far
+ max_rot_deg_neighbor: 30
+ max_trans_no_neighbor: 0.01
+ max_rot_no_neighbor: 10
+ epipolar_thres: 1
+ min_match_after_ransac: 5
+
+p2p:
+ projective: false
+ max_dist: 0.02
+ max_normal_angle: 45
+
+sdf_edge:
+ max_dist: 0.02
+
+shape:
+ res: 0.005
+ xrange: [-0.2,0.2]
+ yrange: [-0.2,0.2]
+ zrange: [-0.2,0.2]
+ max_weight: 100
+ truncate_dist: 0.005
+
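Note on the new config above: a minimal sketch of reading its fields from Python with ruamel.yaml, the same loader used in BundleTrack/scripts/data_reader.py. The path is the file added here; treating `toss_frames` entries as 1-based frame numbers follows the Bundler.cpp change later in this patch and is otherwise an assumption.

```python
# Sketch: load BundleTrack/config_toss.yml and read the fields added here.
import ruamel.yaml

yaml = ruamel.yaml.YAML()
with open("BundleTrack/config_toss.yml", "r") as ff:
    cfg_track = yaml.load(ff)

run_id = cfg_track["bundlesdf_run_id"]        # null until an experiment fills it in
toss_frames = list(cfg_track["toss_frames"])  # assumed 1-based frame numbers forced to keyframes
zfar = cfg_track["depth_processing"]["zfar"]  # depth beyond this many meters is dropped
print(run_id, toss_frames, zfar)
```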
diff --git a/BundleTrack/config_ycbineoat.yml b/BundleTrack/config_ycbineoat.yml
index 47e461e..730890a 100644
--- a/BundleTrack/config_ycbineoat.yml
+++ b/BundleTrack/config_ycbineoat.yml
@@ -1,4 +1,10 @@
-data_dir: /home/bowen/debug/ycbineoat
+# NOTE: This is NOT the BundleSDF configuration file for the BundleSDF/PLL
+# collaboration project. For that project, make edits in
+# BundleTrack/config_toss.yml instead.
+
+bundlesdf_run_id: null # To be filled in for an experiment.
+
+video_dir: /home/bowen/debug/ycbineoat
model_name: textured_simple
model_dir: ""
debug_dir: /home/bowen/debug/BundleTrack/
diff --git a/BundleTrack/scripts/data_reader.py b/BundleTrack/scripts/data_reader.py
index 6bc46f4..eb928e4 100644
--- a/BundleTrack/scripts/data_reader.py
+++ b/BundleTrack/scripts/data_reader.py
@@ -13,16 +13,25 @@
yaml = ruamel.yaml.YAML()
code_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(f'{code_dir}/../../')
-from Utils import *
+from bundlenets.Utils import *
HO3D_ROOT = '/mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3'
class YcbineoatReader:
- def __init__(self,video_dir, downscale=1, shorter_side=None):
+ def __init__(self,video_dir, downscale=1, shorter_side=None, occluded=None,offset_frames=1):
self.video_dir = video_dir
self.downscale = downscale
- self.color_files = sorted(glob.glob(f"{self.video_dir}/rgb/*.png"))
+ occ_label = '' if occluded is None else f'_{occluded}'
+ offset_label = '' if offset_frames == 1 else f'_offset_{offset_frames}'
+ self.rgb_folder = f'rgb{occ_label}{offset_label}'
+ self.mask_folder = f'masks{occ_label}{offset_label}'
+ self.depth_folder = f'depth{offset_label}'
+ self.gt_pose_folder = f'annotated_poses{offset_label}'
+        # Fall back to the plain 'depth' folder if the offset depth folder is missing
+ if not os.path.exists(f"{self.video_dir}/{self.depth_folder}"):
+ self.depth_folder = 'depth'
+ self.color_files = sorted(glob.glob(f"{self.video_dir}/{self.rgb_folder}/*.png"))
self.K = np.loadtxt(f'{video_dir}/cam_K.txt').reshape(3,3)
self.id_strs = []
for color_file in self.color_files:
@@ -37,7 +46,7 @@ def __init__(self,video_dir, downscale=1, shorter_side=None):
self.W = int(self.W*self.downscale)
self.K[:2] *= self.downscale
- self.gt_pose_files = sorted(glob.glob(f'{self.video_dir}/annotated_poses/*'))
+ self.gt_pose_files = sorted(glob.glob(f'{self.video_dir}/{self.gt_pose_folder}/*'))
self.videoname_to_object = {
'bleach0': "021_bleach_cleanser",
@@ -73,14 +82,14 @@ def get_color(self,i):
return color
def get_mask(self,i):
- mask = cv2.imread(self.color_files[i].replace('rgb','masks'),-1)
+ mask = cv2.imread(self.color_files[i].replace(self.rgb_folder, self.mask_folder),-1)
if len(mask.shape)==3:
mask = (mask.sum(axis=-1)>0).astype(np.uint8)
mask = cv2.resize(mask, (self.W,self.H), interpolation=cv2.INTER_NEAREST)
return mask
def get_depth(self,i):
- depth = cv2.imread(self.color_files[i].replace('rgb','depth'),-1)/1e3
+ depth = cv2.imread(self.color_files[i].replace(self.rgb_folder, self.depth_folder),-1)/1e3
depth = cv2.resize(depth, (self.W,self.H), interpolation=cv2.INTER_NEAREST)
return depth
@@ -91,12 +100,12 @@ def get_xyz_map(self,i):
return xyz_map
def get_occ_mask(self,i):
- hand_mask_file = self.color_files[i].replace('rgb','masks_hand')
+ hand_mask_file = self.color_files[i].replace(self.rgb_folder,'masks_hand')
occ_mask = np.zeros((self.H,self.W), dtype=bool)
if os.path.exists(hand_mask_file):
occ_mask = occ_mask | (cv2.imread(hand_mask_file,-1)>0)
- right_hand_mask_file = self.color_files[i].replace('rgb','masks_hand_right')
+ right_hand_mask_file = self.color_files[i].replace(self.rgb_folder,'masks_hand_right')
if os.path.exists(right_hand_mask_file):
occ_mask = occ_mask | (cv2.imread(right_hand_mask_file,-1)>0)
@@ -181,5 +190,5 @@ def get_gt_pose(self,i):
else:
ob_in_cam_gt[:3,3] = meta['objTrans']
ob_in_cam_gt[:3,:3] = cv2.Rodrigues(meta['objRot'].reshape(3))[0]
- ob_in_cam_gt = glcam_in_cvcam@ob_in_cam_gt
+ ob_in_cam_gt = GLCAM_IN_CVCAM@ob_in_cam_gt
return ob_in_cam_gt
\ No newline at end of file
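A usage sketch for the extended reader: the `video_dir`, the `occluded` label, and the `offset_frames` value below are illustrative, but the folder names shown are what the constructor above resolves them to.

```python
# Sketch: construct the reader with the new occluded/offset arguments.
from BundleTrack.scripts.data_reader import YcbineoatReader

reader = YcbineoatReader(video_dir="data/toss_cube_01", occluded="occluded",
                         offset_frames=3)
# With these arguments the reader looks for
#   rgb_occluded_offset_3/    masks_occluded_offset_3/
#   depth_offset_3/           annotated_poses_offset_3/
# under video_dir, and falls back to plain depth/ if depth_offset_3/ is absent.
color = reader.get_color(0)
depth = reader.get_depth(0)   # meters, resized to the working resolution
mask = reader.get_mask(0)
```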
diff --git a/BundleTrack/src/Bundler.cpp b/BundleTrack/src/Bundler.cpp
index fa97953..2b4c46d 100644
--- a/BundleTrack/src/Bundler.cpp
+++ b/BundleTrack/src/Bundler.cpp
@@ -262,7 +262,9 @@ void Bundler::processNewFrame(std::shared_ptr<Frame> frame)
bool Bundler::checkAndAddKeyframe(std::shared_ptr<Frame> frame)
{
- if (frame->_id==0)
+  const auto toss_frames = (*yml)["toss_frames"].as<std::vector<int>>();
+ int cnt = count(toss_frames.begin(), toss_frames.end(), frame->_id + 1);
+ if (frame->_id==0 || cnt>0)
{
_keyframes.push_back(frame);
SPDLOG("Added frame {} as keyframe, current #keyframe: {}", frame->_id_str, _keyframes.size());
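A Python mirror of the new keyframe check, assuming `toss_frames` holds 1-based frame numbers (the C++ compares entries against `frame->_id + 1`, and frame ids are 0-based):

```python
# Sketch of Bundler::checkAndAddKeyframe's new forced-keyframe condition.
def is_forced_keyframe(frame_id, toss_frames):
    """frame_id is the tracker's 0-based id; toss_frames is assumed 1-based."""
    return frame_id == 0 or (frame_id + 1) in toss_frames

assert is_forced_keyframe(0, [])            # the first frame is always a keyframe
assert is_forced_keyframe(41, [42, 97])     # frame 42 (1-based) is forced in
assert not is_forced_keyframe(10, [42, 97])
```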
diff --git a/BundleTrack/src/Frame.cpp b/BundleTrack/src/Frame.cpp
index 1073fc2..e748844 100644
--- a/BundleTrack/src/Frame.cpp
+++ b/BundleTrack/src/Frame.cpp
@@ -162,11 +162,100 @@ void Frame::setNewInitCoordinate()
pcl::io::savePLYFile(fmt::format("{}/cloud_init.ply", debug_dir), *cloud);
Utils::outlierRemovalStatistic(cloud,cloud,3,30);
pcl::io::savePLYFile(fmt::format("{}/cloud_for_init_coord.ply", debug_dir), *cloud);
- Eigen::MatrixXf mat = cloud->getMatrixXfMap(); // (D,N)
- Eigen::MatrixXf pts = mat.block(0,0,3,cloud->points.size());
- Eigen::Vector3f max_xyz = pts.rowwise().maxCoeff();
- Eigen::Vector3f min_xyz = pts.rowwise().minCoeff();
- _pose_in_model.block(0,3,3,1) << -(max_xyz+min_xyz)/2;
+
+ // Create the KdTree object for the search method of the extraction
+  pcl::search::KdTree<pcl::PointXYZRGBNormal>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZRGBNormal>);
+ tree->setInputCloud(cloud);
+
+ // Set up the cluster extraction
+  std::vector<pcl::PointIndices> cluster_indices;
+  pcl::EuclideanClusterExtraction<pcl::PointXYZRGBNormal> ec;
+ ec.setClusterTolerance(0.1); // 10cm
+ ec.setMinClusterSize(100); // Adjust based on your point cloud density
+ ec.setMaxClusterSize(50000); // Adjust based on your expected object size
+ ec.setSearchMethod(tree);
+ ec.setInputCloud(cloud);
+ ec.extract(cluster_indices);
+
+ // Find the largest cluster
+ size_t max_size = 0;
+ int largest_cluster_idx = -1;
+ for (size_t i = 0; i < cluster_indices.size(); ++i) {
+ if (cluster_indices[i].indices.size() > max_size) {
+ max_size = cluster_indices[i].indices.size();
+ largest_cluster_idx = i;
+ }
+ }
+
+ if (largest_cluster_idx >= 0) {
+ // Create a new point cloud containing only the largest cluster
+ PointCloudRGBNormal::Ptr largest_cluster(new PointCloudRGBNormal);
+    pcl::ExtractIndices<pcl::PointXYZRGBNormal> extract;
+ pcl::PointIndices::Ptr indices(new pcl::PointIndices(cluster_indices[largest_cluster_idx]));
+ extract.setInputCloud(cloud);
+ extract.setIndices(indices);
+ extract.setNegative(false);
+ extract.filter(*largest_cluster);
+
+ // Save the largest cluster for debugging
+ pcl::io::savePLYFile(fmt::format("{}/cloud_for_init_coord_cluster.ply", debug_dir), *largest_cluster);
+
+ // Calculate centroid of largest cluster
+ Eigen::Vector3f centroid = Eigen::Vector3f::Zero();
+ for (const auto& pt : largest_cluster->points) {
+ centroid += pt.getVector3fMap();
+ }
+ centroid /= largest_cluster->points.size();
+
+ _pose_in_model.block(0,3,3,1) << -centroid;
+ } else{
+ // Fallback if no clusters found
+ std::cerr << "No clusters found!" << std::endl;
+
+ // Get Z values
+    std::vector<float> z_values;
+ for (const auto& pt : cloud->points) {
+ z_values.push_back(pt.z);
+ }
+
+ // Calculate Z statistics
+ float z_mean = std::accumulate(z_values.begin(), z_values.end(), 0.0f) / z_values.size();
+
+ // Calculate standard deviation
+ float z_variance = 0.0f;
+ for (const float z : z_values) {
+ float diff = z - z_mean;
+ z_variance += diff * diff;
+ }
+ z_variance /= (z_values.size() - 1); // Use N-1 for sample standard deviation
+ float z_std = std::sqrt(z_variance);
+
+ // Filter points within n standard deviations
+ PointCloudRGBNormal::Ptr filtered_cloud(new PointCloudRGBNormal);
+ for (const auto& pt : cloud->points) {
+ if (std::abs(pt.z - z_mean) <= 2.0f * z_std) { // 2 sigma rule
+ filtered_cloud->points.push_back(pt);
+ }
+ }
+ pcl::io::savePLYFile(fmt::format("{}/cloud_for_init_coord_2sigma.ply", debug_dir), *filtered_cloud);
+
+ // Calculate mean of all points in filtered cloud
+ Eigen::Vector3f sum = Eigen::Vector3f::Zero();
+ for (const auto& pt : filtered_cloud->points) {
+ sum += pt.getVector3fMap();
+ }
+ Eigen::Vector3f mean = sum / filtered_cloud->points.size();
+ _pose_in_model.block(0,3,3,1) << -mean;
+ }
+
+ // // Use filtered cloud for initialization
+ // Eigen::MatrixXf mat = filtered_cloud->getMatrixXfMap();
+ // Eigen::MatrixXf pts = mat.block(0,0,3,filtered_cloud->points.size());
+ // // Eigen::MatrixXf mat = cloud->getMatrixXfMap(); // (D,N)
+ // // Eigen::MatrixXf pts = mat.block(0,0,3,cloud->points.size());
+ // Eigen::Vector3f max_xyz = pts.rowwise().maxCoeff();
+ // Eigen::Vector3f min_xyz = pts.rowwise().minCoeff();
+ // _pose_in_model.block(0,3,3,1) << -(max_xyz+min_xyz)/2;
}
void Frame::updateDepthCPU()
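A minimal numpy sketch of the fallback branch above, assuming `pts` is the (N, 3) initial object cloud: keep points whose z lies within 2 sigma of the mean, then use the negated mean of the survivors as the translation written into `_pose_in_model`, mirroring the case where Euclidean clustering finds no cluster.

```python
import numpy as np

def init_translation_fallback(pts, n_sigma=2.0):
    """Return the -centroid translation after a 2-sigma z filter (fallback path)."""
    z = pts[:, 2]
    keep = np.abs(z - z.mean()) <= n_sigma * z.std(ddof=1)  # ddof=1 matches the C++ N-1 variance
    return -pts[keep].mean(axis=0)

rng = np.random.default_rng(0)
pts = np.concatenate([rng.normal(0.5, 0.01, (500, 3)),             # object points
                      rng.normal([0.5, 0.5, 1.5], 0.01, (5, 3))])  # far depth outliers
print(init_translation_fallback(pts))   # roughly [-0.5, -0.5, -0.5]
```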
diff --git a/T_support_points.pt b/T_support_points.pt
deleted file mode 100644
index d55e60c..0000000
Binary files a/T_support_points.pt and /dev/null differ
diff --git a/assets/config_toss_nerf.yml b/assets/config_toss_nerf.yml
new file mode 100644
index 0000000..67e89c5
--- /dev/null
+++ b/assets/config_toss_nerf.yml
@@ -0,0 +1,140 @@
+# NeRF configuration file for the BundleSDF/PLL collaboration project.
+
+### To be filled in for an experiment.
+bundlesdf_run_id: null
+video_dir: null
+debug_dir: null
+nerf_temp_dir: null
+nerf_dir: null
+geometry_dir: null
+contact_in_cam_dir: null
+
+use_pll_for_training: false
+use_hpc: false
+###
+
+notes: ''
+n_step: 500 # Number of training steps for online NeRF.
+netdepth: 8
+netwidth: 256
+netdepth_fine: 8
+netwidth_fine: 256
+N_rand: 2048 # Batch number of rays
+first_frame_ray_in_batch: 0
+lrate: 0.01
+lrate_pose: 0.01
+pose_optimize_start: 0
+decay_rate: 0.1
+chunk: 99999999999
+netchunk: 6553600
+no_batching: 0
+amp: true
+
+N_samples: 64 # 2000 # 128 #number of coarse samples per ray
+N_samples_around_depth: 256 # 64
+N_importance: 0
+N_importance_iter: 1
+perturb: 1
+use_viewdirs: 1
+i_embed: 1 #set 1 for hashed embedding, 0 for default positional encoding, 2 for spherical; 3 for octree grid
+i_embed_views: 2 #set 1 for hashed embedding, 0 for default positional encoding, 2 for spherical
+multires: 8 #log2 of max freq for positional encoding (3D location)
+multires_views: 3 #log2 of max freq for positional encoding (2D direction)
+feature_grid_dim: 2
+raw_noise_std: 0
+white_bkgd: 0
+gradient_max_norm: 0.1
+gradient_pose_max_norm: 0.1
+
+# logging/saving options
+i_print: 200 #999999
+i_img: .inf # 999999
+i_weights: 999999
+i_mesh: .inf # 999999
+i_nerf_normals: .inf # 999999
+i_save_ray: .inf # 999999
+i_pose: 999999
+save_octree_clouds: True
+octree_with_cnet: False
+octree_convex: False
+
+finest_res: 256 # 128
+base_res: 16
+num_levels: 16 # 4
+log2_hashmap_size: 22
+n_train_image: 500 # 300
+use_octree: 1
+first_frame_weight: 1 # 10
+denoise_depth_use_octree_cloud: true
+octree_embed_base_voxel_size: 0.02
+octree_smallest_voxel_size: 0.02 # This determines the smallest feature vox size
+octree_raytracing_voxel_size: 0.02
+octree_dilate_size: 0.02 # meters
+down_scale_ratio: 1
+bounding_box: [[-1,-1,-1], [1,1,1]]
+farthest_pose_sampling: 0 # Sampling of train images; this replaces uniform skip.
+use_mask: 1
+dilate_mask_size: 0
+rays_valid_depth_only: true
+
+# Near and far determine valid depth ranges in meters. Depth returns that are
+# greater than far or less than near from the camera are masked out.
+near: 0.1
+far: 2.0 # 2 # 1 is too small for cube dataset
+
+rgb_weight: 100 # 10
+depth_weight: 0
+trunc: 0.01 #length of the truncation region in meters
+trunc_start: 0.01
+sdf_lambda: 5
+neg_trunc_ratio: 1 # -trunc distance ratio compared to +trunc
+trunc_decay_type: ''
+sdf_loss_type: l2
+fs_weight: 100
+empty_weight: 0.01
+fs_rgb_weight: 0
+trunc_weight: 6000
+sparse_loss_weight: 0
+tv_loss_weight: 0
+frame_features: 2 # 0 #number of channels of the learnable per-frame features
+optimize_poses: 1 #optimize a pose refinement for the initial poses
+pose_reg_weight: 0
+point_cloud_loss_weight: 0
+point_cloud_loss_normal_weight: 0
+eikonal_weight: 0
+normal_loss_weight: 0
+feature_reg_weight: 0.1
+# contact_pts_weight: 20
+# The following four will be used in online_nerf but will be overwritten in the offline global nerf.
+support_pts_weight: 2
+convexity_weight: 1
+convexity_vision_weight: 0
+hc_sdf_weight_lower: 1
+hc_sdf_weight_upper: 0.1
+power_interp: 1
+
+gradient_weight: 0
+pretrain_eikonal_weight: 0
+eps_minimal_surface: 0.5
+pretrain_minimal_surface_weight: 0
+pretrain_hessian_weight: 0
+pretrain_normal_direction_weight: 0
+pretrain_finite_diff_weight: 0
+share_coarse_fine: 1
+mode: sdf
+fs_sdf: 0.1 # 0.001 # Uncertain free space, in double normalized units (e.g.
+ # 0.1 means 10% of the truncation distance.)
+crop: 0
+mesh_resolution: 0.005
+max_trans: 0.02 # meters
+max_rot: 20 # deg
+
+continual: True
+
+######### dbscan
+dbscan_eps: 0.06
+dbscan_eps_min_samples: 1
+
+####### bundlenerf
+sync_max_delay: 0 # 0 for strict sync
+
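The near/far comment above amounts to plain range masking of the metric depth map; a minimal numpy sketch of that masking with the defaults from this file (the function name is illustrative):

```python
import numpy as np

def mask_depth(depth_m, near=0.1, far=2.0):
    """Zero out depth returns outside [near, far] meters and return the valid mask."""
    valid = (depth_m >= near) & (depth_m <= far)
    return np.where(valid, depth_m, 0.0), valid

depth = np.array([[0.05, 0.6],
                  [1.90, 2.5]])
masked, valid = mask_depth(depth)
print(masked)   # [[0.   0.6 ] [1.9  0.  ]]
print(valid)    # [[False  True] [ True False]]
```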
diff --git a/assets/bottle_textured_mesh.obj b/assets/obselete/bottle_textured_mesh.obj
similarity index 100%
rename from assets/bottle_textured_mesh.obj
rename to assets/obselete/bottle_textured_mesh.obj
diff --git a/assets/box_textured_mesh.obj b/assets/obselete/box_textured_mesh.obj
similarity index 100%
rename from assets/box_textured_mesh.obj
rename to assets/obselete/box_textured_mesh.obj
diff --git a/assets/box_textured_mesh_convex.obj b/assets/obselete/box_textured_mesh_convex.obj
similarity index 100%
rename from assets/box_textured_mesh_convex.obj
rename to assets/obselete/box_textured_mesh_convex.obj
diff --git a/assets/config.yml b/assets/obselete/config.yml
similarity index 86%
rename from assets/config.yml
rename to assets/obselete/config.yml
index 123027f..734eacb 100644
--- a/assets/config.yml
+++ b/assets/obselete/config.yml
@@ -1,3 +1,8 @@
+# NOTE: This is NOT the NeRF configuration file for the BundleSDF/PLL
+# collaboration project. For that project, make edits in assets/config_toss_nerf.yml instead.
+
+video_dir: /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/github/bundlesdf/data/bundlesdf_bundlesdf_e03000196
+
notes: ''
n_step: 500
netdepth: 8
@@ -45,7 +50,6 @@ finest_res: 128
base_res: 16
num_levels: 4
log2_hashmap_size: 22
-datadir: /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/github/bundlesdf/data/bundlesdf_bundlesdf_e03000196
n_train_image: 300
use_octree: 1
first_frame_weight: 10
@@ -84,10 +88,12 @@ point_cloud_loss_normal_weight: 0
eikonal_weight: 0
normal_loss_weight: 0
feature_reg_weight: 0.1
-contact_pts_weight: 20
-support_pts_weight: 10
-hc_sdf_weight_lower: 10
-hc_sdf_weight_upper: 10
+# contact_pts_weight: 20
+# support_pts_weight: 10
+# convexity_weight: 0
+# hc_sdf_weight_lower: 10
+# hc_sdf_weight_upper: 10
+gradient_weight: 0
pretrain_eikonal_weight: 0
eps_minimal_surface: 0.5
pretrain_minimal_surface_weight: 0
@@ -109,3 +115,4 @@ dbscan_eps_min_samples: 1
####### bundlenerf
sync_max_delay: 0 # 0 for strict sync
+
diff --git a/assets/config_cn.yaml b/assets/obselete/config_cn.yaml
similarity index 100%
rename from assets/config_cn.yaml
rename to assets/obselete/config_cn.yaml
diff --git a/assets/dair_bottle_aligned.ply b/assets/obselete/dair_bottle_aligned.ply
similarity index 100%
rename from assets/dair_bottle_aligned.ply
rename to assets/obselete/dair_bottle_aligned.ply
diff --git a/assets/dair_bottle_amp.obj b/assets/obselete/dair_bottle_amp.obj
similarity index 100%
rename from assets/dair_bottle_amp.obj
rename to assets/obselete/dair_bottle_amp.obj
diff --git a/assets/dair_napkin.obj b/assets/obselete/dair_napkin.obj
similarity index 100%
rename from assets/dair_napkin.obj
rename to assets/obselete/dair_napkin.obj
diff --git a/assets/dair_napkin_aligned.ply b/assets/obselete/dair_napkin_aligned.ply
similarity index 100%
rename from assets/dair_napkin_aligned.ply
rename to assets/obselete/dair_napkin_aligned.ply
diff --git a/assets/dair_napkin_amp.obj b/assets/obselete/dair_napkin_amp.obj
similarity index 100%
rename from assets/dair_napkin_amp.obj
rename to assets/obselete/dair_napkin_amp.obj
diff --git a/assets/dair_napkin_convex.obj b/assets/obselete/dair_napkin_convex.obj
similarity index 100%
rename from assets/dair_napkin_convex.obj
rename to assets/obselete/dair_napkin_convex.obj
diff --git a/assets/gt_bottle.obj b/assets/obselete/gt_bottle.obj
similarity index 100%
rename from assets/gt_bottle.obj
rename to assets/obselete/gt_bottle.obj
diff --git a/assets/gt_bottle.urdf b/assets/obselete/gt_bottle.urdf
similarity index 100%
rename from assets/gt_bottle.urdf
rename to assets/obselete/gt_bottle.urdf
diff --git a/assets/gt_bottle_meters.obj b/assets/obselete/gt_bottle_meters.obj
similarity index 100%
rename from assets/gt_bottle_meters.obj
rename to assets/obselete/gt_bottle_meters.obj
diff --git a/assets/gt_bottle_simple_convex.obj b/assets/obselete/gt_bottle_simple_convex.obj
similarity index 100%
rename from assets/gt_bottle_simple_convex.obj
rename to assets/obselete/gt_bottle_simple_convex.obj
diff --git a/assets/gt_bottle_simple_with_normals.obj b/assets/obselete/gt_bottle_simple_with_normals.obj
similarity index 100%
rename from assets/gt_bottle_simple_with_normals.obj
rename to assets/obselete/gt_bottle_simple_with_normals.obj
diff --git a/assets/gt_cube_simple.obj b/assets/obselete/gt_cube_simple.obj
similarity index 100%
rename from assets/gt_cube_simple.obj
rename to assets/obselete/gt_cube_simple.obj
diff --git a/assets/gt_napkin.obj b/assets/obselete/gt_napkin.obj
similarity index 100%
rename from assets/gt_napkin.obj
rename to assets/obselete/gt_napkin.obj
diff --git a/assets/gt_napkin.urdf b/assets/obselete/gt_napkin.urdf
similarity index 100%
rename from assets/gt_napkin.urdf
rename to assets/obselete/gt_napkin.urdf
diff --git a/assets/gt_napkin_meters.obj b/assets/obselete/gt_napkin_meters.obj
similarity index 100%
rename from assets/gt_napkin_meters.obj
rename to assets/obselete/gt_napkin_meters.obj
diff --git a/assets/gt_napkin_simple.obj b/assets/obselete/gt_napkin_simple.obj
similarity index 100%
rename from assets/gt_napkin_simple.obj
rename to assets/obselete/gt_napkin_simple.obj
diff --git a/assets/gt_prism.urdf b/assets/obselete/gt_prism.urdf
similarity index 100%
rename from assets/gt_prism.urdf
rename to assets/obselete/gt_prism.urdf
diff --git a/assets/gt_prism_simple.obj b/assets/obselete/gt_prism_simple.obj
similarity index 100%
rename from assets/gt_prism_simple.obj
rename to assets/obselete/gt_prism_simple.obj
diff --git a/assets/gt_toblerone.urdf b/assets/obselete/gt_toblerone.urdf
similarity index 100%
rename from assets/gt_toblerone.urdf
rename to assets/obselete/gt_toblerone.urdf
diff --git a/assets/gt_toblerone_simple.obj b/assets/obselete/gt_toblerone_simple.obj
similarity index 100%
rename from assets/gt_toblerone_simple.obj
rename to assets/obselete/gt_toblerone_simple.obj
diff --git a/assets/mesh_cleaned_cube0.obj b/assets/obselete/mesh_cleaned_cube0.obj
similarity index 100%
rename from assets/mesh_cleaned_cube0.obj
rename to assets/obselete/mesh_cleaned_cube0.obj
diff --git a/assets/mesh_cleaned_cube2.obj b/assets/obselete/mesh_cleaned_cube2.obj
similarity index 100%
rename from assets/mesh_cleaned_cube2.obj
rename to assets/obselete/mesh_cleaned_cube2.obj
diff --git a/assets/napkin_textured_mesh.obj b/assets/obselete/napkin_textured_mesh.obj
similarity index 100%
rename from assets/napkin_textured_mesh.obj
rename to assets/obselete/napkin_textured_mesh.obj
diff --git a/assets/napkin_textured_mesh_convex.obj b/assets/obselete/napkin_textured_mesh_convex.obj
similarity index 100%
rename from assets/napkin_textured_mesh_convex.obj
rename to assets/obselete/napkin_textured_mesh_convex.obj
diff --git a/assets/napkin_textured_mesh_rescale_simplified.obj b/assets/obselete/napkin_textured_mesh_rescale_simplified.obj
similarity index 100%
rename from assets/napkin_textured_mesh_rescale_simplified.obj
rename to assets/obselete/napkin_textured_mesh_rescale_simplified.obj
diff --git a/assets/napkin_textured_mesh_with_normals.obj b/assets/obselete/napkin_textured_mesh_with_normals.obj
similarity index 100%
rename from assets/napkin_textured_mesh_with_normals.obj
rename to assets/obselete/napkin_textured_mesh_with_normals.obj
diff --git a/assets/realsense_pose_cube_old.yaml b/assets/obselete/realsense_pose_cube_old.yaml
similarity index 100%
rename from assets/realsense_pose_cube_old.yaml
rename to assets/obselete/realsense_pose_cube_old.yaml
diff --git a/assets/test_004.obj b/assets/obselete/test_004.obj
similarity index 100%
rename from assets/test_004.obj
rename to assets/obselete/test_004.obj
diff --git a/assets/test_with_23_iter_344.obj b/assets/obselete/test_with_23_iter_344.obj
similarity index 100%
rename from assets/test_with_23_iter_344.obj
rename to assets/obselete/test_with_23_iter_344.obj
diff --git a/build.sh b/build.sh
index 66ae1e2..d0fc40d 100644
--- a/build.sh
+++ b/build.sh
@@ -1,4 +1,4 @@
ROOT=$(pwd)
-cd /kaolin && pip install -e .
-cd ${ROOT}/mycuda && rm -rf build *egg* && pip install -e .
+# cd /kaolin && pip install -e .
+cd ${ROOT}/mycuda && rm -rf build *egg* && pip install -e . --no-build-isolation
cd ${ROOT}/BundleTrack && rm -rf build && mkdir build && cd build && cmake .. && make -j11
\ No newline at end of file
diff --git a/Utils.py b/bundlenets/Utils.py
similarity index 97%
rename from Utils.py
rename to bundlenets/Utils.py
index f4ab08c..b8a18cc 100644
--- a/Utils.py
+++ b/bundlenets/Utils.py
@@ -34,7 +34,7 @@
BAD_DEPTH = 99
BAD_COLOR = 128
-glcam_in_cvcam = np.array([[1,0,0,0],
+GLCAM_IN_CVCAM = np.array([[1,0,0,0],
[0,-1,0,0],
[0,0,-1,0],
[0,0,0,1]])
@@ -476,9 +476,11 @@ def ray_trace(self,rays_o,rays_d,level,debug=False):
-def get_optimized_poses_in_real_world(poses_normalized, pose_array, sc_factor, translation):
+def get_optimized_poses_in_real_world(poses_normalized, pose_array, sc_factor,
+ translation):
'''
- @poses_normalized: np array, cam_in_ob (opengl convention), normalized to [-1,1] and centered
+ @poses_normalized: np array, cam_in_ob (opengl convention), normalized to
+ [-1,1] and centered
@pose_array: PoseArray, delta poses
Return:
cam_in_ob, real-world unit, opencv convention
@@ -488,7 +490,8 @@ def get_optimized_poses_in_real_world(poses_normalized, pose_array, sc_factor, t
original_poses[:, :3, 3] -= translation
# Apply pose transformation
- tf = pose_array.get_matrices(np.arange(len(poses_normalized))).reshape(-1,4,4).data.cpu().numpy()
+ tf = pose_array.get_matrices(np.arange(len(poses_normalized))
+ ).reshape(-1,4,4).data.cpu().numpy()
optimized_poses = tf@poses_normalized
optimized_poses = np.array(optimized_poses).astype(np.float32)
@@ -496,16 +499,19 @@ def get_optimized_poses_in_real_world(poses_normalized, pose_array, sc_factor, t
optimized_poses[:, :3, 3] -= translation
original_init_ob_in_cam = optimized_poses[0].copy()
- offset = np.linalg.inv(original_init_ob_in_cam)@original_poses[0] # Anchor to the first frame whose pose shouldn't change
+
+ # Anchor to the first frame whose pose shouldn't change
+ offset = np.linalg.inv(original_init_ob_in_cam)@original_poses[0]
+
for i in range(len(optimized_poses)):
new_ob_in_cam = optimized_poses[i]@offset
optimized_poses[i] = new_ob_in_cam
- optimized_poses[i] = optimized_poses[i]@glcam_in_cvcam
+ optimized_poses[i] = optimized_poses[i]@GLCAM_IN_CVCAM
- return optimized_poses,offset
+ return optimized_poses, offset
-def mesh_to_real_world(mesh,pose_offset,translation,sc_factor):
+def mesh_to_real_world(mesh, pose_offset, translation, sc_factor):
'''
@pose_offset: optimized delta pose of the first frame. Usually it's identity
'''
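A small numpy sketch of the first-frame anchoring performed in get_optimized_poses_in_real_world (the helper name is illustrative): right-multiplying every optimized pose by offset = inv(optimized[0]) @ original[0] maps the optimized first-frame pose back onto its original value, so the first frame is left unchanged.

```python
import numpy as np

def anchor_to_first_frame(optimized, original_first):
    """Right-multiply all optimized 4x4 poses so pose 0 equals original_first."""
    offset = np.linalg.inv(optimized[0]) @ original_first
    return optimized @ offset, offset

# Toy poses: identity rotations with random translations.
rng = np.random.default_rng(1)
opt = np.tile(np.eye(4), (3, 1, 1))
opt[:, :3, 3] = rng.normal(size=(3, 3))
orig0 = np.eye(4)
orig0[:3, 3] = [0.1, 0.2, 0.3]
anchored, offset = anchor_to_first_frame(opt, orig0)
assert np.allclose(anchored[0], orig0)   # first frame anchored exactly
```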
diff --git a/benchmark_ho3d.py b/bundlenets/benchmark_ho3d.py
similarity index 97%
rename from benchmark_ho3d.py
rename to bundlenets/benchmark_ho3d.py
index 2d84941..368f4fa 100644
--- a/benchmark_ho3d.py
+++ b/bundlenets/benchmark_ho3d.py
@@ -7,11 +7,12 @@
# license agreement from NVIDIA CORPORATION is strictly prohibited.
-from Utils import *
+from bundlenets.Utils import *
import joblib,argparse
import pandas as pd
-code_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(f'{code_dir}/BundleTrack/scripts')
+
+from bundlenets.file_utils import BUNDLENETS_REPO_DIR
+sys.path.append(f'{BUNDLENETS_REPO_DIR}/BundleTrack/scripts')
from data_reader import *
diff --git a/bundlenets/bundlesdf.py b/bundlenets/bundlesdf.py
new file mode 100644
index 0000000..52b17d3
--- /dev/null
+++ b/bundlenets/bundlesdf.py
@@ -0,0 +1,1245 @@
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+
+from bundlenets.Utils import *
+from bundlenets.nerf_runner import *
+from bundlenets.tool import *
+
+import os.path as op
+
+import my_cpp
+from bundlenets.gui import *
+from BundleTrack.scripts.data_reader import *
+from bundlenets.loftr_wrapper import LoftrRunner
+import multiprocessing,threading
+try:
+ multiprocessing.set_start_method('spawn')
+except:
+ pass
+
+from bundlenets import file_utils
+from tqdm import tqdm
+
+try:
+ import gtsam
+except ImportError:
+ logging.warning("gtsam not found, won't generate poses_all_frame_after_nerf.txt")
+ gtsam = None
+
+def run_gui(gui_dict, gui_lock):
+ print("GUI started")
+ with gui_lock:
+ gui = BundleSdfGui(img_height=200)
+ gui_dict['started'] = True
+
+ local_dict = {}
+
+ while dpg.is_dearpygui_running():
+ with gui_lock:
+ if gui_dict['join']:
+ break
+
+ for k in ['mesh', 'color', 'mask', 'ob_in_cam', 'id_str', 'K',
+ 'n_keyframe','nerf_num_frames']:
+ if k in gui_dict:
+ local_dict[k] = gui_dict[k]
+ del gui_dict[k]
+
+ if 'nerf_num_frames' in local_dict:
+ gui.set_nerf_num_frames(local_dict['nerf_num_frames'])
+
+ if 'mesh' in local_dict:
+ logging.info(f"mesh V: {local_dict['mesh'].vertices.shape}")
+ gui.update_mesh(local_dict['mesh'])
+
+ if 'color' in local_dict:
+ gui.update_frame(rgb=local_dict['color'], mask=local_dict['mask'],
+ ob_in_cam=local_dict['ob_in_cam'],
+ id_str=local_dict['id_str'], K=local_dict['K'],
+ n_keyframe=local_dict['n_keyframe'])
+
+ local_dict = {}
+
+ dpg.render_dearpygui_frame()
+ time.sleep(0.03)
+
+ dpg.destroy_context()
+
+
+def run_nerf(p_dict, kf_to_nerf_list, lock, cfg_nerf, translation, sc_factor,
+ start_nerf_keyframes, use_gui, gui_lock, gui_dict, debug_dir, contact_in_cam_dir):
+ vox_res = 0.01
+ nerf_num_frames = 0
+ cnt_nerf = -1
+ rgbs_all = []
+ depths_all = []
+ normal_maps_all = []
+ masks_all = []
+ occ_masks_all = []
+ if contact_in_cam_dir is not None:
+ ps_all = []
+ sdfs_all = []
+ vs_all = []
+ sdf_bounds_all = []
+ else:
+ ps_all = None
+ sdfs_all = None
+ vs_all = None
+ sdf_bounds_all = None
+ prev_pcd_real_scale = None
+ tf_normalize = None
+ if translation is not None:
+ tf_normalize = np.eye(4)
+ tf_normalize[:3,3] = translation
+ tf1 = np.eye(4)
+ tf1[:3,:3] *= sc_factor
+ tf_normalize = tf1@tf_normalize
+ cfg_nerf['sc_factor'] = float(sc_factor)
+ cfg_nerf['translation'] = translation
+
+ with lock:
+ SPDLOG = p_dict['SPDLOG']
+
+ while 1:
+ with lock:
+ join = p_dict['join']
+
+ if join:
+ break
+
+ skip = False
+ with lock:
+      if (cnt_nerf==-1 and len(kf_to_nerf_list)>=start_nerf_keyframes) or \
+          (cnt_nerf>=0 and len(kf_to_nerf_list)>0):
+ p_dict['running'] = True
+ frame_id = p_dict['frame_id']
+ cam_in_obs = p_dict['cam_in_obs'].copy()
+ rgbs = []
+ depths = []
+ normal_maps = []
+ masks = []
+ occ_masks = []
+ if contact_in_cam_dir is not None:
+ ps = []
+ sdfs = []
+ vs = []
+ sdf_bounds = []
+ else:
+ ps, sdfs, vs, sdf_bounds = None, None, None, None ## BIBIT
+ for i_kf, f in enumerate(kf_to_nerf_list):
+ rgbs.append(f['rgb'])
+ depths.append(f['depth'])
+ masks.append(f['mask'])
+ if f['normal_map'] is not None:
+ normal_maps.append(f['normal_map'])
+ if f['occ_mask'] is not None:
+ occ_masks.append(f['occ_mask'])
+ if contact_in_cam_dir is not None:
+ if i_kf == len(kf_to_nerf_list)-1:
+ if f'{frame_id}' in os.listdir(op.join(contact_in_cam_dir, 'from_support_points')):
+ ps_path = op.join(contact_in_cam_dir, 'from_support_points', f'{frame_id}', 'ps.pt')
+ sdfs_path = op.join(contact_in_cam_dir, 'from_support_points', f'{frame_id}', 'sdfs.pt')
+ ps.append(torch.load(ps_path))
+ sdfs.append(torch.load(sdfs_path))
+ else:
+ ps.append(None)
+ sdfs.append(None)
+ if f'{frame_id}' in os.listdir(op.join(contact_in_cam_dir, 'from_mesh_surface')): # could be a smaller set
+ vs_path = op.join(contact_in_cam_dir, 'from_mesh_surface', f'{frame_id}', 'vs.pt')
+ sdf_bounds_path = op.join(contact_in_cam_dir, 'from_mesh_surface', f'{frame_id}', 'sdf_bounds.pt')
+ vs.append(torch.load(vs_path))
+ sdf_bounds.append(torch.load(sdf_bounds_path))
+ else:
+ vs.append(None)
+ sdf_bounds.append(None)
+ else:
+ ps.append(None)
+ sdfs.append(None)
+ vs.append(None)
+ sdf_bounds.append(None)
+ K = p_dict['K']
+ nerf_num_frames += len(rgbs)
+ p_dict['nerf_num_frames'] = nerf_num_frames
+ kf_to_nerf_list[:] = []
+ if use_gui:
+ with gui_lock:
+ gui_dict['nerf_num_frames'] = nerf_num_frames
+ else:
+ skip = True
+
+ if skip:
+ time.sleep(0.01)
+ continue
+
+ cnt_nerf += 1
+ rgbs_all += list(rgbs)
+ depths_all += list(depths)
+ masks_all += list(masks)
+ if normal_maps is not None:
+ normal_maps_all += list(normal_maps)
+ if occ_masks is not None:
+ occ_masks_all += list(occ_masks)
+ if contact_in_cam_dir is not None:
+ ps_all += list(ps)
+ sdfs_all += list(sdfs)
+ vs_all += list(vs)
+ sdf_bounds_all += list(sdf_bounds)
+
+ # This is a NeRF output directory used while simultaneously doing tracking.
+ out_dir = op.join(debug_dir, frame_id, 'nerf')
+ logging.info(f"out_dir: {out_dir}")
+ os.makedirs(out_dir, exist_ok=True)
+ file_utils.remove_and_add_directory(cfg_nerf['nerf_temp_dir'])
+
+ glcam_in_obs = cam_in_obs@GLCAM_IN_CVCAM
+
+ if cfg_nerf['continual']:
+ if cnt_nerf==0:
+ if translation is None:
+ sc_factor, translation, pcd_real_scale, _pcd_normalized = \
+ compute_scene_bounds(None, glcam_in_obs, K, use_mask=True,
+ base_dir=cfg_nerf['nerf_temp_dir'],
+ rgbs=np.array(rgbs_all),
+ depths=np.array(depths_all),
+ masks=np.array(masks_all),
+ eps=cfg_nerf['dbscan_eps'],
+ min_samples=cfg_nerf['dbscan_eps_min_samples'])
+
+ sc_factor *= 0.7 # Ensure whole object within bound
+ cfg_nerf['sc_factor'] = float(sc_factor)
+ cfg_nerf['translation'] = translation
+ tf_normalize = np.eye(4)
+ tf_normalize[:3,3] = translation
+ tf1 = np.eye(4)
+ tf1[:3,:3] *= sc_factor
+ tf_normalize = tf1@tf_normalize
+
+ pcd_all = pcd_real_scale
+
+ else:
+ pcd_all = prev_pcd_real_scale
+ for i in range(len(rgbs)):
+ pts, colors = compute_scene_bounds_worker(
+ None, K, glcam_in_obs[len(glcam_in_obs)-len(rgbs)+i], use_mask=True,
+ rgb=rgbs[i], depth=depths[i], mask=masks[i]
+ )
+ pcd_all += toOpen3dCloud(pts, colors)
+ pcd_all = pcd_all.voxel_down_sample(vox_res)
+ _, keep_mask = find_biggest_cluster(
+ np.asarray(pcd_all.points), eps=cfg_nerf['dbscan_eps'],
+ min_samples=cfg_nerf['dbscan_eps_min_samples']
+ )
+ keep_ids = np.arange(len(np.asarray(pcd_all.points)))[keep_mask]
+ pcd_all = pcd_all.select_by_index(keep_ids)
+
+ ########## Clear memory
+ rgbs_all = []
+ depths_all = []
+ normal_maps_all = []
+ masks_all = []
+ occ_masks_all = []
+ if contact_in_cam_dir is not None:
+ ps_all = []
+ sdfs_all = []
+ vs_all = []
+ sdf_bounds_all = []
+ else:
+ ps_all = None
+ sdfs_all = None
+ vs_all = None
+ sdf_bounds_all = None
+
+ pcd_normalized = copy.deepcopy(pcd_all)
+ pcd_normalized.transform(tf_normalize)
+ if normal_maps is not None and len(normal_maps)>0:
+ normal_maps = np.array(normal_maps)
+ else:
+ normal_maps = None
+ rgbs, depths, masks, normal_maps, poses, \
+ ps, sdfs, vs, sdf_bounds = preprocess_data(
+ np.array(rgbs), np.array(depths), np.array(masks),
+ normal_maps=normal_maps, poses=glcam_in_obs,
+ sc_factor=cfg_nerf['sc_factor'], translation=cfg_nerf['translation'],
+ ps=ps, sdfs=sdfs, vs=vs, sdf_bounds=sdf_bounds)
+
+ else:
+ logging.info(f"compute_scene_bounds, latest nerf frame {frame_id}")
+ sc_factor, translation, pcd_real_scale, pcd_normalized = \
+ compute_scene_bounds(None, glcam_in_obs, K, use_mask=True,
+ base_dir=cfg_nerf['nerf_temp_dir'],
+ rgbs=np.array(rgbs_all),
+ depths=np.array(depths_all),
+ masks=np.array(masks_all),
+ eps=cfg_nerf['dbscan_eps'],
+ min_samples=cfg_nerf['dbscan_eps_min_samples'])
+
+ cfg_nerf['sc_factor'] = float(sc_factor)
+ cfg_nerf['translation'] = translation
+
+ if normal_maps_all is not None and len(normal_maps_all)>0:
+ normal_maps = np.array(normal_maps_all)
+ else:
+ normal_maps = None
+
+ logging.info(f"preprocess_data, latest nerf frame {frame_id}")
+ rgbs, depths, masks, normal_maps, poses, \
+ ps, sdfs, vs, sdf_bounds = preprocess_data(
+ np.array(rgbs_all), np.array(depths_all), np.array(masks_all),
+ normal_maps=normal_maps, poses=glcam_in_obs,
+ sc_factor=cfg_nerf['sc_factor'], translation=cfg_nerf['translation'],
+ ps=ps_all, sdfs=sdfs_all, vs=vs_all, sdf_bounds=sdf_bounds_all)
+
+ # cfg_nerf['sampled_frame_ids'] = np.arange(len(rgbs_all))
+
+
+ if SPDLOG>=2:
+ np.savetxt(f"{cfg_nerf['nerf_temp_dir']}/trainval_poses.txt",
+ glcam_in_obs.reshape(-1,4))
+ np.savetxt(f"{debug_dir}/{frame_id}/poses_before_nerf.txt",
+ np.array(cam_in_obs).reshape(-1,4))
+
+ if len(occ_masks_all)>0:
+ if cfg_nerf['continual']:
+ occ_masks = np.array(occ_masks)
+ else:
+ occ_masks = np.array(occ_masks_all)
+ else:
+ occ_masks = None
+
+ if cnt_nerf==0:
+ logging.info(f"First nerf run, create Runner, latest nerf frame {frame_id}")
+ nerf = NerfRunner(cfg_nerf, rgbs, depths=depths, masks=masks,
+ normal_maps=normal_maps, occ_masks=occ_masks,
+ poses=poses, K=K, build_octree_pcd=pcd_normalized,
+ ps=ps, sdfs=sdfs, vs=vs, sdf_bounds=sdf_bounds)
+ else:
+ if cfg_nerf['continual']:
+ logging.info(f"add_new_frames, latest nerf frame {frame_id}")
+ logging.info(f"add_new_frames: len(rgbs) = {len(rgbs)}, len(poses)= {len(poses)}")
+ nerf.add_new_frames(rgbs, depths, masks, normal_maps, poses,
+ occ_masks=occ_masks, new_pcd=pcd_normalized,
+ reuse_weights=False, ps=ps, sdfs=sdfs, vs=vs,
+ sdf_bounds=sdf_bounds)
+ else:
+ nerf = NerfRunner(cfg_nerf, rgbs, depths=depths, masks=masks,
+ normal_maps=normal_maps, occ_masks=occ_masks,
+ poses=poses, K=K, build_octree_pcd=pcd_normalized,
+ ps=ps, sdfs=sdfs, vs=vs, sdf_bounds=sdf_bounds)
+
+ logging.info(f"Start training, latest nerf frame {frame_id}")
+ nerf.train()
+ logging.info(f"Training done, latest nerf frame {frame_id}")
+
+ optimized_cvcam_in_obs, offset = get_optimized_poses_in_real_world(
+ poses, nerf.models['pose_array'], cfg_nerf['sc_factor'],
+ cfg_nerf['translation'])
+ logging.info(f"Number of optimized poses is {len(optimized_cvcam_in_obs)}")
+ logging.info(f"Number of optimized poses input is {len(poses)}")
+
+ logging.info("Getting mesh")
+ mesh = nerf.extract_mesh(isolevel=0, voxel_size=cfg_nerf['mesh_resolution'])
+ if mesh is None:
+ logging.info("No mesh extracted")
+ else:
+ mesh = mesh_to_real_world(mesh, pose_offset=offset,
+ translation=nerf.cfg['translation'],
+ sc_factor=nerf.cfg['sc_factor'])
+
+ with lock:
+ p_dict['optimized_cvcam_in_obs'] = optimized_cvcam_in_obs
+ p_dict['running'] = False
+ # p_dict['nerf_last'] = nerf #!NOTE not pickable
+ p_dict['mesh'] = mesh
+
+ logging.info(f"nerf done at frame {frame_id}")
+
+ if cfg_nerf['continual']:
+ prev_pcd_real_scale = pcd_all.voxel_down_sample(vox_res)
+
+ ####### Log
+ if SPDLOG>=2:
+ os.system(f"cp -r {cfg_nerf['nerf_temp_dir']}/image_step_*.png " + \
+ f"{out_dir}/")
+ with open(f"{out_dir}/config.yml",'w') as ff:
+ tmp = copy.deepcopy(cfg_nerf)
+ for k in tmp.keys():
+ if isinstance(tmp[k],np.ndarray):
+ tmp[k] = tmp[k].tolist()
+ yaml.dump(tmp,ff)
+ shutil.copy(f"{out_dir}/config.yml",f"{cfg_nerf['nerf_temp_dir']}/")
+ np.savetxt(f"{debug_dir}/{frame_id}/poses_after_nerf.txt",
+ np.array(optimized_cvcam_in_obs).reshape(-1,4))
+ logging.info(f'>>>>>>>>>>>>> saving opt poses to {debug_dir}/' + \
+ f'{frame_id}/poses_after_nerf.txt')
+ if mesh is not None:
+ mesh.export(f"{cfg_nerf['nerf_temp_dir']}/mesh_real_world.obj")
+ os.system(f"rm -rf " + \
+ f"{cfg_nerf['nerf_temp_dir']}/step_*_mesh_real_world.obj " + \
+ f"{cfg_nerf['nerf_temp_dir']}/*frame*ray*.ply && " + \
+ f"mv {cfg_nerf['nerf_temp_dir']}/* {out_dir}/")
+
+
+
+
+class BundleSdf:
+ def __init__(self, cfg_track_yaml=None, cfg_nerf_yaml=None,
+ start_nerf_keyframes=10, translation=None, sc_factor=None,
+ use_gui=False):
+ assert cfg_track_yaml is not None
+ assert cfg_nerf_yaml is not None
+
+ with open(cfg_track_yaml,'r') as ff:
+ self.cfg_track = yaml.load(ff)
+ self.debug_dir = self.cfg_track["debug_dir"]
+ self.SPDLOG = self.cfg_track["SPDLOG"]
+ self.start_nerf_keyframes = start_nerf_keyframes
+ self.use_gui = use_gui
+ self.translation = None
+ self.sc_factor = None
+ if sc_factor is not None:
+ self.translation = translation
+ self.sc_factor = sc_factor
+
+ self.toss_frames = self.cfg_track['toss_frames']
+
+ with open(cfg_nerf_yaml,'r') as ff:
+ self.cfg_nerf = yaml.load(ff)
+ self.cfg_nerf['notes'] += '' # += so existing notes aren't overwritten.
+ self.cfg_nerf['bounding_box'] = np.array(
+ self.cfg_nerf['bounding_box']).reshape(2,3)
+ self.nerf_dir = self.cfg_nerf['nerf_dir']
+ self.contact_in_cam_dir = self.cfg_nerf['contact_in_cam_dir']
+
+ self.manager = multiprocessing.Manager()
+
+ if self.use_gui:
+ self.gui_lock = multiprocessing.Lock()
+ self.gui_dict = self.manager.dict()
+ self.gui_dict['join'] = False
+ self.gui_dict['started'] = False
+ self.gui_worker = multiprocessing.Process(
+ target=run_gui, args=(self.gui_dict, self.gui_lock))
+ self.gui_worker.start()
+ else:
+ self.gui_lock = None
+ self.gui_dict = None
+
+ self.p_dict = self.manager.dict()
+ self.kf_to_nerf_list = self.manager.list()
+ self.lock = multiprocessing.Lock()
+ self.p_dict['running'] = False
+ self.p_dict['join'] = False
+ self.p_dict['nerf_num_frames'] = 0
+
+ self.p_dict['SPDLOG'] = self.SPDLOG
+ self.p_nerf = multiprocessing.Process(
+ target=run_nerf,
+ args=(self.p_dict, self.kf_to_nerf_list, self.lock, self.cfg_nerf,
+ self.translation, self.sc_factor, start_nerf_keyframes,
+ self.use_gui, self.gui_lock, self.gui_dict, self.debug_dir, self.contact_in_cam_dir)
+ )
+ self.p_nerf.start()
+
+ # self.p_dict = {}
+ # self.lock = threading.Lock()
+ # self.p_dict['running'] = False
+ # self.p_dict['join'] = False
+ # self.p_nerf = threading.Thread(target=self.run_nerf,
+ # args=(self.p_dict, self.lock))
+ # self.p_nerf.start()
+
+ yml = my_cpp.YamlLoadFile(cfg_track_yaml)
+ self.bundler = my_cpp.Bundler(yml)
+ self.loftr = LoftrRunner()
+ self.cnt = -1
+ self.K = None
+ self.mesh = None
+
+
+ def on_finish(self):
+ if self.use_gui:
+ with self.gui_lock:
+ self.gui_dict['join'] = True
+ self.gui_worker.join()
+
+ with self.lock:
+ self.p_dict['join'] = True
+ self.p_nerf.join()
+ with self.lock:
+ if self.p_dict['running']==False and 'optimized_cvcam_in_obs' in self.p_dict:
+ for i_f in range(len(self.p_dict['optimized_cvcam_in_obs'])):
+ self.bundler._keyframes[i_f]._pose_in_model = \
+ self.p_dict['optimized_cvcam_in_obs'][i_f]
+ self.bundler._keyframes[i_f]._nerfed = True
+ del self.p_dict['optimized_cvcam_in_obs']
+
+
+ def make_frame(self, color, depth, K, id_str, mask=None, occ_mask=None,
+ pose_in_model=np.eye(4)):
+ H,W = color.shape[:2]
+ roi = [0,W-1,0,H-1]
+ frame = my_cpp.Frame(color, depth, roi, pose_in_model, self.cnt, id_str, K,
+ self.bundler.yml)
+ if mask is not None:
+ frame._fg_mask = my_cpp.cvMat(mask)
+ if occ_mask is not None:
+ frame._occ_mask = my_cpp.cvMat(occ_mask)
+ return frame
+
+
+ def find_corres(self, frame_pairs):
+ logging.info(f"frame_pairs: {len(frame_pairs)}")
+ is_match_ref = len(frame_pairs)==1 and \
+ frame_pairs[0][0]._ref_frame_id==frame_pairs[0][1]._id and \
+ self.bundler._newframe==frame_pairs[0][0]
+
+ imgs, tfs, query_pairs = \
+ self.bundler._fm.getProcessedImagePairs(frame_pairs)
+ imgs = np.array([np.array(img) for img in imgs])
+
+ if len(query_pairs)==0:
+ return
+
+ corres = self.loftr.predict(rgbAs=imgs[::2], rgbBs=imgs[1::2])
+ for i_pair in range(len(query_pairs)):
+ cur_corres = corres[i_pair][:,:4]
+ tfA = np.array(tfs[i_pair*2])
+ tfB = np.array(tfs[i_pair*2+1])
+ cur_corres[:,:2] = transform_pts(cur_corres[:,:2], np.linalg.inv(tfA))
+ cur_corres[:,2:4] = transform_pts(cur_corres[:,2:4], np.linalg.inv(tfB))
+ self.bundler._fm._raw_matches[query_pairs[i_pair]] = \
+ cur_corres.round().astype(np.uint16)
+
+ min_match_with_ref = self.cfg_track["feature_corres"]["min_match_with_ref"]
+
+    if is_match_ref and \
+        len(self.bundler._fm._raw_matches[frame_pairs[0]])<min_match_with_ref:
+      frame_pairs[0][0]._status = my_cpp.Frame.FAIL
+      return
+
+    self.bundler._fm.rawMatchesToCorres(query_pairs)
+
+
+  def process_new_frame(self, frame):
+    logging.info(f"process frame {frame._id_str}")
+    self.bundler._newframe = frame
+
+    if frame._id>0:
+ ref_frame = self.bundler._frames[list(self.bundler._frames.keys())[-1]]
+ frame._ref_frame_id = ref_frame._id
+ frame._pose_in_model = ref_frame._pose_in_model
+ logging.info(f"pose_in_model")
+ else:
+ self.bundler._firstframe = frame
+
+ frame.invalidatePixelsByMask(frame._fg_mask)
+ logging.info(f"{frame._id}")
+ logging.info(f"test{np.array(frame._pose_in_model)}")
+ if frame._id==0 and \
+ np.abs(np.array(frame._pose_in_model)-np.eye(4)).max()<=1e-4:
+ logging.info(f"first frame, set new init coordinate")
+ frame.setNewInitCoordinate()
+ logging.info(f"new coordinate")
+
+
+ n_fg = (np.array(frame._fg_mask)>0).sum()
+ logging.info(f"n_fg{n_fg}")
+ if n_fg<100:
+      logging.info(f"Frame {frame._id_str} cloud is empty, marked FAIL, " + \
+                   f"roi={n_fg}")
+      frame._status = my_cpp.Frame.FAIL
+ self.bundler.forgetFrame(frame)
+ return
+
+ if self.cfg_track["depth_processing"]["denoise_cloud"]:
+ frame.pointCloudDenoise()
+ logging.info(f"denoise")
+ n_valid = frame.countValidPoints()
+ n_valid_first = self.bundler._firstframe.countValidPoints()
+      if n_valid < n_valid_first/40.0:
+        logging.info(f"frame {frame._id_str} has too few valid points, mark as FAIL")
+        frame._status = my_cpp.Frame.FAIL
+        self.bundler.forgetFrame(frame)
+        return
+
+    min_match_with_ref = self.cfg_track["feature_corres"]["min_match_with_ref"]
+    found = False
+    for kf in self.bundler._keyframes[::-1]:
+      self.find_corres([(frame, kf)])
+      if frame._status==my_cpp.Frame.FAIL or \
+          len(self.bundler._fm._matches[(frame, kf)])<min_match_with_ref:
+        continue
+      ref_frame = kf
+      frame._ref_frame_id = kf._id
+      frame._pose_in_model = kf._pose_in_model
+ logging.info(f"re-choose new ref frame to {kf._id_str}")
+ found = True
+ break
+
+ if not found:
+ frame._status = my_cpp.Frame.FAIL
+      logging.info(f"frame {frame._id_str} has no suitable ref_frame, " + \
+ f"mark as FAIL")
+ self.bundler.forgetFrame(frame)
+ return
+
+ logging.info(f"frame {frame._id_str} pose update before\n" + \
+ f"{frame._pose_in_model.round(3)}")
+ offset = self.bundler._fm.procrustesByCorrespondence(frame, ref_frame)
+ frame._pose_in_model = offset@frame._pose_in_model
+ logging.info(f"frame {frame._id_str} pose update after\n" + \
+ f"{frame._pose_in_model.round(3)}")
+
+ window_size = self.cfg_track["bundle"]["window_size"]
+ if len(self.bundler._frames)-len(self.bundler._keyframes)>window_size:
+ for k in self.bundler._frames:
+ f = self.bundler._frames[k]
+ isforget = self.bundler.forgetFrame(f)
+ if isforget:
+ logging.info(f"exceed window size, forget frame {f._id_str}")
+ break
+
+ self.bundler._frames[frame._id] = frame
+
+ self.bundler.selectKeyFramesForBA()
+
+ local_frames = self.bundler._local_frames
+
+ pairs = self.bundler.getFeatureMatchPairs(self.bundler._local_frames)
+ self.find_corres(pairs)
+ if frame._status==my_cpp.Frame.FAIL:
+ self.bundler.forgetFrame(frame)
+ return
+
+ find_matches = False
+ self.bundler.optimizeGPU(local_frames, find_matches)
+
+ if frame._status==my_cpp.Frame.FAIL:
+ self.bundler.forgetFrame(frame)
+ return
+
+ self.bundler.checkAndAddKeyframe(frame)
+
+
+
+ def run(self, color, depth, K, id_str, mask=None, occ_mask=None,
+ pose_in_model=np.eye(4)):
+ self.cnt += 1
+
+ if self.K is None:
+ self.K = K
+ with self.lock:
+ self.p_dict['K'] = self.K
+
+ if self.use_gui:
+ while 1:
+ with self.gui_lock:
+ started = self.gui_dict['started']
+ if not started:
+ time.sleep(1)
+ logging.info("Waiting for GUI")
+ continue
+ break
+
+ H,W = color.shape[:2]
+ # depth = depth * 0.95
+ percentile = self.cfg_track['depth_processing']["percentile"]
+ # percentile = 100
+ if percentile<100: # Denoise
+ logging.info("percentile denoise start")
+ valid = (depth>=0.1) & (mask>0)
+ thres = np.percentile(depth[valid], percentile)
+ depth[depth>=thres] = 0
+ logging.info("percentile denoise done")
+
+ frame = self.make_frame(color, depth, K, id_str, mask, occ_mask,
+ pose_in_model)
+ os.makedirs(op.join(self.debug_dir, frame._id_str), exist_ok=True)
+
+ logging.info(f"processNewFrame start {frame._id_str}")
+ # self.bundler.processNewFrame(frame)
+ self.process_new_frame(frame)
+ logging.info(f"processNewFrame done {frame._id_str}")
+
+ if self.bundler._keyframes[-1]==frame:
+ logging.info(f"{frame._id_str} prepare data for nerf")
+
+ with self.lock:
+ self.p_dict['frame_id'] = frame._id_str
+ self.p_dict['running'] = True
+ self.kf_to_nerf_list.append({
+ 'rgb': np.array(frame._color).reshape(H,W,3)[...,::-1].copy(),
+ 'depth': np.array(frame._depth).reshape(H,W).copy(),
+ 'mask': np.array(frame._fg_mask).reshape(H,W).copy(),
+ # 'occ_mask': occ_mask.reshape(H,W),
+ # 'normal_map': np.array(frame._normal_map).copy(),
+ 'occ_mask': None,
+ 'normal_map': None,
+ })
+ cam_in_obs = []
+ for f in self.bundler._keyframes:
+ cam_in_obs.append(np.array(f._pose_in_model).copy())
+ self.p_dict['cam_in_obs'] = np.array(cam_in_obs)
+
+ if self.SPDLOG>=2:
+ with open(op.join(self.debug_dir, frame._id_str, 'nerf_frames.txt'),
+ 'w') as ff:
+ for f in self.bundler._keyframes:
+ ff.write(f"{f._id_str}\n")
+
+ ############# Wait for sync
+ while 1:
+ with self.lock:
+ running = self.p_dict['running']
+ nerf_num_frames = self.p_dict['nerf_num_frames']
+ if not running:
+ break
+ if len(self.bundler._keyframes)-nerf_num_frames>=self.cfg_nerf['sync_max_delay']:
+ time.sleep(0.01)
+ # logging.info(f"wait for sync len(self.bundler._keyframes):{len(self.bundler._keyframes)}, nerf_num_frames:{nerf_num_frames}")
+ continue
+ break
+
+ rematch_after_nerf = self.cfg_track["feature_corres"]["rematch_after_nerf"]
+ logging.info(f"rematch_after_nerf: {rematch_after_nerf}")
+ frames_large_update = []
+ with self.lock:
+ if 'optimized_cvcam_in_obs' in self.p_dict:
+ for i_f in tqdm(range(len(self.p_dict['optimized_cvcam_in_obs'])), desc="sync nerf pose to tracking"):
+ if rematch_after_nerf:
+ trans_update = np.linalg.norm(
+ self.p_dict['optimized_cvcam_in_obs'][i_f][:3,3] - \
+ self.bundler._keyframes[i_f]._pose_in_model[:3,3]
+ )
+ rot_update = geodesic_distance(
+ self.p_dict['optimized_cvcam_in_obs'][i_f][:3,:3],
+ self.bundler._keyframes[i_f]._pose_in_model[:3,:3]
+ )
+ if trans_update>=0.005 or rot_update>=5/180.0*np.pi:
+ frames_large_update.append(self.bundler._keyframes[i_f])
+ logging.info(f"{self.bundler._keyframes[i_f]._id_str}, " + \
+ f"trans_update={trans_update}, " + \
+ f"rot_update={rot_update}")
+ self.bundler._keyframes[i_f]._pose_in_model = \
+ self.p_dict['optimized_cvcam_in_obs'][i_f]
+ self.bundler._keyframes[i_f]._nerfed = True
+ logging.info(f"synced pose from nerf, latest nerf frame " + \
+ f"{self.bundler._keyframes[len(self.p_dict['optimized_cvcam_in_obs'])-1]._id_str}")
+ del self.p_dict['optimized_cvcam_in_obs']
+
+ if self.use_gui:
+ with self.gui_lock:
+ if 'mesh' in self.p_dict:
+ self.gui_dict['mesh'] = self.p_dict['mesh']
+ del self.p_dict['mesh']
+
+ if rematch_after_nerf:
+ if len(frames_large_update)>0:
+ with self.lock:
+ nerf_num_frames = self.p_dict['nerf_num_frames']
+ logging.info(f"before matches keys: {len(self.bundler._fm._matches)}")
+ ks = list(self.bundler._fm._matches.keys())
+ for k in ks:
+ if k[0] in frames_large_update or k[1] in frames_large_update:
+ del self.bundler._fm._matches[k]
+ logging.info(f"Delete match between {k[0]._id_str} and " + \
+ f"{k[1]._id_str}")
+ logging.info(f"after matches keys: {len(self.bundler._fm._matches)}")
+
+    # Calls to saveNewframeResult write to the ob_in_cam/ result subfolder.
+ self.bundler.saveNewframeResult()
+ if self.SPDLOG>=2 and occ_mask is not None:
+ os.makedirs(op.join(self.debug_dir, 'occ_mask'), exist_ok=True)
+ cv2.imwrite(op.join(self.debug_dir, 'occ_mask', f'{frame._id_str}.png'),
+ occ_mask)
+
+ if self.use_gui:
+ ob_in_cam = np.linalg.inv(frame._pose_in_model)
+ with self.gui_lock:
+ self.gui_dict['color'] = color[...,::-1]
+ self.gui_dict['mask'] = mask
+ self.gui_dict['ob_in_cam'] = ob_in_cam
+ self.gui_dict['id_str'] = frame._id_str
+ self.gui_dict['K'] = self.K
+ self.gui_dict['n_keyframe'] = len(self.bundler._keyframes)
+
+
+
+ def run_global_nerf(self, reader=None, get_texture=False, tex_res=1024):
+ '''
+ @reader: data reader, sometimes we want to use the full resolution raw image
+ '''
+ self.K = np.loadtxt(op.join(self.debug_dir, 'cam_K.txt')).reshape(3,3)
+
+ tmp = sorted(glob.glob(op.join(self.debug_dir, 'ob_in_cam', '*')))
+ last_stamp = os.path.basename(tmp[-1]).replace('.txt','')
+ logging.info(f'last_stamp {last_stamp}')
+ keyframes_yml = op.join(self.debug_dir, last_stamp, 'keyframes.yml')
+ keyframes = yaml.load(open(keyframes_yml,'r'))
+ logging.info(f"keyframes#: {len(keyframes)}")
+ keys = list(keyframes.keys())
+ if len(keyframes)>self.cfg_nerf['n_train_image']:
+ keys = [keys[0]] + list(
+ np.random.choice(keys, self.cfg_nerf['n_train_image'], replace=False))
+ keys = list(set(keys))
+ logging.info(f"frame_ids too large, select subset num: {len(keys)}")
+
+ frame_ids = []
+ for k in keys:
+ frame_ids.append(k.replace('keyframe_',''))
+
+ cam_in_obs = []
+ for k in keys:
+ cam_in_ob = np.array(keyframes[k]['cam_in_ob']).reshape(4,4)
+ cam_in_obs.append(cam_in_ob)
+ cam_in_obs = np.array(cam_in_obs)
+
+ # The out directory is the nerf subdirectory of the NeRF config's nerf_dir.
+ out_dir = op.join(self.nerf_dir, 'nerf')
+ file_utils.remove_and_add_directory(out_dir)
+ file_utils.remove_and_add_directory(op.join(self.nerf_dir, 'used_rgbs'))
+
+ rgbs = []
+ depths = []
+ normal_maps = []
+ masks = []
+ occ_masks = []
+ if self.contact_in_cam_dir is not None:
+ ps = []
+ sdfs = []
+ vs = []
+ sdf_bounds = []
+ else:
+ ps = None
+ sdfs = None
+ vs = None
+ sdf_bounds = None
+ for frame_id in frame_ids:
+ if reader is not None:
+ self.K = reader.K.copy()
+ id = reader.id_strs.index(frame_id)
+ rgbs.append(reader.get_color(id))
+ depths.append(reader.get_depth(id))
+ masks.append(reader.get_mask(id))
+ else:
+ # Images have been downscaled in tracking outputs
+ self.cfg_nerf['down_scale_ratio'] = 1
+ rgb_file = op.join(self.debug_dir, 'color_segmented', f'{frame_id}.png')
+ shutil.copy(rgb_file, op.join(self.nerf_dir, 'used_rgbs'))
+ rgb = imageio.imread(rgb_file)
+ depth = cv2.imread(
+ rgb_file.replace('color_segmented','depth_filtered'),-1)/1e3
+ mask = cv2.imread(rgb_file.replace('color_segmented','mask'),-1)
+ rgbs.append(rgb)
+ depths.append(depth)
+ masks.append(mask)
+
+ if self.contact_in_cam_dir is not None:
+ if f'{frame_id}' in os.listdir(op.join(self.contact_in_cam_dir, 'from_support_points')):
+ ps_path = op.join(self.contact_in_cam_dir, 'from_support_points', f'{frame_id}', 'ps.pt')
+ sdfs_path = op.join(self.contact_in_cam_dir, 'from_support_points', f'{frame_id}', 'sdfs.pt')
+ ps.append(torch.load(ps_path))
+ sdfs.append(torch.load(sdfs_path))
+ else:
+ ps.append(None)
+ sdfs.append(None)
+ if f'{frame_id}' in os.listdir(op.join(self.contact_in_cam_dir, 'from_mesh_surface')): # could be a smaller set
+ vs_path = op.join(self.contact_in_cam_dir, 'from_mesh_surface', f'{frame_id}', 'vs.pt')
+ sdf_bounds_path = op.join(self.contact_in_cam_dir, 'from_mesh_surface', f'{frame_id}', 'sdf_bounds.pt')
+ vs.append(torch.load(vs_path))
+ sdf_bounds.append(torch.load(sdf_bounds_path))
+ else:
+ vs.append(None)
+ sdf_bounds.append(None)
+
+ # T^{OBJ}_{GLCAM} = T^{OBJ}_{CVCAM} * T^{CVCAM}_{GLCAM}
+ # T^{A}_{B} maps coordinates expressed in frame B into frame A, i.e. it is
+ # the pose of frame B in frame A.
+ # T_{B} = T_{A} * T^{A}_{B}
+ # p^{A} = T^{A}_{B} * p^{B}
+ glcam_in_obs = cam_in_obs@GLCAM_IN_CVCAM
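+ # For example, with this convention a point expressed in the GL camera frame
+ # of keyframe i maps into object coordinates as (illustrative only, not part
+ # of the pipeline):
+ # p_obj_h = glcam_in_obs[i] @ p_glcam_h # homogeneous 4-vectors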
+
+ self.cfg_nerf['sc_factor'] = None
+ self.cfg_nerf['translation'] = None
+
+ ######### Reuse normalization
+ # Get the most recent config.yml from any existing XXXX/nerf directories.
+ files = sorted(glob.glob(
+ f"{self.debug_dir}/[0-9][0-9][0-9][0-9]/nerf/config.yml", recursive=True))
+ if len(files)>0:
+ tmp = yaml.load(open(files[-1],'r'))
+ self.cfg_nerf['sc_factor'] = float(tmp['sc_factor'])
+ self.cfg_nerf['translation'] = np.array(tmp['translation'])
+
+ if self.cfg_nerf['geometry_dir'] is not None:
+ ### Regardless of contact_in_cam, use all support points to compute the scene bounds
+ ps_ = file_utils.load_geometry_support_points(self.cfg_nerf['geometry_dir'])
+ else:
+ ps_ = None
+
+ # {OBJ} is used as the {WORLD} frame in compute_scene_bounds()
+ # The points from all keyframes are used to compute the scene bounds
+ sc_factor, translation, pcd_real_scale, pcd_normalized = \
+ compute_scene_bounds(None, glcam_in_obs, self.K, use_mask=True,
+ base_dir=self.cfg_nerf['nerf_temp_dir'],
+ rgbs=np.array(rgbs), depths=np.array(depths),
+ masks=np.array(masks), eps=0.01, min_samples=5,
+ sc_factor=self.cfg_nerf['sc_factor'],
+ translation_cvcam=self.cfg_nerf['translation'],
+ ps_contact=ps_)
+
+ self.cfg_nerf['sc_factor'] = float(sc_factor)
+ self.cfg_nerf['translation'] = translation
+
+ if normal_maps is not None and len(normal_maps)>0:
+ normal_maps = np.array(normal_maps)
+ else:
+ normal_maps = None
+
+ rgbs_raw = np.array(rgbs).copy()
+ rgbs, depths, masks, normal_maps, poses, \
+ ps, sdfs, vs, sdf_bounds = preprocess_data(
+ np.array(rgbs), depths=np.array(depths), masks=np.array(masks),
+ normal_maps=normal_maps, poses=glcam_in_obs,
+ sc_factor=self.cfg_nerf['sc_factor'],
+ translation=self.cfg_nerf['translation'],
+ ps=ps, sdfs=sdfs, vs=vs, sdf_bounds=sdf_bounds)
+
+ self.cfg_nerf['sampled_frame_ids'] = np.arange(len(rgbs))
+
+ np.savetxt(f"{self.cfg_nerf['nerf_temp_dir']}/trainval_poses.txt",
+ glcam_in_obs.reshape(-1,4))
+
+ if len(occ_masks)>0:
+ occ_masks = np.array(occ_masks)
+ else:
+ occ_masks = None
+ nerf = NerfRunner(self.cfg_nerf, rgbs, depths=depths, masks=masks,
+ normal_maps=normal_maps, occ_masks=occ_masks, poses=poses,
+ K=self.K, build_octree_pcd=pcd_normalized,
+ ps=ps, sdfs=sdfs, vs=vs, sdf_bounds=sdf_bounds, )
+ print("Start training")
+ nerf.train()
+ optimized_cvcam_in_obs, offset = get_optimized_poses_in_real_world(
+ poses, nerf.models['pose_array'], self.cfg_nerf['sc_factor'],
+ self.cfg_nerf['translation'])
+
+ ####### Log
+ os.system(f"cp -r {self.cfg_nerf['nerf_temp_dir']}/image_step_*.png {out_dir}/")
+ with open(f"{out_dir}/config.yml",'w') as ff:
+ tmp = copy.deepcopy(self.cfg_nerf)
+ for k in tmp.keys():
+ if isinstance(tmp[k],np.ndarray):
+ tmp[k] = tmp[k].tolist()
+ yaml.dump(tmp,ff)
+ shutil.copy(f"{out_dir}/config.yml", f"{self.cfg_nerf['nerf_temp_dir']}/")
+ os.system(f"mv {self.cfg_nerf['nerf_temp_dir']}/* {out_dir}/ && rm -rf {out_dir}/step_*_mesh_real_world.obj {out_dir}/*frame*ray*.ply")
+
+ torch.cuda.empty_cache()
+
+ # Save NeRF results to NeRF directory instead of last keyframe.
+ np.savetxt(op.join(self.nerf_dir, 'poses_after_nerf.txt'),
+ np.array(optimized_cvcam_in_obs).reshape(-1,4))
+
+ # # Save the final model (see nerf_runner.py) to the NeRF directory.
+ # nerf.save_weights(
+ # out_file=op.join(self.nerf_dir, f'model_latest.pth'),
+ # models=nerf.models)
+
+ mesh, sigma, bounds, voxel_size, Nx = nerf.extract_mesh(
+ voxel_size=self.cfg_nerf['mesh_resolution'], isolevel=0,
+ return_sigma=True)
+
+ # Save the occupancy grid of the unit cube, computed from the trained SDF model, to the NeRF directory.
+ print(f"bounds: {bounds}, \nvoxel_size: {voxel_size}, Nx: {Nx}")
+ sigma_flat = sigma.flatten()
+ occ_grid = sigma_flat<=0
+ occ_bits = np.packbits(occ_grid)
+ np.savez_compressed(op.join(self.nerf_dir, 'occ_grid.npz'),
+ occ_bits=occ_bits, bounds=bounds, voxel_size=voxel_size,
+ Nx=Nx, sc_factor=self.cfg_nerf['sc_factor'],
+ translation=self.cfg_nerf['translation'], offset=offset)
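+ # A minimal sketch (not used by the pipeline) of how occ_grid.npz could be
+ # loaded back into a dense boolean grid, assuming a cubic Nx^3 grid:
+ # d = np.load('occ_grid.npz')
+ # n = int(d['Nx'])
+ # occ = np.unpackbits(d['occ_bits'])[:n**3].reshape(n, n, n).astype(bool)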
+
+ # Merge duplicate vertices; below, keep only the largest connected component (plus any components near contacts).
+ mesh.merge_vertices()
+
+ running_contact = self.cfg_nerf['contact_in_cam_dir'] is not None or \
+ self.cfg_nerf['geometry_dir'] is not None
+ if running_contact and self.cfg_nerf['convexity_weight'] == 0:
+ ms = trimesh_split(mesh, min_edge=5)
+ ### For each piece of the mesh, determine whether it is near contact points.
+ ### If it is, we keep it.
+ ### If it is the largest piece, we keep it.
+ ### Otherwise, we discard it.
+ ### At the end, everything that is kept is merged into a single mesh.
+ ### contact points: nerf.all_cps_sdf
+ largest_size = 0
+ largest = None
+ for m in ms:
+ # mean = m.vertices.mean(axis=0)
+ # if np.linalg.norm(mean)>=0.1*nerf.cfg['sc_factor']:
+ # continue
+ if m.vertices.shape[0]>largest_size:
+ largest_size = m.vertices.shape[0]
+ largest = m
+
+ ### Calculate the size of the largest mesh
+ vertices = largest.vertices
+ min_coords = vertices.min(axis=0)
+ max_coords = vertices.max(axis=0)
+ size = max_coords - min_coords
+ size_meter = size/self.cfg_nerf['sc_factor']
+ print(f"Size of the largest mesh: {size_meter} m")
+ d_to_contact_threshold = 0.01*self.cfg_nerf['sc_factor']
+ d_threshold_meter = d_to_contact_threshold/self.cfg_nerf['sc_factor']
+ print(f"Distance to contact threshold: {d_threshold_meter} m")
+
+ ms_preserve = []
+ ms_preserve.append(largest)
+ print(f"Preserve mesh with {largest.vertices.shape[0]} vertices")
+
+ contact_points = torch.cat(nerf.all_cps_sdf, 0)
+ contact_points = contact_points[:,:3]
+ count_preserved = 1
+ for m in ms:
+ if m.vertices.shape[0] == largest_size:
+ continue
+ mean = m.vertices.mean(axis=0)
+ # if np.linalg.norm(mean)>=0.1*self.cfg_nerf['sc_factor']:
+ # continue
+ dists = torch.cdist(torch.tensor(mean, dtype=torch.float32).unsqueeze(0), contact_points)
+ if dists.min()<=d_to_contact_threshold: # threshold corresponds to 0.01 m, expressed in normalized units
+ ### Enlarge the mesh so that its size is at least 0.01*sc_factor
+ ### But we do not want to move the center of the mesh
+ vertices = m.vertices
+ min_coords = vertices.min(axis=0)
+ max_coords = vertices.max(axis=0)
+ size = max_coords - min_coords
+ size_norm = np.linalg.norm(size)
+ size_max = size.max()
+ size_meter = size/self.cfg_nerf['sc_factor']
+ size_norm_meter = size_norm/self.cfg_nerf['sc_factor']
+ center_meter = mean/self.cfg_nerf['sc_factor']
+ print(f"Size {size_meter} norm: {size_norm_meter} center {center_meter}")
+ scale = 0.01*self.cfg_nerf['sc_factor']/size_norm
+ # We reuse 0.01*self.cfg_nerf['sc_factor'] as the target size here. It
+ # happens to equal the contact-distance threshold above, but the two values
+ # have different meanings.
+ if scale > 1:
+ m.apply_scale(scale)
+ m.apply_translation(mean - m.vertices.mean(axis=0))
+
+ ms_preserve.append(m)
+ print(f"Preserve mesh with {m.vertices.shape[0]} vertices")
+ count_preserved += 1
+
+ mesh = trimesh.util.concatenate(ms_preserve)
+ print(f"Preserved {count_preserved} meshes")
+
+ else:
+ ms = trimesh_split(mesh, min_edge=100)
+ largest_size = 0
+ largest = None
+ for m in ms:
+ # mean = m.vertices.mean(axis=0)
+ # if np.linalg.norm(mean)>=0.1*nerf.cfg['sc_factor']:
+ # continue
+ if m.vertices.shape[0]>largest_size:
+ largest_size = m.vertices.shape[0]
+ largest = m
+ mesh = largest
+ mesh.export(op.join(self.nerf_dir, 'mesh_cleaned.obj'))
+
+ # Track the visibility of the mesh vertices
+ mesh_hull = mesh.convex_hull
+ mesh_hull_visible_mask, mesh_hull_invis_triangle_indices = \
+ nerf.track_mesh_visibility(mesh_hull)
+ np.savetxt(op.join(self.nerf_dir, 'mesh_hull_visible_mask.txt'),
+ mesh_hull_visible_mask.astype(int), fmt='%d')
+ np.savetxt(op.join(self.nerf_dir, 'mesh_hull_invis_triangle_indices.txt'),
+ mesh_hull_invis_triangle_indices.astype(int), fmt='%d')
+ mesh_hull_world = mesh_to_real_world(mesh_hull, pose_offset=offset,
+ translation=self.cfg_nerf['translation'],
+ sc_factor=self.cfg_nerf['sc_factor'])
+ mesh_hull_world.export(op.join(self.nerf_dir, 'mesh_hull_world.obj'))
+
+ mesh_visible_mask, mesh_invis_triangle_indices = \
+ nerf.track_mesh_visibility(mesh)
+ np.savetxt(op.join(self.nerf_dir, 'mesh_visible_mask.txt'),
+ mesh_visible_mask.astype(int), fmt='%d')
+ np.savetxt(op.join(self.nerf_dir, 'mesh_invis_triangle_indices.txt'),
+ mesh_invis_triangle_indices.astype(int), fmt='%d')
+ # The texture extraction below hangs ("code stuck"), so it is disabled for now.
+ # if get_texture:
+ # mesh = nerf.mesh_texture_from_train_images(mesh, rgbs_raw=rgbs_raw, train_texture=False, tex_res=tex_res)
+ np.savetxt(op.join(self.nerf_dir, 'offset.txt'), offset)
+ mesh = mesh_to_real_world(mesh, pose_offset=offset,
+ translation=self.cfg_nerf['translation'],
+ sc_factor=self.cfg_nerf['sc_factor'])
+ mesh.export(op.join(self.nerf_dir, 'textured_mesh.obj'))
+
+ if gtsam is not None:
+ optimized_poses = self.optimize_all_poses(frame_ids, optimized_cvcam_in_obs,
+ keyframe_prior_noise=0.01, between_factor_noise=0.1)
+ else:
+ optimized_poses = None
+ logging.warning("GTSAM is not available, skipping final pose optimization.")
+
+ def optimize_all_poses(self, keyframe_indices, optimized_keyframe_poses,
+ keyframe_prior_noise=0.01, between_factor_noise=0.1):
+ """
+ Optimize all poses based on optimized keyframes and relative pose constraints
+
+ Args:
+ # all_poses: List of 4x4 transformation matrices for all frames (ob_in_cam)
+ keyframe_indices: List of indices that are keyframes
+ optimized_keyframe_poses: List of 4x4 transformation matrices for optimized keyframes (optimized_cvcam_in_obs inverse)
+ keyframe_prior_noise: Noise for keyframe prior factors
+ between_factor_noise: Noise for between factors
+
+ Returns:
+ List of optimized 4x4 transformation matrices for all frames
+ """
+ # Create factor graph
+ graph = gtsam.NonlinearFactorGraph()
+
+ # Create initial values
+ initial_values = gtsam.Values()
+
+ keyframe_indices = [int(frame_id) - 1 for frame_id in keyframe_indices]
+
+ # Convert numpy transforms to GTSAM Pose3
+ gtsam_poses = []
+ all_frame_poses_dir = op.join(self.debug_dir, 'ob_in_cam')
+ all_frames_files = sorted(glob.glob(op.join(all_frame_poses_dir, '*.txt')))
+ for pose_file in all_frames_files:
+ frame_id = os.path.basename(pose_file).replace('.txt','')
+ pose_ob_in_cam = np.loadtxt(pose_file).reshape(4,4)
+ R = pose_ob_in_cam[:3, :3]
+ t = pose_ob_in_cam[:3, 3]
+ gtsam_pose = gtsam.Pose3(gtsam.Rot3(R), t)
+ gtsam_poses.append(gtsam_pose)
+
+
+ # Convert optimized keyframe poses to GTSAM Pose3
+ gtsam_optimized_keyframe_poses = []
+ # optimized_keyframe_poses_path = op.join(self.nerf_dir, 'poses_after_nerf.txt')
+ # keyframe_poses = np.loadtxt(optimized_keyframe_poses_path).reshape(-1,4,4)
+ for pose in optimized_keyframe_poses:
+ pose_ob_in_cam = np.linalg.inv(pose)
+ R = pose_ob_in_cam[:3, :3]
+ t = pose_ob_in_cam[:3, 3]
+ gtsam_pose = gtsam.Pose3(gtsam.Rot3(R), t)
+ gtsam_optimized_keyframe_poses.append(gtsam_pose)
+
+ # Add all poses to initial values
+ for i, pose in enumerate(gtsam_poses):
+ initial_values.insert(i, pose)
+
+ # Add prior factors for optimized keyframes
+ keyframe_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([keyframe_prior_noise]*6))
+ for i, kf_idx in enumerate(keyframe_indices):
+ graph.add(gtsam.PriorFactorPose3(kf_idx, gtsam_optimized_keyframe_poses[i], keyframe_noise))
+
+ # Add between factors for consecutive frames
+ between_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([between_factor_noise]*6))
+ for i in range(len(gtsam_poses) - 1):
+ relative_pose = gtsam_poses[i].between(gtsam_poses[i+1])
+ graph.add(gtsam.BetweenFactorPose3(i, i+1, relative_pose, between_noise))
+
+ # Create optimizer and optimize
+ params = gtsam.LevenbergMarquardtParams()
+ optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_values, params)
+ result = optimizer.optimize()
+
+ # Extract optimized poses
+ optimized_poses = []
+ for i in range(len(gtsam_poses)):
+ optimized_pose = result.atPose3(i)
+ R = optimized_pose.rotation().matrix()
+ t = optimized_pose.translation()
+ T = np.eye(4)
+ T[:3, :3] = R
+ T[:3, 3] = t
+ optimized_poses.append(T)
+
+ # Write optimized poses to file
+ optimized_poses_path = op.join(self.nerf_dir, 'poses_all_frame_after_nerf.txt')
+ np.savetxt(optimized_poses_path, np.array(optimized_poses).reshape(-1,4))
+ logging.info(f"Optimized poses saved to {optimized_poses_path}")
+
+ return optimized_poses
+
+if __name__=="__main__":
+ set_seed(0)
+ torch.set_default_tensor_type('torch.cuda.FloatTensor')
+
+ cfg_nerf = file_utils.load_base_ho3d_bundlesdf_configuration()
+ cfg_nerf['video_dir'] = \
+ '/mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3/evaluation/MPM13'
+ cfg_nerf['SPDLOG'] = 1
+
+ cfg_track_dir = '/tmp/config.yml'
+ yaml.dump(cfg_nerf, open(cfg_track_dir,'w'))
+ tracker = BundleSdf(cfg_track_yaml=cfg_track_dir)
+ reader = Ho3dReader(tracker.bundler.yml["video_dir"].Scalar())
+
+ file_utils.remove_and_add_directory(tracker.debug_dir)
+
+ for i,color_file in enumerate(reader.color_files):
+ color = cv2.imread(color_file)
+ depth = reader.get_depth(i)
+ id_str = reader.id_strs[i]
+ occ_mask = reader.get_occ_mask(i)
+ tracker.run(color, depth, reader.K, id_str, occ_mask=occ_mask)
+
+ print("Done")
diff --git a/bundlenets/contact_loss_utils.py b/bundlenets/contact_loss_utils.py
new file mode 100644
index 0000000..8681586
--- /dev/null
+++ b/bundlenets/contact_loss_utils.py
@@ -0,0 +1,102 @@
+import torch
+from torch import Tensor
+import numpy as np
+
+def transform_points(points: Tensor, transformation_matrix: Tensor):
+ ones = torch.ones((points.shape[0], 1), dtype=points.dtype, device=points.device)
+ points_homogeneous = torch.hstack([points, ones])
+ transformed_points = points_homogeneous @ transformation_matrix.T
+ return transformed_points[:, :3]
+
+def inverse_homogeneous_transformation(transform: Tensor) -> Tensor:
+ """Produce the inverse of a homogeneous transform. If a homogeneous
+ transformation matrix is of the form:
+
+ T = [ R d ]
+ [ 0 1 ]
+
+ for T (4,4), R (3,3), and d (3,1), then the inverse is:
+
+ inv(T) = [ R^T -R^T*d ]
+ [ 0 1 ]
+
+ """
+ assert transform.shape == (4, 4)
+
+ # Split into rotation and translation components.
+ rot_mat = transform[:3, :3]
+ translation = transform[:3, 3]
+
+ # Build the inverse.
+ inv_transform = np.zeros((4, 4))
+ inv_transform[:3, :3] = rot_mat.T
+ inv_transform[:3, 3] = -rot_mat.T @ translation
+ inv_transform[3, 3] = 1
+
+ return inv_transform
+
+def transform_pts_to_normalized_space(points, translation, sc_factor, offset):
+ """From Utils.py's mesh_to_real_world function, we've uncovered that the
+ conversion from the SDF function's space to real world space is:
+
+ # The basic structure from Utils.py's mesh_to_real_world implements:
+ geom_origin_pts = sdf_pts/sc_factor - translation
+ track_origin_pts = geom_origin_pts.apply(offset)
+
+ Thus, this function needs to do the opposite.
+
+ # Reverse order yields:
+ geom_origin_pts = track_origin_pts.apply(inv(offset))
+ sdf_pts = (geom_origin_pts + translation) * sc_factor
+ """
+ # The provided points are wrt the tracking body origin.
+ track_origin_pts = points
+
+ # Convert to wrt the geometry body origin, determined via the offset.
+ inv_offset = inverse_homogeneous_transformation(offset)
+ geom_origin_pts = transform_points(track_origin_pts, inv_offset)
+
+ # Convert to the scaled space of the SDF inputs.
+ sdf_pts = (geom_origin_pts + translation.reshape(1,3)) * sc_factor
+ return sdf_pts
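+
+# Illustrative inverse mapping (a sketch mirroring mesh_to_real_world as
+# described in the docstring above; not used elsewhere in this module):
+# pts_m_again = transform_points(sdf_pts / sc_factor - translation.reshape(1, 3), offset)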
+
+
+### Added functions for file structure overhaul ###
+def convert_meter_points_to_normalized_space_for_sdf(
+ points: Tensor, sc_factor: float, offset: Tensor, translation: Tensor,
+ bsdf_init_pose: Tensor
+) -> Tensor:
+ """Convert points represented in meters from the BundleSDF _tracking_ frame
+ to the normalized space, which is represented in normalized coordinates from
+ the BundleSDF _shape reconstruction_ frame.
+
+ Args:
+ points (N, 3): points in meters from the BundleSDF tracking origin.
+ sc_factor: conversion factor in units [normalized units / meter].
+ offset (4, 4): 4x4 transformation matrix from the original BundleSDF
+ frame pose to the optimized first BundleSDF frame pose.
+ translation (3,): vector from the BundleSDF tracking origin to the
+ BundleSDF shape reconstruction origin.
+ bsdf_init_pose (4, 4): 4x4 transformation matrix of the first BundleSDF
+ origin pose in camera frame, from the BundleSDF results ob_in_cam
+ subfolder with 0001.txt in it.
+
+ Returns:
+ points_normalized (N, 3): points in normalized units from the
+ BundleSDF shape reconstruction origin.
+ """
+ bsdf_init_pose = torch.from_numpy(bsdf_init_pose)
+
+ points_normalized = transform_pts_to_normalized_space(
+ points=points, translation=translation, sc_factor=sc_factor,
+ offset=offset)
+
+ return points_normalized
+
+def convert_meter_sdfs_to_normalized_sdfs(
+ sdfs: Tensor, sc_factor: float, truncation: float
+) -> Tensor:
+ """Convert SDFs represented in meters to the double SDF normalized units,
+ which is represented in normalized coordinates also from the BundleSDF
+ frame."""
+ return sdfs * sc_factor / truncation
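+
+# Example usage (a minimal sketch; the variable names below are hypothetical
+# and would come from a BundleSDF results folder in practice):
+# sdf_pts = convert_meter_points_to_normalized_space_for_sdf(
+# points=pts_m, sc_factor=sc_factor, offset=offset,
+# translation=translation, bsdf_init_pose=init_pose)
+# sdf_vals = convert_meter_sdfs_to_normalized_sdfs(
+# sdfs=sdfs_m, sc_factor=sc_factor, truncation=truncation)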
diff --git a/bundlenets/file_utils.py b/bundlenets/file_utils.py
new file mode 100644
index 0000000..ce6e9c5
--- /dev/null
+++ b/bundlenets/file_utils.py
@@ -0,0 +1,457 @@
+"""File utilities for BundleSDF."""
+
+import os
+import os.path as op
+import sys
+import time
+
+import numpy as np
+import torch
+from torch import Tensor
+
+try:
+ import ruamel.yaml
+ yaml = ruamel.yaml.YAML()
+except ImportError:
+ print(f'WARNING: Could not load ruamel in bundlenets/file_utils.py; skip.')
+
+
+BUNDLENETS_REPO_DIR = op.dirname(op.dirname(os.path.realpath(__file__)))
+if BUNDLENETS_REPO_DIR not in sys.path:
+ sys.path.append(BUNDLENETS_REPO_DIR)
+BUNDLESDF_SOURCE_DIR = op.join(BUNDLENETS_REPO_DIR, 'bundlenets')
+BUNDLETRACK_BUILD_DIR = op.join(BUNDLENETS_REPO_DIR, 'BundleTrack', 'build')
+if BUNDLETRACK_BUILD_DIR not in sys.path:
+ sys.path.append(BUNDLETRACK_BUILD_DIR)
+
+
+# SYSTEMS = ['cube', 'prism', 'toblerone', 'milk']
+# bakingbox, burger, cardboard, chocolate, cream, croc, crushedcan, duck, gallon, greencan, hotdog, icetray, mug, oatly, pinkcan, stapler, styrofoam, toothpaste
+# VISION_CUBE_SYSTEM = 'vision_cube'
+# VISION_PRISM_SYSTEM = 'vision_prism'
+# VISION_TOBLERONE_SYSTEM = 'vision_toblerone'
+# VISION_MILK_SYSTEM = 'vision_milk'
+# VISION_SYSTEMS = ['vision_bottle', VISION_CUBE_SYSTEM, 'vision_egg',
+# 'vision_half', VISION_MILK_SYSTEM, 'vision_napkin',
+# 'vision_cubeslow', 'vision_2022-11-18-15-10-24', 'vision_box',
+# VISION_PRISM_SYSTEM, VISION_TOBLERONE_SYSTEM,
+# 'vision_bakingbox', 'vision_burger', 'vision_cardboard',
+# 'vision_chocolate', 'vision_cream', 'vision_croc',
+# 'vision_crushedcan', 'vision_duck', 'vision_gallon',
+# 'vision_greencan', 'vision_hotdog', 'vision_icetray',
+# 'vision_mug', 'vision_oatly', 'vision_pinkcan', 'vision_stapler',
+# 'vision_styrofoam', 'vision_toothpaste']
+VISION_SYSTEMS = ['bottle', 'cube', 'egg', 'half', 'milk', 'napkin', 'cubeslow',
+ '2022-11-18-15-10-24', 'box', 'prism', 'toblerone',
+ 'bakingbox', 'burger', 'cardboard', 'chocolate', 'cream', 'croc',
+ 'crushedcan', 'duck', 'gallon', 'greencan', 'hotdog', 'icetray',
+ 'mug', 'oatly', 'pinkcan', 'stapler', 'styrofoam', 'toothpaste']
+# cubeslow: cube with the free-falling part removed
+# 2022-11-18-15-10-24: bundlesdf's demo milk data
+
+def check_valid_system(system: str) -> bool:
+ """Check if a system is valid."""
+ words = system.split('_')
+ for word in words:
+ if 'vision' in word:
+ continue
+ if 'robot' in word:
+ continue
+ if word in VISION_SYSTEMS:
+ return True
+ return False
+
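+# For example, check_valid_system('vision_cube') and
+# check_valid_system('robot_oatly_5') both return True ('cube' and 'oatly' are
+# known systems), while check_valid_system('unknown_thing') returns False.
+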
+def assure_created(directory: str) -> str:
+ """Wrapper to put around directory paths which ensure their existence.
+
+ Args:
+ directory: Path of directory that may not exist.
+
+ Returns:
+ ``directory``, which is ensured to exist by recursive mkdir.
+ """
+ directory = op.abspath(directory)
+ if not op.exists(directory):
+ assure_created(op.dirname(directory))
+ os.mkdir(directory)
+ return directory
+
+
+### Top-level directories ###
+def top_video_dir() -> str:
+ """Path of the data directory with subfolders per dataset, each
+ containing input images/masks/etc."""
+ return assure_created(op.join(BUNDLENETS_REPO_DIR, "data"))
+
+def top_results_dir() -> str:
+ """Path of the results directory with subfolders per dataset and sub-
+ subfolders per BundleSDF experiment."""
+ return assure_created(op.join(BUNDLENETS_REPO_DIR, "results"))
+
+def top_geometry_dir() -> str:
+ """Path of the geometry directory with subfolders per dataset and sub-
+ subfolders per PLL experiment."""
+ return assure_created(op.join(BUNDLENETS_REPO_DIR, "geometry"))
+
+
+### Specific sub-directories ###
+def video_dir(dataset: str, check_exists: bool = True) -> str:
+ """Path of the data directory for a specific dataset."""
+ directory_path = op.join(top_video_dir(), dataset)
+ if check_exists:
+ assert op.isdir(directory_path), f'Dataset directory ' + \
+ f'{directory_path} does not exist.'
+ return directory_path
+
+def geometry_dir(dataset: str, geometry_cycle_iteration: int, pll_run_id: str,
+ check_exists: bool = True) -> str:
+ """Path of the geometry directory for a specific dataset. Returns None if
+ no PLL run ID is given.
+
+ Args:
+ dataset: Name of the dataset, e.g. cube_2.
+ geometry_cycle_iteration: BundleSDF cycle iteration number from which
+ the PLL geometry results were generated. This number can be 0 if
+ no PLL geometry results are to be used. If this number is -1, then
+ the PLL geometry results were generated from TagSLAM trajectories.
+ Any positive number >= 1 corresponds to the BundleSDF-PLL cyclic
+ pipeline iteration number.
+ pll_run_id: PLL run ID.
+
+ Returns:
+ Path of the geometry directory in which the PLL geometry results are
+ stored. Returns None if no geometry is to be used.
+ """
+ # No geometry directory if geometry cycle iteration is 0.
+ if geometry_cycle_iteration == 0:
+ assert pll_run_id is None, f'Got geometry cycle iteration 0 but ' + \
+ f'also got {pll_run_id=}.'
+ return None
+
+ assert pll_run_id is not None, f'Got geometry cycle iteration ' + \
+ f'{geometry_cycle_iteration} but not given a PLL ID.'
+
+ # Given the PLL run ID and geometry cycle iteration, search for the matching
+ # folder in the geometry directory. BundleSDF iterations run on a PLL result
+ # from TagSLAM trajectories correspond to cycle_iteration = 0, so the
+ # geometry cycle iteration is denoted as -1 in that case.
+ tracker = 'tagslam' if geometry_cycle_iteration == -1 else \
+ f'bundlesdf_iteration_{geometry_cycle_iteration}'
+
+ directory_path = op.join(top_geometry_dir(), dataset, tracker, pll_run_id)
+ if check_exists:
+ assert op.isdir(directory_path), f'Geometry directory ' + \
+ f'{directory_path} does not exist.'
+
+ return directory_path
+
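+# For example (hypothetical IDs), geometry_dir('cube_2', -1, 'pll_id_abc')
+# resolves to <repo>/geometry/cube_2/tagslam/pll_id_abc (assuming it exists),
+# while cycle iteration 2 resolves to
+# <repo>/geometry/cube_2/bundlesdf_iteration_2/pll_id_abc.
+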
+def results_dir(dataset: str, cycle_iteration: int, bundlesdf_id: str,
+ create: bool = False) -> str:
+ """Path of the results directory for a specific dataset and BundleSDF run
+ ID.
+
+ Args:
+ dataset: Name of the dataset, e.g. cube_2.
+ cycle_iteration: BundleSDF cycle iteration number from which the PLL
+ geometry results were generated.
+ bundlesdf_id: BundleSDF run ID.
+ create: Whether to create the directory if it does not exist.
+
+ Returns:
+ Path of the results directory in which the BundleSDF results can be
+ stored. If ``create`` is True, the directory is assured to exist.
+ """
+ tracker = 'tagslam' if cycle_iteration == 0 else \
+ f'bundlesdf_iteration_{cycle_iteration}'
+
+ results_path = op.join(top_results_dir(), dataset, tracker, bundlesdf_id)
+ if create:
+ return assure_created(results_path)
+ return results_path
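+# e.g. results_dir('cube_2', 0, 'bundlesdf_id_123') (hypothetical ID) resolves
+# to <repo>/results/cube_2/tagslam/bundlesdf_id_123.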
+
+
+### Video sub-directories ###
+def video_rgb_dir(dataset: str, check_parent_exists: bool = True) -> str:
+ """Path of the RGB images directory for a specific dataset."""
+ return assure_created(
+ op.join(video_dir(dataset, check_exists=check_parent_exists), 'rgb'))
+
+def video_depth_dir(dataset: str, check_parent_exists: bool = True) -> str:
+ """Path of the depth images directory for a specific dataset."""
+ return assure_created(
+ op.join(video_dir(dataset, check_exists=check_parent_exists), 'depth'))
+
+def video_mask_dir(dataset: str, check_parent_exists: bool = True) -> str:
+ """Path of the mask images directory for a specific dataset."""
+ return assure_created(
+ op.join(video_dir(dataset, check_exists=check_parent_exists), 'masks'))
+
+
+### Results sub-directories ###
+def nerf_temp_subdir(out_folder: str) -> str:
+ """Path of the temporary NeRF results sub-directory within a BundleSDF
+ results directory."""
+ return assure_created(op.join(out_folder, 'nerf_with_bundletrack_online'))
+
+def nerf_results_subdir(out_folder: str, bundlesdf_run_id: str,
+ create: bool = False) -> str:
+ """Path of the NeRF results sub-directory within a BundleSDF results
+ directory."""
+ if create:
+ return assure_created(
+ op.join(out_folder, 'nerf_runs', bundlesdf_run_id))
+ return op.join(out_folder, 'nerf_runs', bundlesdf_run_id)
+
+def nerf_results_sdf_inspection_subdir(out_folder: str, bundlesdf_run_id: str
+ ) -> str:
+ """Path of the NeRF results sub-directory within a NeRF results directory
+ for inspecting the SDF."""
+ nerf_folder = nerf_results_subdir(out_folder, bundlesdf_run_id)
+ return assure_created(op.join(nerf_folder, 'sdf_inspection'))
+
+def get_nerf_offset_transform(out_folder: str, bundlesdf_run_id: str
+ ) -> np.ndarray:
+ """Load the offset transformation. First this checks if the parent
+ tracking experiment has a stored offset, then checks if the child shape
+ reconstruction experiment has a stored offset, and otherwise returns the
+ identity."""
+ if op.exists(op.join(out_folder, 'offset.txt')):
+ return np.loadtxt(op.join(out_folder, 'offset.txt'))
+
+ nerf_folder = nerf_results_subdir(out_folder, bundlesdf_run_id)
+ if op.exists(op.join(nerf_folder, 'offset.txt')):
+ return np.loadtxt(op.join(nerf_folder, 'offset.txt'))
+
+ return np.eye(4)
+
+
+### Geometry files ###
+def load_geometry_support_points(geometry_dir: str) -> Tensor:
+ """Load the torch file ps.pt, containing 3D points whose signed distances
+ correspond to the sdfs.pt in a geometry directory. Both of these come from
+ support points (not mesh surface) for more accurate supervision."""
+ filepath = op.join(geometry_dir, 'from_support_points', 'ps.pt')
+ assert op.isfile(filepath), f'Geometry file {filepath} does not exist.'
+ return torch.load(filepath)
+
+def load_geometry_sdfs(geometry_dir: str) -> Tensor:
+ """Load the torch file sdfs.pt, containing signed distances corresponding
+ to the ps.pt support points in a geometry directory. Both of these come
+ from support points (not mesh surface)."""
+ filepath = op.join(geometry_dir, 'from_support_points', 'sdfs.pt')
+ assert op.isfile(filepath), f'Geometry file {filepath} does not exist.'
+ return torch.load(filepath).float().cuda()
+
+def load_geometry_sdf_bounded_mesh_points(geometry_dir: str) -> Tensor:
+ """Load the torch file vs.pt, containing 3D points whose signed distances
+ are minimum bounded by the sdf_bounds.pt in a geometry directory. Both of
+ these come from the mesh surface (not the support points) for more global
+ coverage."""
+ filepath = op.join(geometry_dir, 'from_mesh_surface', 'vs.pt')
+ assert op.isfile(filepath), f'Geometry file {filepath} does not exist.'
+ return torch.load(filepath)
+
+def load_geometry_sdf_bounds(geometry_dir: str) -> Tensor:
+ """Load the torch file sdf_bounds.pt, containing signed distance minimum
+ bounds corresponding to the vs.pt perturbed mesh points in a geometry
+ directory. Both of these come from the mesh surface (not the support
+ points) for more global coverage."""
+ filepath = op.join(geometry_dir, 'from_mesh_surface', 'sdf_bounds.pt')
+ assert op.isfile(filepath), f'Geometry file {filepath} does not exist.'
+ return torch.load(filepath).float().cuda()
+
+def load_geometry_sdf_gradient_mesh_points(geometry_dir: str) -> Tensor:
+ """Load the torch file ws.pt, containing 3D points whose signed distance
+ gradients correspond to the w_normals.pt in a geometry directory. Both of
+ these come from the mesh surface (not the support points) for more global
+ coverage."""
+ filepath = op.join(geometry_dir, 'from_mesh_surface', 'ws.pt')
+ assert op.isfile(filepath), f'Geometry file {filepath} does not exist.'
+ return torch.load(filepath)
+
+def load_geometry_sdf_gradients(geometry_dir: str) -> Tensor:
+ """Load the torch file w_normals.pt, containing signed distance gradients
+ corresponding to the ws.pt perturbed mesh points in a geometry directory.
+ Both of these come from the mesh surface (not the support points) for more
+ global coverage."""
+ filepath = op.join(geometry_dir, 'from_mesh_surface', 'w_normals.pt')
+ assert op.isfile(filepath), f'Geometry file {filepath} does not exist.'
+ return torch.load(filepath)
+
+def get_geometry_contact_in_cam_path(geometry_dir: str) -> str:
+ """Path of the contact_in_cam folder within a geometry directory, checking
+ that it exists."""
+ dirpath = op.join(geometry_dir, 'contact_in_cam')
+ assert op.isdir(dirpath), f'Contact in cam folder {dirpath} does not exist.'
+ return dirpath
+
+### Tracking files ###
+def load_first_pose_in_cam(out_folder: str) -> np.ndarray:
+ """Load the first pose in camera coordinates of the BundleSDF origin from a
+ BundleSDF results directory."""
+ filepath = op.join(out_folder, 'ob_in_cam', '0001.txt')
+ assert op.isfile(filepath), f'Tracking file {filepath} does not exist.'
+ return np.loadtxt(filepath)
+
+
+### Base configuration file management ###
+def get_base_bundlesdf_configuration_filepath(filename: str) -> str:
+ """Get the path to a base configuration file, checks that exists."""
+ filepath = op.join(BUNDLENETS_REPO_DIR, "BundleTrack", filename)
+ assert op.isfile(filepath), f'Configuration file {filepath} does not exist.'
+ return filepath
+
+def load_base_ho3d_bundlesdf_configuration() -> dict:
+ """Load the configuration for the HO3D dataset."""
+ filepath = get_base_bundlesdf_configuration_filepath("config_ho3d.yml")
+ return yaml.load(open(filepath, 'r'))
+
+def load_base_toss_bundlesdf_configuration() -> dict:
+ """Load the configuration for the PLL toss dataset."""
+ filepath = get_base_bundlesdf_configuration_filepath("config_toss.yml")
+ return yaml.load(open(filepath, 'r'))
+
+def load_base_toss_nerf_configuration() -> dict:
+ """Load the main NeRF configuration file."""
+ filepath = op.join(BUNDLENETS_REPO_DIR, "assets", "config_toss_nerf.yml")
+ return yaml.load(open(filepath, 'r'))
+
+
+### Run-specific configuration file management ###
+def get_run_bundlesdf_configuration_filepath(out_folder: str) -> str:
+ """Get the path to an experiment's BundleSDF configuration file."""
+ return op.join(out_folder, "config_bundlesdf.yml")
+
+def get_run_outer_nerf_configuration_filepath(out_folder: str) -> str:
+ """Get the path to an experiment's NeRF configuration file in results folder.
+ """
+ return op.join(out_folder, "config_nerf.yml")
+
+def get_run_inner_nerf_configuration_filepath(out_folder: str, bundlesdf_id: str) -> str:
+ """Get the path to an experiment's NeRF configuration file in
+ out_folder/nerf_runs/bundlesdf_id_xxx/nerf folder."""
+ return op.join(nerf_results_subdir(out_folder, bundlesdf_id), "nerf", "config.yml")
+
+def get_run_online_nerf_configuration_filepath(out_folder: str) -> str:
+ """Get the path to an experiment's NeRF configuration file in
+ out_folder/nerf_with_bundletrack_online folder."""
+ return op.join(nerf_temp_subdir(out_folder), "config.yml")
+
+def load_run_bundlesdf_configuration(out_folder: str) -> dict:
+ """Load an experiment's BundleSDF configuration from its results folder."""
+ filepath = get_run_bundlesdf_configuration_filepath(out_folder)
+ assert op.isfile(filepath), f'Configuration file {filepath} does not exist.'
+ return yaml.load(open(filepath, 'r'))
+
+def load_run_outer_nerf_configuration(out_folder: str) -> dict:
+ """Load an experiment's NeRF configuration from its results folder."""
+ filepath = get_run_outer_nerf_configuration_filepath(out_folder)
+ assert op.isfile(filepath), f'Configuration file {filepath} does not exist.'
+ return yaml.load(open(filepath, 'r'))
+
+def load_run_inner_nerf_configuration(out_folder: str, bundlesdf_id: str) -> dict:
+ """Load an experiment's online NeRF configuration from its results folder.
+ """
+ filepath = get_run_inner_nerf_configuration_filepath(out_folder, bundlesdf_id)
+ assert op.isfile(filepath), f'Configuration file {filepath} does not exist.'
+ return yaml.load(open(filepath, 'r'))
+
+def load_run_online_nerf_configuration(out_folder: str) -> dict:
+ """Load an experiment's online NeRF configuration from its results folder.
+ """
+ filepath = get_run_online_nerf_configuration_filepath(out_folder)
+ assert op.isfile(filepath), f'Configuration file {filepath} does not exist.'
+ return yaml.load(open(filepath, 'r'))
+
+def save_run_bundlesdf_configuration(config: dict, out_folder: str) -> None:
+ """Save an experiment's BundleSDF configuration to its results folder."""
+ filename = get_run_bundlesdf_configuration_filepath(out_folder)
+ yaml.dump(config, open(filename, 'w'))
+
+def save_run_outer_nerf_configuration(config: dict, out_folder: str) -> None:
+ """Save an experiment's NeRF configuration to its results folder."""
+ filename = get_run_outer_nerf_configuration_filepath(out_folder)
+ yaml.dump(config, open(filename, 'w'))
+
+def save_run_online_nerf_configuration(config: dict, out_folder: str) -> None:
+ """Save an experiment's NeRF configuration to its
+ results/nerf_with_bundletrack_online folder."""
+ filename = get_run_online_nerf_configuration_filepath(out_folder)
+ yaml.dump(config, open(filename, 'w'))
+
+
+### Agnostic directory management ###
+def remove_and_add_directory(directory: str) -> None:
+ """Remove and re-create a directory."""
+ os.system(f'rm -rf {directory} && mkdir -p {directory}')
+
+def pll_run_id_from_bundlesdf_id(dataset: str, cycle_iteration: int,
+ bundlesdf_id: str) -> str:
+ """Get the PLL run ID associated with a particular BundleSDF run, returning
+ None if there is no associated PLL information."""
+ # This run may have happened with or without TagSLAM-based PLL results.
+ out_with_tagslam_dir = results_dir(
+ dataset, 0, bundlesdf_id=bundlesdf_id)
+ out_without_tagslam_dir = results_dir(
+ dataset, cycle_iteration, bundlesdf_id=bundlesdf_id)
+
+ # Don't know which run to choose if multiple.
+ assert not (op.isdir(out_with_tagslam_dir) and
+ op.isdir(out_without_tagslam_dir)), f'Both ' + \
+ f'{out_with_tagslam_dir} and {out_without_tagslam_dir} exist.'
+
+ # Could not find results for this BundleSDF ID in this dataset and cycle.
+ assert op.isdir(out_with_tagslam_dir) or op.isdir(out_without_tagslam_dir),\
+ f'Neither {out_with_tagslam_dir} nor {out_without_tagslam_dir} exist.'
+
+ # Found a unique result; extract its associated PLL run ID from its NeRF
+ # configuration.
+ out_folder = out_with_tagslam_dir if op.isdir(out_with_tagslam_dir) \
+ else out_without_tagslam_dir
+
+ # First look for inner NeRF configuration if global NeRF training was run.
+ # Otherwise, fall back on the original NeRF configuration created when
+ # tracking was run.
+ try:
+ nerf_config = load_run_inner_nerf_configuration(out_folder, bundlesdf_id)
+ except AssertionError:
+ print(f'Could not load inner NeRF configuration from {out_folder}; ' + \
+ f'using outer NeRF configuration instead.\n')
+ nerf_config = load_run_outer_nerf_configuration(out_folder)
+
+ try:
+ geometry_dir = nerf_config['geometry_dir']
+ if geometry_dir is None:
+ return None
+ return op.basename(geometry_dir)
+ except KeyError:
+ print(f'No geometry_dir key in NeRF configuration in {out_folder}; ' + \
+ f'assuming no PLL inputs associated with BundleSDF run.\n')
+ return None
+
+def format_run_id(run_id: str, is_bundlesdf_not_pll: bool = None) -> str:
+ """Format a run ID to include a prefix. Handle the following cases:
+ - run_id is None: This should only happen for a PLL run ID, which means
+ there is no associated PLL run ID --> return None.
+ - run_id is "": This should only happen for a BundleSDF ID since PLL
+ IDs need to have non-empty names --> generate a new BundleSDF ID
+ using the current timestamp.
+ - run_id doesn't start with a prefix: Add the prefix to the run ID.
+ - run_id starts with the right prefix: Do nothing to the run ID.
+ """
+ assert is_bundlesdf_not_pll is not None, f'Need to specify whether ' + \
+ f'{run_id=} is a BundleSDF or PLL run ID to format properly.'
+
+ prefix = 'bundlesdf_id' if is_bundlesdf_not_pll else 'pll_id'
+
+ if run_id is None:
+ assert not is_bundlesdf_not_pll, f'Got an empty run ID for a ' + \
+ 'BundleSDF run, which is not allowed.'
+ return None
+ if run_id == "":
+ assert is_bundlesdf_not_pll, f'Got a PLL run ID of "" which is not ' + \
+ 'allowed.'
+ return f'{prefix}_{str(int(time.time()))}'
+ if run_id[:len(prefix)] != prefix:
+ return f'{prefix}_{run_id}'
+
+ return run_id
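+
+# Examples (illustrative only):
+# format_run_id(None, is_bundlesdf_not_pll=False) -> None
+# format_run_id("", is_bundlesdf_not_pll=True) -> 'bundlesdf_id_<timestamp>'
+# format_run_id("abc", is_bundlesdf_not_pll=False) -> 'pll_id_abc'
+# format_run_id("pll_id_abc", is_bundlesdf_not_pll=False) -> 'pll_id_abc'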
+
diff --git a/gui.py b/bundlenets/gui.py
similarity index 97%
rename from gui.py
rename to bundlenets/gui.py
index 8d8b0b7..c25841c 100644
--- a/gui.py
+++ b/bundlenets/gui.py
@@ -7,15 +7,15 @@
# license agreement from NVIDIA CORPORATION is strictly prohibited.
-from Utils import *
+from bundlenets.Utils import *
+from bundlenets.file_utils import BUNDLENETS_REPO_DIR
import dearpygui.dearpygui as dpg
class BundleSdfGui:
def __init__(self, img_height=300):
dpg.create_context()
- code_dir = os.path.dirname(os.path.realpath(__file__))
- dpg.create_viewport(large_icon=f'{code_dir}/dpg.jpg')
+ dpg.create_viewport(large_icon=f'{BUNDLENETS_REPO_DIR}/dpg.jpg')
dpg.setup_dearpygui()
self.H = int(img_height)
self.W = None
diff --git a/iterative_closest_point.py b/bundlenets/iterative_closest_point.py
similarity index 100%
rename from iterative_closest_point.py
rename to bundlenets/iterative_closest_point.py
diff --git a/loftr_wrapper.py b/bundlenets/loftr_wrapper.py
similarity index 92%
rename from loftr_wrapper.py
rename to bundlenets/loftr_wrapper.py
index 002a08c..89a0085 100644
--- a/loftr_wrapper.py
+++ b/bundlenets/loftr_wrapper.py
@@ -8,12 +8,12 @@
import os,zmq,pdb,sys,time,torchvision
-code_dir = os.path.dirname(os.path.realpath(__file__))
+from bundlenets.file_utils import BUNDLENETS_REPO_DIR
import argparse
import cv2
import torch,imageio
from BundleTrack.LoFTR.src.loftr import *
-from Utils import *
+from bundlenets.Utils import *
class LoftrRunner:
@@ -21,7 +21,7 @@ def __init__(self):
default_cfg['match_coarse']['thr'] = 0.2
print("default_cfg",default_cfg)
self.matcher = LoFTR(config=default_cfg)
- self.matcher.load_state_dict(torch.load(f'{code_dir}/BundleTrack/LoFTR/weights/outdoor_ds.ckpt')['state_dict'])
+ self.matcher.load_state_dict(torch.load(f'{BUNDLENETS_REPO_DIR}/BundleTrack/LoFTR/weights/outdoor_ds.ckpt')['state_dict'])
self.matcher = self.matcher.eval().cuda()
diff --git a/bundlenets/mask_shrink.py b/bundlenets/mask_shrink.py
new file mode 100644
index 0000000..117fd1d
--- /dev/null
+++ b/bundlenets/mask_shrink.py
@@ -0,0 +1,57 @@
+"""
+This script will load a mesh and a mask, shrink the mask to exclude all vertices that share an edge with the unmasked vertices, and save the new mask.
+"""
+
+def get_connected_vertices(mesh, vertices):
+ """Get all vertices that share an edge with given vertices"""
+ connected = set()
+
+ # For each face
+ for face in mesh.faces:
+ # If any vertex in face is in our target set
+ if any(v in vertices for v in face):
+ # Add all vertices of this face
+ connected.update(face)
+
+ return connected
+
+def remove_vertices_connected_to_unhit(mesh, vertices_hit_mask):
+ vertices_unhit = np.where(vertices_hit_mask==0)[0]
+ vertices_unhit_expanded = get_connected_vertices(mesh, vertices_unhit)
+ vertices_unhit_expanded = np.array(list(vertices_unhit_expanded))
+ mask_new = vertices_hit_mask.copy()
+ mask_new[vertices_unhit_expanded] = 0
+ return mask_new
+
+def remove_vertices_too_close_to_unhit(mesh, vertices_hit_mask):
+ indices_vertices_hit = np.where(vertices_hit_mask)[0]
+ indices_vertices_unhit = np.where(~vertices_hit_mask)[0]
+ # all vertices within 30 degrees of the unhit vertices are also unhit
+ vertices_hit = mesh.vertices[indices_vertices_hit]
+ vertices_unhit = mesh.vertices[indices_vertices_unhit]
+ vertices_hit_unit = vertices_hit/np.linalg.norm(vertices_hit, axis=1, keepdims=True)
+ vertices_unhit_unit = vertices_unhit/np.linalg.norm(vertices_unhit, axis=1, keepdims=True)
+ dot_product = np.dot(vertices_hit_unit, vertices_unhit_unit.T)
+ vertices_hit_too_close_to_unhit = np.any(dot_product > np.cos(np.deg2rad(30)), axis=1)
+ vertices_hit_mask[indices_vertices_hit[vertices_hit_too_close_to_unhit]] = False
+ return vertices_hit_mask
+
+mesh_path = "/mnt/data0/minghz/repos/bundlenets/results/robot_oatly_5/bundlesdf_iteration_1/bundlesdf_id_00-occ/nerf_runs/bundlesdf_id_00-occ-mask/mesh_hull_world.obj"
+mask_path = "/mnt/data0/minghz/repos/bundlenets/results/robot_oatly_5/bundlesdf_iteration_1/bundlesdf_id_00-occ/nerf_runs/bundlesdf_id_00-occ-mask/mesh_hull_visible_mask_original.txt"
+
+# Load mesh
+mesh = trimesh.load(mesh_path)
+
+# Load mask
+mask = np.loadtxt(mask_path).astype(bool)
+
+# Get vertices of mask
+# mask_new = remove_vertices_connected_to_unhit(mesh, mask)
+mask_new = remove_vertices_too_close_to_unhit(mesh, mask)
+
+
+# Save new mask
+mask_path_new = "/mnt/data0/minghz/repos/bundlenets/results/robot_oatly_5/bundlesdf_iteration_1/bundlesdf_id_00-occ/nerf_runs/bundlesdf_id_00-occ-mask/mesh_hull_visible_mask.txt"
+np.savetxt(mask_path_new, mask_new, fmt='%d')
diff --git a/mesh_utils.py b/bundlenets/mesh_utils.py
similarity index 95%
rename from mesh_utils.py
rename to bundlenets/mesh_utils.py
index 3338b31..f9a9271 100644
--- a/mesh_utils.py
+++ b/bundlenets/mesh_utils.py
@@ -12,7 +12,8 @@
import torch
from torch import Tensor
import os
-from Utils import trimesh_split, trimesh_clean
+from bundlenets.Utils import trimesh_split, trimesh_clean
+from bundlenets.file_utils import BUNDLENETS_REPO_DIR
def get_inertia(obj_file):
coords = np.loadtxt(obj_file, unpack=True, delimiter=',', dtype=int)
@@ -366,16 +367,16 @@ def post_process_obj(input_mesh, output_mesh):
# get inertia
# mesh_file = './mesh_convex_hull.txt'
# print(get_inertia(mesh_file))
- code_dir = os.path.dirname(os.path.realpath(__file__))
- original_mesh = f'{code_dir}/assets/{filename}.obj'
- original_mesh_meters = f'{code_dir}/assets/{filename}_meters.obj'
- simplified_mesh = f'{code_dir}/assets/{filename}_simple.obj'
- output_path = f'{code_dir}/assets/{filename}_optimized.obj'
- normal_mesh = f'{code_dir}/assets/{filename}_with_normals.obj'
- rescale_mesh = f'{code_dir}/assets/{filename}_rescale.obj'
- alt_simplified_mesh = f'{code_dir}/assets/{filename}_rescale_simplified_alt.obj'
- amplified_mesh = f'{code_dir}/assets/{filename}_amp.obj'
- convex_mesh = f'{code_dir}/assets/{filename}_convex.obj'
+ assets_dir = os.path.join(BUNDLENETS_REPO_DIR, 'assets')
+ original_mesh = f'{assets_dir}/{filename}.obj'
+ original_mesh_meters = f'{assets_dir}/{filename}_meters.obj'
+ simplified_mesh = f'{assets_dir}/{filename}_simple.obj'
+ output_path = f'{assets_dir}/{filename}_optimized.obj'
+ normal_mesh = f'{assets_dir}/{filename}_with_normals.obj'
+ rescale_mesh = f'{assets_dir}/{filename}_rescale.obj'
+ alt_simplified_mesh = f'{assets_dir}/{filename}_rescale_simplified_alt.obj'
+ amplified_mesh = f'{assets_dir}/{filename}_amp.obj'
+ convex_mesh = f'{assets_dir}/{filename}_convex.obj'
""""Align two point clouds through pca and icp. Pre-process init_nerf shape.
"""
# pcd1 = "/Users/chris/Downloads/dair_napkin_amp.obj"
diff --git a/nerf_helpers.py b/bundlenets/nerf_helpers.py
similarity index 57%
rename from nerf_helpers.py
rename to bundlenets/nerf_helpers.py
index eb44c16..0e2e07d 100644
--- a/nerf_helpers.py
+++ b/bundlenets/nerf_helpers.py
@@ -11,12 +11,16 @@
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
-from Utils import *
+from bundlenets.Utils import *
from pytorch3d.transforms import so3_log_map,so3_exp_map,se3_exp_map
+from typing import Optional, Union
+# cKDTree is used by sample_point_based_on_distance_kdtree below; imported
+# explicitly here (it may also come in via the wildcard Utils import).
+from scipy.spatial import cKDTree
to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)
+FS_WEIGHT = 0.5
+SDF_WEIGHT = 1.0 - FS_WEIGHT
+
class SHEncoder(nn.Module):
@@ -109,7 +113,6 @@ class FeatureArray(nn.Module):
"""
Per-frame corrective latent code.
"""
-
def __init__(self, num_frames, num_channels):
super().__init__()
@@ -128,19 +131,22 @@ class PoseArray(nn.Module):
"""
Per-frame camera pose correction in the normalized space.
- The pose correction contains 6 parameters for each pose (3 for rotation, 3 for translation).
- The rotation parameters define axis-angles which can be converted into a rotation matrix.
+ The pose correction contains 6 parameters for each pose (3 for rotation, 3
+ for translation).
+ The rotation parameters define axis-angles which can be converted into a
+ rotation matrix.
"""
- def __init__(self, num_frames,max_trans,max_rot):
+ def __init__(self, num_frames, max_trans, max_rot):
super().__init__()
self.num_frames = num_frames
self.max_trans = max_trans
self.max_rot = max_rot
- self.data = nn.parameter.Parameter(torch.zeros([num_frames, 6]).float(), requires_grad=True)
+ self.data = nn.parameter.Parameter(torch.zeros([num_frames, 6]).float(),
+ requires_grad=True)
self.register_parameter('data',self.data)
- def get_matrices(self,ids):
+ def get_matrices(self, ids):
if not torch.is_tensor(ids):
ids = torch.tensor(ids).long()
@@ -148,7 +154,8 @@ def get_matrices(self,ids):
trans = theta[:,:3] * self.max_trans
rot = theta[:,3:6] * self.max_rot/180.0*np.pi
Ts_data = se3_exp_map(torch.cat((trans,rot),dim=-1)).permute(0,2,1)
- Ts = torch.eye(4, device=self.data.device).reshape(1,4,4).repeat(len(ids),1,1)
+ Ts = torch.eye(4, device=self.data.device).reshape(1,4,4).repeat(
+ len(ids),1,1)
mask = ids!=0
Ts[mask] = Ts_data[ids[mask]]
return Ts
@@ -214,8 +221,7 @@ def get_embedder(multires, cfg, i=0, octree_m=None):
return embed, out_dim
-
-def preprocess_data(rgbs,depths,masks,normal_maps,poses,sc_factor,translation):
+def preprocess_data(rgbs,depths,masks,normal_maps,poses,sc_factor,translation, ps=None, sdfs=None, vs=None, sdf_bounds=None):
'''
@rgbs: np array (N,H,W,3)
@depths: (N,H,W)
@@ -237,7 +243,13 @@ def preprocess_data(rgbs,depths,masks,normal_maps,poses,sc_factor,translation):
depths = depths[...,None]
poses[:, :3, 3] += translation
poses[:, :3, 3] *= sc_factor
- return rgbs,depths,masks,normal_maps,poses
+
+ if ps is not None:
+ ps = [p * sc_factor if p is not None else None for p in ps ]
+ if vs is not None:
+ vs = [v * sc_factor if v is not None else None for v in vs ]
+ # sdfs and sdf_bounds are converted in NerfRunner.
+ return rgbs,depths,masks,normal_maps,poses, ps, sdfs, vs, sdf_bounds
class NeRFSmall(nn.Module):
@@ -394,7 +406,7 @@ def sample_pdf(bins, weights, N_samples, det=False):
return samples
-
+### Rays
def get_camera_rays_np(H, W, K):
"""Get ray origins, directions from a pinhole camera."""
i, j = np.meshgrid(np.arange(W, dtype=np.float32),
@@ -402,26 +414,121 @@ def get_camera_rays_np(H, W, K):
dirs = np.stack([(i - K[0,2])/K[0,0], -(j - K[1,2])/K[1,1], -np.ones_like(i)], axis=-1)
return dirs
+def ray_box_intersection_batch(origins, dirs, bounds):
+ '''
+ @origins: (N,3) ray origins, in the same coordinate frame as the bounding box
+ @dirs: (N,3) ray directions
+ @bounds: (2,3) xyz_min and xyz_max
+ '''
+ if not torch.is_tensor(origins):
+ origins = torch.tensor(origins)
+ dirs = torch.tensor(dirs)
+ if not torch.is_tensor(bounds):
+ bounds = torch.tensor(bounds)
+
+ dirs = dirs/(torch.norm(dirs,dim=-1,keepdim=True)+1e-10)
+ inv_dirs = 1/dirs
+ bounds = bounds[None].expand(len(dirs),-1,-1) #(N,2,3)
+
+ sign = torch.zeros((len(dirs),3)).long().to(dirs.device) #(N,3)
+ sign[:,0] = (inv_dirs[:,0] < 0)
+ sign[:,1] = (inv_dirs[:,1] < 0)
+ sign[:,2] = (inv_dirs[:,2] < 0)
+
+ tmin = (torch.gather(bounds[...,0],dim=1,index=sign[:,0].reshape(-1,1)).reshape(-1) - origins[:,0]) * inv_dirs[:,0] #(N)
+ tmin[tmin<0] = 0
+ tmax = (torch.gather(bounds[...,0],dim=1,index=1-sign[:,0].reshape(-1,1)).reshape(-1) - origins[:,0]) * inv_dirs[:,0]
+ tymin = (torch.gather(bounds[...,1],dim=1,index=sign[:,1].reshape(-1,1)).reshape(-1) - origins[:,1]) * inv_dirs[:,1]
+ tymin[tymin<0] = 0
+ tymax = (torch.gather(bounds[...,1],dim=1,index=1-sign[:,1].reshape(-1,1)).reshape(-1) - origins[:,1]) * inv_dirs[:,1]
+
+ ishit = torch.ones(len(dirs)).bool().to(dirs.device)
+ ishit[(tmin > tymax) | (tymin > tmax)] = 0
+ tmin[tymin>tmin] = tymin[tymin>tmin]
+ tmax[tymax<tmax] = tymax[tymax<tmax]
+
+ tzmin = (torch.gather(bounds[...,2],dim=1,index=sign[:,2].reshape(-1,1)).reshape(-1) - origins[:,2]) * inv_dirs[:,2]
+ tzmin[tzmin<0] = 0
+ tzmax = (torch.gather(bounds[...,2],dim=1,index=1-sign[:,2].reshape(-1,1)).reshape(-1) - origins[:,2]) * inv_dirs[:,2]
+
+ ishit[(tmin > tzmax) | (tzmin > tmax)] = 0
+ tmin[tzmin>tmin] = tzmin[tzmin>tmin] #(N)
+ tmax[tzmax<tmax] = tzmax[tzmax<tmax]
+ tmin[ishit==0] = -1
+ tmax[ishit==0] = -1
- valid_depth_mask = (target_d>=cfg['near']*cfg['sc_factor']) & (target_d<=cfg['far']*cfg['sc_factor'])
+ return tmin, tmax
+
+
+### Masking
+def get_masks_from_ray_sample_distances(z_vals, target_d, truncation, cfg,
+ dir_norm=None):
+ """Get masks for the samples that are (1) empty space and (2) near-surface
+ space. Also return the loss weights to use for each of the corresponding
+ loss terms.
+
+ Args:
+ z_vals: (n_rays, n_samples) sampled distances in normalized space along
+ the camera ray passing through depth returns. A z_vals value that's
+ the same as a target_d value is a ray sample directly on top of a
+ depth return.
+ target_d: (n_rays, n_samples) normalized space depth returns from the
+ camera. Every column is the same, just repeated for the number of
+ samples so the size is comparable to z_vals.
+ truncation: (float) truncation distance for the SDF in normalized units.
+ cfg: (dict) NeRF configuration dictionary, containing:
+ - sc_factor: conversion in units [normalized units / meter].
+ - near: near clipping distance in meters from the camera.
+ - far: far clipping distance in meters from the camera.
+ - neg_trunc_ratio: ratio of the truncation distance to use for
+ samples beyond the depth return, compared to samples in front of
+ the depth return.
+
+ Returns:
+ front_mask: (n_rays, n_samples) mask for samples that qualify for the
+ empty space loss.
+ sdf_mask: (n_rays, n_samples) mask for samples that qualify for the
+ near-surface (i.e. SDF) loss.
+ """
+ # Identify valid depth values as those not too close or far from the camera.
+ valid_depth_mask = (target_d >= cfg['near']*cfg['sc_factor']) & \
+ (target_d <= cfg['far']*cfg['sc_factor'])
+
+ # Identify sampled ray points that are too far in front of the depth return.
front_mask = (z_vals < target_d - truncation)
+
+ # Identify sampled ray points that are too far beyond the depth return.
back_mask = (z_vals > target_d + truncation*cfg['neg_trunc_ratio'])
- sdf_mask = (1.0 - front_mask.float()) * (1.0 - back_mask.float()) * valid_depth_mask
+ # SDF mask keeps samples that are within a valid distance from the depth
+ # return, and within a valid distance range from the camera.
+ sdf_mask = (1.0 - front_mask.float()) * (1.0 - back_mask.float()) * \
+ valid_depth_mask
- num_fs_samples = front_mask.sum()
- num_sdf_samples = sdf_mask.sum()
- num_samples = num_sdf_samples + num_fs_samples
- fs_weight = 0.5
- sdf_weight = 1.0 - fs_weight
- return front_mask.bool(), sdf_mask.bool(), fs_weight, sdf_weight
+ return front_mask.bool(), sdf_mask.bool(), FS_WEIGHT, SDF_WEIGHT
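+
+# Illustrative numbers (hedged example): with a depth return d = 1.0,
+# truncation = 0.05 and neg_trunc_ratio = 1.0, a sample at z = 0.90 falls in
+# front_mask, z = 0.98 falls in sdf_mask, and z = 1.08 falls in neither.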
-def get_masks_sdf(sampled_sdf, cfg):
- '''
- @sampled_sdf: in double-scaled unit
- '''
+def get_surface_masks_from_sdf_values(sampled_sdf):
+ """Mask of samples whose double-normalized SDF magnitude is below 0.01,
+ i.e. samples considered to lie on the surface."""
+ sdf_mask = (sampled_sdf < 0.01) & (sampled_sdf > -0.01)
+ return sdf_mask.bool()
+
+def get_masks_from_sdf_values(sampled_sdf, cfg):
+ """Get masks for the sampled SDF values that are (1) empty space and (2)
+ near-surface space.
+
+ Args:
+ sampled_sdf: sampled SDF values in the double-normalized space such that
+ a value of 1 corresponds to a signed distance at the boundary of the
+ truncation region.
+ cfg: (dict) NeRF configuration dictionary, containing:
+ - neg_trunc_ratio: ratio of the truncation distance to use for
+ negative SDF values, compared to positive SDF values.
+
+ Returns:
+ empty_mask: (n_rays, n_samples) mask for SDF values that qualify for the
+ empty space loss.
+ sdf_mask: (n_rays, n_samples) mask for SDF values that qualify for the
+ near-surface (i.e. SDF) loss.
+ """
sdf_mask = (sampled_sdf < 1) & (sampled_sdf > -cfg['neg_trunc_ratio'])
empty_mask = (sampled_sdf > 1)
return empty_mask.bool(), sdf_mask.bool()
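+ # e.g. with neg_trunc_ratio = 1.0 (illustrative), a double-normalized SDF of
+ # 0.5 falls in sdf_mask, 1.5 in empty_mask, and -1.5 in neither.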
@@ -435,61 +542,366 @@ def get_masks_sdf_pretrain(sampled_sdf, cfg):
mid_mask = (~negative_mask) & (~positive_mask)
return negative_mask.bool(), positive_mask.bool(), mid_mask.bool()
-def get_sdf_loss(z_vals, target_d, predicted_sdf, truncation, cfg, return_mask=False, sample_weights=None, rays_d=None):
- dir_norm = rays_d.norm(dim=-1,keepdim=True)
- front_mask, sdf_mask, fs_weight, sdf_weight = get_masks(z_vals, target_d, truncation, cfg, dir_norm=dir_norm)
- front_mask = front_mask.bool()
-
- mask = (target_d>cfg['far']*cfg['sc_factor']) & (predicted_sdf<cfg['fs_sdf'])
+ uncertain_fs_mask = (target_d > cfg['far'] * cfg['sc_factor']) & \
+ (predicted_sdf < cfg['fs_sdf'])
+
+ uncertain_fs_mask_far = target_d > cfg['far'] * cfg['sc_factor']
+ # Encourage the predicted SDF to be the config's free space SDF value for
+ # points that are eligible for uncertain free space loss.
+ uncertain_fs_loss = torch.mean(
+ ((predicted_sdf - cfg['fs_sdf']) * uncertain_fs_mask)**2 * \
+ sample_weights) * fs_weight
+
+ # Only apply the empty space loss if the predicted SDF is less than the
+ # truncation distance. If the predicted SDF is above the truncation
+ # distance already, don't bother regressing on it.
+ empty_space_mask = front_mask & (predicted_sdf < 1)
+ empty_loss = torch.mean(
+ torch.abs(predicted_sdf-1) * empty_space_mask * sample_weights
+ ) * cfg['empty_weight']
+
+ # SDF loss is in normalized space. target_d is the actual (normalized)
+ # depth reading from the camera. z_vals are sampled (normalized) distances
+ # along that ray (z_val=0 means at the camera, z_val=target_d means at the
+ # depth return). predicted_sdf is in double-normalized space, so is
+ # converted to normalized space by multiplying by truncation. The predicted
+ # SDFs should match the distance away from the target_d depth reading, so
+ # this loss penalizes that discrepancy.
+ # Note: A smaller z_val corresponds to a point closer to the camera, and
+ # thus a positive SDF value.
+ # predicted_sdf +z_vals/truncation - target_d / truncation = 0
+ gt_sdf = (target_d - z_vals) / truncation
+ sdf_loss = torch.mean(
+ ((z_vals + predicted_sdf*truncation - target_d)*sdf_mask)**2 * \
+ sample_weights) * sdf_weight
- sdf_loss = torch.mean(((z_vals + predicted_sdf * truncation) * sdf_mask - target_d * sdf_mask)**2 * sample_weights) * sdf_weight
- # print(f"{mask.sum()=}") # ~10000
- # print(f"{sdf_mask.sum()=}") # ~100000
-
- # print(f"{sample_weights*cfg['empty_weight']=}") # [[0.01, 0.01, ...]]
- # print(f"{sample_weights*sdf_weight=}") # [[0.5, 0.5, ...]]
- # print(f"{sample_weights*fs_weight=}") # [[0.5, 0.5, ...]]
if return_mask:
- return fs_loss,sdf_loss,front_mask,sdf_mask
- return fs_loss, sdf_loss
-
+ front_mask = front_mask * sample_weights.bool()
+ sdf_mask = sdf_mask * sample_weights.bool()
+ uncertain_fs_mask = uncertain_fs_mask * sample_weights.bool()
+ uncertain_fs_mask_far = uncertain_fs_mask_far * sample_weights.bool()
+ empty_space_mask = empty_space_mask * sample_weights.bool()
+ return uncertain_fs_loss, empty_loss, sdf_loss, \
+ front_mask, empty_space_mask, sdf_mask, \
+ uncertain_fs_mask, uncertain_fs_mask_far, gt_sdf
+ return uncertain_fs_loss, empty_loss, sdf_loss, gt_sdf
+
+# TODO check this function
+# Thoughts on this function: it has both the |diff| and the diff^2 terms but
+# each calculated with a different mask. One of them has a weight in the config
+# but the other has a hard-coded weight of 0.5.
def get_support_point_loss(predicted_sdf, support_point_sdf, cfg):
'''
@predicted_sdf: network output
@support_point_sdf: in double-scaled units
'''
- empty_mask, sdf_mask = get_masks_sdf(support_point_sdf, cfg)
+ empty_mask, sdf_mask = get_masks_from_sdf_values(support_point_sdf, cfg)
+ on_surface_gt_mask = get_surface_masks_from_sdf_values(support_point_sdf)
+ on_surface_pred_mask = get_surface_masks_from_sdf_values(predicted_sdf)
+
# print(f'{empty_mask.to(torch.int64).sum()=}')
mask = empty_mask & (predicted_sdf<1)
- predicted_sdf = predicted_sdf.squeeze()
- support_point_sdf = support_point_sdf.squeeze()
+ predicted_sdf = predicted_sdf.flatten()
+ support_point_sdf = support_point_sdf.flatten()
assert predicted_sdf.shape == support_point_sdf.shape
assert predicted_sdf.ndim == 1
diff = torch.mean(torch.abs(support_point_sdf - predicted_sdf)*mask) * cfg['empty_weight']
loss = diff
diff_ = torch.mean((support_point_sdf - predicted_sdf)**2 * sdf_mask) * 0.5
loss = loss + diff_
- return loss, mask, sdf_mask
+ return loss, mask, sdf_mask, on_surface_gt_mask, on_surface_pred_mask
+
+def sample_point_based_on_distance_kdtree(pts_to_sample, pts_ref, num_samples,
+ max_ref_points=10000):
+    '''
+    Sample points with probability proportional to their distance to the
+    nearest reference point (favoring points far from the reference set).
+    Use a KDTree for efficient nearest-distance computation.
+    Args:
+        pts_to_sample: torch.Tensor or np.array of points to sample from
+        pts_ref: torch.Tensor or np.array of reference points
+        num_samples: int, number of points to sample
+        max_ref_points: int, maximum number of reference points to use
+    Returns:
+        (sampled points, in the same type as the input; sampled indices)
+    '''
+ # Convert to numpy if needed
+ is_torch = torch.is_tensor(pts_to_sample)
+ if is_torch:
+ pts_to_sample_np = pts_to_sample.detach().cpu().numpy()
+ pts_ref_np = pts_ref.detach().cpu().numpy()
+ else:
+ pts_to_sample_np = pts_to_sample
+ pts_ref_np = pts_ref
+
+ # Subsample reference points if needed
+ if len(pts_ref_np) > max_ref_points:
+ idx = np.random.choice(len(pts_ref_np), max_ref_points, replace=False)
+ pts_ref_np = pts_ref_np[idx]
+
+ # Build KDTree
+ tree = cKDTree(pts_ref_np)
+
+ # Query nearest distances
+ min_distances, _ = tree.query(pts_to_sample_np, k=1)
+
+ # Compute sampling probabilities
+ probabilities = min_distances / np.sum(min_distances)
+
+ # Sample points
+ sampled_indices = np.random.choice(
+ len(pts_to_sample_np),
+ size=num_samples,
+ p=probabilities,
+ replace=True
+ )
+
+ # Return in same format as input
+ if is_torch:
+ return pts_to_sample[sampled_indices], sampled_indices
+ return pts_to_sample_np[sampled_indices], sampled_indices
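+# Example usage of sample_point_based_on_distance_kdtree (a sketch;
+# `candidate_pts` and `octree_pts` are hypothetical (N, 3) arrays): draw 1000
+# candidates, favoring those far from the existing octree cloud:
+#   sampled_pts, idx = sample_point_based_on_distance_kdtree(
+#       candidate_pts, octree_pts, num_samples=1000)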
+
+
+def sample_point_based_on_density_voxel(pts_to_sample, voxel_size=0.2, num_samples=1000):
+    '''
+    Sample points with probability inversely proportional to the local point
+    density. Use a voxel grid for fast density estimation: density is
+    approximated by the number of points in each voxel.
+    (cKDTree.query_ball_point is slow for a large number of points.)
+
+    Args:
+        pts_to_sample: torch.Tensor or np.array of shape (N, D)
+        voxel_size: float, size of voxel
+        num_samples: int, number of points to sample
+    Returns:
+        (sampled points, in the same format as the input; sampled indices)
+    '''
+ is_torch = torch.is_tensor(pts_to_sample)
+ if is_torch:
+ pts_np = pts_to_sample.detach().cpu().numpy()
+ else:
+ pts_np = pts_to_sample
+
+ # Convert points to voxel indices
+ voxel_indices = np.floor(pts_np / voxel_size).astype(int)
+
+ # Create a unique key for each voxel
+ # Using string key since numpy arrays aren't hashable
+ voxel_keys = [','.join(map(str, idx)) for idx in voxel_indices]
+
+ # Count points in each voxel
+ from collections import Counter
+ voxel_counts = Counter(voxel_keys)
+
+ # Map back to point densities
+ point_densities = np.array([voxel_counts[key] for key in voxel_keys])
+
+    # Convert to sampling probabilities; eps smooths the inverse density so
+    # that nearly empty voxels do not dominate the distribution.
+    eps = 10
+    inverse_density = 1.0 / (point_densities + eps)
+ probabilities = inverse_density / np.sum(inverse_density)
+
+ # Sample points
+ sampled_indices = np.random.choice(
+ len(pts_np),
+ size=num_samples,
+ p=probabilities,
+ replace=True
+ )
+
+ # Return in same format as input
+ if is_torch:
+ return pts_to_sample[sampled_indices], sampled_indices
+ return pts_np[sampled_indices], sampled_indices
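+# Note: the string-key Counter in sample_point_based_on_density_voxel could
+# equivalently be computed with
+#   _, inverse, counts = np.unique(voxel_indices, axis=0,
+#                                  return_inverse=True, return_counts=True)
+#   point_densities = counts[inverse.ravel()]
+# which avoids building a Python string per point.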
+
+def convex_interp(pts_end_1: Union[torch.Tensor, np.ndarray],
+ sdfs_end_1: Union[torch.Tensor, np.ndarray, None] = None,
+ pts_end_2: Union[torch.Tensor, np.ndarray, None] = None,
+ sdfs_end_2: Union[torch.Tensor, np.ndarray, None] = None,
+ power: float = 1.0, num_samples: Optional[int] = None):
+    '''
+    Create random two-point convex interpolations among the given points
+    (and, optionally, their SDF values).
+    power: if > 1, interpolated points are skewed towards the second set of
+        points; if < 1, towards the first set.
+    '''
+ # May interpolate among the same set of points
+ if pts_end_2 is None:
+ assert sdfs_end_2 is None
+ pts_end_2 = pts_end_1
+ sdfs_end_2 = sdfs_end_1
+
+ # May just interpolate the points without sdf
+ interp_sdf = sdfs_end_1 is not None
+ if interp_sdf:
+ assert sdfs_end_2 is not None
+ # pts and sdfs should have the same number of points
+ assert pts_end_1.shape[0] == sdfs_end_1.shape[0], f'{pts_end_1.shape[0]=}, {sdfs_end_1.shape[0]=}'
+ assert pts_end_2.shape[0] == sdfs_end_2.shape[0], f'{pts_end_2.shape[0]=}, {sdfs_end_2.shape[0]=}'
+
+ # check if input is torch or numpy
+ is_torch = isinstance(pts_end_1, torch.Tensor)
+ if is_torch:
+ assert isinstance(pts_end_2, torch.Tensor)
+ if interp_sdf:
+ assert isinstance(sdfs_end_1, torch.Tensor)
+ assert isinstance(sdfs_end_2, torch.Tensor)
+ else:
+ assert isinstance(pts_end_1, np.ndarray)
+ assert isinstance(pts_end_2, np.ndarray)
+ if interp_sdf:
+ assert isinstance(sdfs_end_1, np.ndarray)
+ assert isinstance(sdfs_end_2, np.ndarray)
+
+ # Select the same number of points for both ends (sample or use all)
+ if num_samples is not None:
+ if is_torch:
+ idx_sample_1 = torch.randint(0, pts_end_1.shape[0], (num_samples,))
+ idx_sample_2 = torch.randint(0, pts_end_2.shape[0], (num_samples,))
+ else:
+ # This is equivalent to np.random.randint(0, pts_end_1.shape[0], num_samples)
+ idx_sample_1 = np.random.choice(pts_end_1.shape[0], num_samples)
+ idx_sample_2 = np.random.choice(pts_end_2.shape[0], num_samples)
+ pts_end_1 = pts_end_1[idx_sample_1]
+ pts_end_2 = pts_end_2[idx_sample_2]
+ if interp_sdf:
+ sdfs_end_1 = sdfs_end_1[idx_sample_1]
+ sdfs_end_2 = sdfs_end_2[idx_sample_2]
+ else:
+ assert pts_end_1.shape[0] == pts_end_2.shape[0], f'{pts_end_1.shape[0]=}, {pts_end_2.shape[0]=}'
+
+ # Interpolate
+ if is_torch:
+ weights = torch.rand(pts_end_1.shape[0]).float().to(pts_end_1.device)
+ else:
+ weights = np.random.rand(pts_end_1.shape[0])
+
+ # Apply power to skew the interpolation
+ weights = weights ** power
+ pts_interp = pts_end_1 * weights.reshape(-1, 1) + pts_end_2 * (1 - weights).reshape(-1, 1)
+ if interp_sdf:
+ sdfs_interp = sdfs_end_1 * weights + sdfs_end_2 * (1 - weights)
+ return pts_interp, sdfs_interp
+ return pts_interp
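+# Example usage (a sketch; `surface_pts`/`surface_sdfs` and `hull_pts`/
+# `hull_sdfs` are hypothetical tensors of matching lengths):
+#   pts_mid, sdfs_mid = convex_interp(surface_pts, surface_sdfs,
+#                                     hull_pts, hull_sdfs,
+#                                     power=2.0, num_samples=5000)
+# power=2.0 skews the samples towards the second set (hull_pts). Note that for
+# a convex object the SDF is a convex function, so the linearly interpolated
+# SDF value upper-bounds the true SDF at the interpolated point.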
+
+def convex_volume_interp_sampling_uniform(pts, sdfs, num_samples):
+    '''
+    Create random four-point convex interpolations among the given points and
+    their SDF values.
+    '''
+ ### randomly sample interpolation points
+ ### sample the indices with replacement
+ idx_sample_1 = torch.randint(0, pts.shape[0], (num_samples,))
+ idx_sample_2 = torch.randint(0, pts.shape[0], (num_samples,))
+ idx_sample_3 = torch.randint(0, pts.shape[0], (num_samples,))
+ idx_sample_4 = torch.randint(0, pts.shape[0], (num_samples,))
+ weights_1st = torch.rand(num_samples).float().cuda()
+ weights_2nd = torch.rand(num_samples).float().cuda()
+ weights_1 = weights_1st * weights_2nd
+ weights_2 = (1 - weights_1st) * weights_2nd
+ weights_3 = weights_1st * (1 - weights_2nd)
+ weights_4 = (1 - weights_1st) * (1 - weights_2nd)
+ pts_sample_1 = pts[idx_sample_1]
+ pts_sample_2 = pts[idx_sample_2]
+ pts_sample_3 = pts[idx_sample_3]
+ pts_sample_4 = pts[idx_sample_4]
+ sdfs_sample_1 = sdfs[idx_sample_1]
+ sdfs_sample_2 = sdfs[idx_sample_2]
+ sdfs_sample_3 = sdfs[idx_sample_3]
+ sdfs_sample_4 = sdfs[idx_sample_4]
+ ### interpolate the sdf values
+ pts_interp = pts_sample_1 * weights_1.view(-1, 1) + pts_sample_2 * weights_2.view(-1, 1) + \
+ pts_sample_3 * weights_3.view(-1, 1) + pts_sample_4 * weights_4.view(-1, 1)
+ sdfs_interp = sdfs_sample_1 * weights_1 + sdfs_sample_2 * weights_2 + \
+ sdfs_sample_3 * weights_3 + sdfs_sample_4 * weights_4
+ # pts_interp = pts_sample_1 * weights.view(-1, 1) + pts_sample_2 * (1 - weights).view(-1, 1)
+ # sdfs_interp = sdfs_sample_1 * weights + sdfs_sample_2 * (1 - weights)
+ return pts_interp, sdfs_interp
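+# Note: the four weights above sum to one,
+#   w1*w2 + (1-w1)*w2 + w1*(1-w2) + (1-w1)*(1-w2) = 1,
+# so every output is a convex combination of four sampled points and of their
+# SDF values (the weights are not uniform over the 3-simplex, but they always
+# stay inside it).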
+
+def create_2d_slice_around_point(points, anchor_dim, range, resolution):
+ '''
+ @points: (n,3) (x,y,z): points to create the slice around
+ @anchor_dim: int 0,1,2: which dimension to anchor the slice at a location
+ @range: (2,) (min,max): range of the slice
+ @resolution: int: number of points in one dimension
+ '''
+ assert anchor_dim in [0,1,2]
+ assert len(range) == 2
+ assert resolution > 0
+ assert points.ndim == 2
+ assert points.shape[1] == 3
+
+ grid_axis_0 = torch.linspace(range[0],range[1],resolution).float().cuda()
+ grid_axis_1 = torch.linspace(range[0],range[1],resolution).float().cuda()
+ grid_0, grid_1 = torch.meshgrid(grid_axis_0,grid_axis_1)
+ grid_0_batch = grid_0.unsqueeze(0).expand(len(points),resolution,resolution)
+ grid_1_batch = grid_1.unsqueeze(0).expand(len(points),resolution,resolution)
+ grid_anchor = torch.ones_like(grid_0) * points[:,anchor_dim].unsqueeze(1).unsqueeze(1)
+ if anchor_dim == 0:
+ slices = torch.stack((grid_anchor,grid_0_batch,grid_1_batch),dim=-1)
+ elif anchor_dim == 1:
+ slices = torch.stack((grid_0_batch,grid_anchor,grid_1_batch),dim=-1)
+ elif anchor_dim == 2:
+ slices = torch.stack((grid_0_batch,grid_1_batch,grid_anchor),dim=-1)
+ return slices
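+# Example usage (a sketch; `anchor_pts` is a hypothetical (n, 3) CUDA tensor):
+#   slices = create_2d_slice_around_point(anchor_pts, anchor_dim=0,
+#                                         range=(-1.0, 1.0), resolution=100)
+#   # slices has shape (n, 100, 100, 3): one y/z grid per anchor point, fixed
+#   # at that point's x coordinate.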
+
+# TODO This one is currently unused; figure out if we want to use it or not.
+def get_hcsdf_loss_simple(predicted_sdf, sdf_lower_bound, cfg):
+ '''Hyperplane-constrained signed distance loss. This loss function ensures
+  that the signed distance function of a point is at least the minimum bound
+  provided by ContactNets.
-def get_hcsdf_loss_simple(predicted_sdf, sdf_bounds_from_cnets, cfg):
- '''Hyperplane-constrained signed distance loss. This loss function ensures that the signed distance function
- of a point is at least a minimum bounds provided by contactnets.
@predicted_sdf: torch.tensor (N_sample,1) network output
'''
- predicted_sdf = predicted_sdf.squeeze()
+ predicted_sdf = predicted_sdf.flatten()
- sdf_mask = sdf_bounds_from_cnets > -cfg['neg_trunc_ratio']
+ sdf_mask = sdf_lower_bound > -cfg['neg_trunc_ratio']
- sdf_bounds_from_cnets = sdf_bounds_from_cnets.squeeze()
- assert predicted_sdf.shape == sdf_bounds_from_cnets.shape
+ sdf_lower_bound = sdf_lower_bound.flatten()
+ assert predicted_sdf.shape == sdf_lower_bound.shape
assert predicted_sdf.ndim == 1
- sdf_diff_lower = sdf_bounds_from_cnets - predicted_sdf
+ sdf_diff_lower = sdf_lower_bound - predicted_sdf
# sdf_diff_upper = predicted_sdf - sdf_bounds_from_cnets
# empty_weight = 100 # cfg['empty_weight'] # 100
@@ -500,22 +912,25 @@ def get_hcsdf_loss_simple(predicted_sdf, sdf_bounds_from_cnets, cfg):
loss = sdf_loss_lower
return loss, torch.zeros_like(sdf_mask), sdf_mask
-def get_hcsdf_loss(predicted_sdf, sdf_bounds_from_cnets, cfg):
- '''Hyperplane-constrained signed distance loss. This loss function ensures that the signed distance function
- of a point is at least a minimum bounds provided by contactnets.
+# TODO check this function
+def get_hcsdf_loss(predicted_sdf, sdf_lower_bound, cfg):
+ '''Hyperplane-constrained signed distance loss. This loss function ensures
+  that the signed distance function of a point is at least the minimum bound
+ provided by ContactNets.
+
@predicted_sdf: torch.tensor (N_sample,1) network output
'''
- predicted_sdf = predicted_sdf.squeeze()
+ predicted_sdf = predicted_sdf.flatten()
- empty_mask, sdf_mask = get_masks_sdf(sdf_bounds_from_cnets, cfg)
+ empty_mask, sdf_mask = get_masks_from_sdf_values(sdf_lower_bound, cfg)
# print(f'{empty_mask.to(torch.int64).sum()=}')
mask = empty_mask & (predicted_sdf<1)
- sdf_bounds_from_cnets = sdf_bounds_from_cnets.squeeze()
- assert predicted_sdf.shape == sdf_bounds_from_cnets.shape
- assert predicted_sdf.ndim == 1
- sdf_diff_lower = sdf_bounds_from_cnets - predicted_sdf
- sdf_diff_upper = predicted_sdf - sdf_bounds_from_cnets
+ sdf_lower_bound = sdf_lower_bound.flatten()
+ assert predicted_sdf.shape == sdf_lower_bound.shape
+ assert predicted_sdf.ndim == 1, f"{predicted_sdf.shape=}"
+ sdf_diff_lower = sdf_lower_bound - predicted_sdf
+ sdf_diff_upper = predicted_sdf - sdf_lower_bound
empty_weight = 100 # cfg['empty_weight'] # 100
hc_sdf_weight_lower = cfg['hc_sdf_weight_lower'] # 3000
@@ -535,57 +950,43 @@ def get_hcsdf_loss(predicted_sdf, sdf_bounds_from_cnets, cfg):
# print(f"{sdf_mask.sum()=}")
return loss, mask, sdf_mask
-def get_pretrain_sdf_loss(outputs, sdf_bounds_from_cnets, cfg):
- '''Hyperplane-constrained signed distance loss. This loss function ensures that the signed distance function
- of a point is at least a minimum bounds provided by contactnets.
- @predicted_sdf: torch.tensor (N_sample,1) network output
- '''
- predicted_sdf = outputs[..., :-3]
- predicted_sdf = predicted_sdf.squeeze()
+# TODO use this function
+def get_gradient_loss(predicted_sdf, predicted_gradient, target_gradient, cfg):
+ '''Gradient loss. This loss function ensures that the gradient of the
+ predicted SDF is close to the gradient provided by ContactNets.
- negative_mask, positive_mask, mid_mask = get_masks_sdf_pretrain(sdf_bounds_from_cnets, cfg)
- # print(f'{empty_mask.to(torch.int64).sum()=}')
- mask_p = positive_mask & (predicted_sdf<2)
- mask_n = negative_mask & (predicted_sdf>-2)
+ @predicted_sdf: torch.tensor (N_sample,) network output
+ @predicted_gradient: torch.tensor (N_sample, 3) network gradient
+ @target_gradient: torch.tensor (N_sample, 3) ground truth gradient
+ '''
+ predicted_sdf = predicted_sdf.flatten()
- sdf_bounds_from_cnets = sdf_bounds_from_cnets.squeeze()
- assert predicted_sdf.shape == sdf_bounds_from_cnets.shape
- assert predicted_sdf.ndim == 1
- # sdf_diff_lower = sdf_bounds_from_cnets - predicted_sdf
- # sdf_diff_upper = predicted_sdf - sdf_bounds_from_cnets
+ empty_mask, sdf_mask = get_masks_from_sdf_values(predicted_sdf, cfg)
+ mask = empty_mask & (predicted_sdf<1) & sdf_mask
- sdf_weight = 30 #cfg['empty_weight']
- interior_weight = 20 #cfg['hc_sdf_weight_lower']
- exterior_weight = 20 #cfg['hc_sdf_weight_upper']
+ assert predicted_gradient.shape == target_gradient.shape
+ assert predicted_gradient.ndim == 2
+ assert predicted_gradient.shape[1] == 3
- negative_loss = torch.mean(torch.abs(predicted_sdf + 2)*mask_n) * 0.5 * interior_weight
- positive_loss = torch.mean(torch.abs(predicted_sdf - 2)*mask_p) * 0.5 * exterior_weight
-
- mid_loss = torch.mean((predicted_sdf- sdf_bounds_from_cnets)**2 * mid_mask) * 0.1 * sdf_weight
+ dot_product = torch.einsum(
+ 'ij,ij->i', predicted_gradient[mask], target_gradient[mask])
+ loss = torch.linalg.norm(1 - dot_product)**2
+ loss *= cfg['gradient_weight']
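+  # Note: when both gradients are unit vectors, 1 - dot_product equals
+  # 1 - cos(angle) between them, so the penalty vanishes only when the
+  # predicted gradient is aligned with the ContactNets gradient.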
- loss = positive_loss + mid_loss + negative_loss
- return loss #, mask, sdf_mask
+ return loss, mask, sdf_mask
-def get_pretrain_eikonal_loss(outputs, gts, cfg, truncation_scale):
- normals = outputs[..., -3:]
- predicted_sdf = outputs[..., 0]
- normals_norm = torch.norm(normals, dim=-1)
- normals_norm = normals_norm * truncation_scale
- mask = (gts < 2) & (gts > -2)
- normals_norm = normals_norm[mask]
- # print(f"{truncation_scale=}")
- # print(f"{normals_norm.amax()=}, {normals_norm.amin()=}, {normals_norm.mean()=}")
- eikonal_loss = ((normals_norm-1)**2).mean() * cfg['pretrain_eikonal_weight']
- # eikonal_loss = ((normals_norm/(normals_norm+1e-6)-1)**2).mean()*cfg['pretrain_eikonal_weight']
- return eikonal_loss
-def dirac_regularized(eps, x):
+### Pretraining loss terms
+def dirac_regularized_for_pretraining(eps, x):
return eps / np.pi / (eps**2 + x**2)
# return torch.exp(-x**2/eps**2)/(eps*np.sqrt(np.pi))
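+# Note: dirac_regularized_for_pretraining is the Lorentzian (Cauchy) kernel
+# eps / (pi * (eps**2 + x**2)), which approximates a Dirac delta: it peaks at
+# 1/(pi*eps) at x = 0 and integrates to 1. Averaging it over predicted SDF
+# values therefore penalizes the amount of near-zero-SDF volume, i.e. surface
+# area, which is what the minimal-surface loss below relies on.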
def get_pretrain_minimal_surface_loss(outputs, cfg):
'''output: (N_sample, 1)'''
- return torch.mean(dirac_regularized(cfg['eps_minimal_surface'], outputs[..., :-3])) * cfg['pretrain_minimal_surface_weight']
+ return torch.mean(
+ dirac_regularized_for_pretraining(
+ cfg['eps_minimal_surface'], outputs[..., :-3]
+ )) * cfg['pretrain_minimal_surface_weight']
def get_pretrain_hessian_loss(outputs, inputs_flat, cfg):
normals = outputs[..., -3:]
@@ -647,35 +1048,52 @@ def get_pretrain_normal_direction_loss(outputs, target_normals, gts, cfg, inputs
normal_direction_loss = torch.mean(1 - torch.sum((normals*target_normals)[mask], dim=-1)) * cfg['pretrain_normal_direction_weight']
return normal_direction_loss
-def visualize_normals(points, normals, target_normals, sample_size=1000):
- normals = normals.cpu().detach().numpy()
- points = points.cpu().detach().numpy()
- target_normals = target_normals.cpu().detach().numpy()
- idx = np.random.choice(points.shape[0], sample_size, replace=False)
- sampled_points = points[idx]
- sampled_normals = normals[idx]
- sampled_target_normals = target_normals[idx]
+def get_pretrain_sdf_loss(outputs, sdf_bounds_from_cnets, cfg):
+  '''Hyperplane-constrained signed distance loss (pretraining variant). This
+  loss function ensures that the signed distance function of a point is at
+  least the minimum bound provided by ContactNets.
+ @predicted_sdf: torch.tensor (N_sample,1) network output
+ '''
+ predicted_sdf = outputs[..., :-3]
+ predicted_sdf = predicted_sdf.squeeze()
- fig = plt.figure(figsize=(12, 12))
- ax = fig.add_subplot(111, projection='3d')
- ax.scatter(sampled_points[:, 0], sampled_points[:, 1], sampled_points[:, 2], color='black', s=5)
- for p, n, tn in zip(sampled_points, sampled_normals, sampled_target_normals):
- ax.quiver(p[0], p[1], p[2], n[0], n[1], n[2], length=0.1, color='red')
- ax.quiver(p[0], p[1], p[2], tn[0], tn[1], tn[2], length=0.1, color='blue')
+ negative_mask, positive_mask, mid_mask = get_masks_sdf_pretrain(sdf_bounds_from_cnets, cfg)
+ # print(f'{empty_mask.to(torch.int64).sum()=}')
+ mask_p = positive_mask & (predicted_sdf<2)
+ mask_n = negative_mask & (predicted_sdf>-2)
- ax.set_xlabel('X')
- ax.set_ylabel('Y')
- ax.set_zlabel('Z')
- plt.title('Red: Predicted Normals, Blue: Target Normals')
- plt.show()
+ sdf_bounds_from_cnets = sdf_bounds_from_cnets.squeeze()
+ assert predicted_sdf.shape == sdf_bounds_from_cnets.shape
+ assert predicted_sdf.ndim == 1
+ # sdf_diff_lower = sdf_bounds_from_cnets - predicted_sdf
+ # sdf_diff_upper = predicted_sdf - sdf_bounds_from_cnets
-def get_pts_sdf_loss(sdf, target_sdf):
- '''Target SDF of contact points are 0 and near-surface points are specified SDF.
- '''
- # target = torch.zeros_like(support_sdf)
- loss = F.mse_loss(sdf, target_sdf)
- return loss
+ sdf_weight = 30 #cfg['empty_weight']
+ interior_weight = 20 #cfg['hc_sdf_weight_lower']
+ exterior_weight = 20 #cfg['hc_sdf_weight_upper']
+
+ negative_loss = torch.mean(torch.abs(predicted_sdf + 2)*mask_n) * 0.5 * interior_weight
+ positive_loss = torch.mean(torch.abs(predicted_sdf - 2)*mask_p) * 0.5 * exterior_weight
+
+ mid_loss = torch.mean((predicted_sdf- sdf_bounds_from_cnets)**2 * mid_mask) * 0.1 * sdf_weight
+
+ loss = positive_loss + mid_loss + negative_loss
+ return loss #, mask, sdf_mask
+def get_pretrain_eikonal_loss(outputs, gts, cfg, truncation_scale):
+ normals = outputs[..., -3:]
+ predicted_sdf = outputs[..., 0]
+ normals_norm = torch.norm(normals, dim=-1)
+ normals_norm = normals_norm * truncation_scale
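+  # The network predicts SDF in truncation-normalized units, so its gradient
+  # magnitude is rescaled by truncation_scale before being compared against 1.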
+ mask = (gts < 2) & (gts > -2)
+ normals_norm = normals_norm[mask]
+ # print(f"{truncation_scale=}")
+ # print(f"{normals_norm.amax()=}, {normals_norm.amin()=}, {normals_norm.mean()=}")
+ eikonal_loss = ((normals_norm-1)**2).mean() * cfg['pretrain_eikonal_weight']
+ # eikonal_loss = ((normals_norm/(normals_norm+1e-6)-1)**2).mean()*cfg['pretrain_eikonal_weight']
+ return eikonal_loss
+
+
+### Unused
def sample_points(vertices, surfaces, normals, distance=0.01, num_samples=10, device=None):
'''Sample points on the outside of surfaces.
'''
@@ -695,7 +1113,6 @@ def sample_points(vertices, surfaces, normals, distance=0.01, num_samples=10, de
sampled_points = np.array(sampled_points)
return torch.tensor(sampled_points, device=device, dtype=torch.float32)
-
def sample_points_and_distances(mesh, num_samples, device=None):
'''Sample directions and points and get corresponding distances from each point to the nearest hyperplane.
'''
@@ -718,7 +1135,6 @@ def sample_points_and_distances(mesh, num_samples, device=None):
return sampled_points, distances
-
def find_corresponding_pts(contact_points, sdf_points, tf, id, threshold=0.1):
'''
@contact_points: (N, 3) contact points from dair_pll
@@ -755,48 +1171,24 @@ def find_corresponding_pts(contact_points, sdf_points, tf, id, threshold=0.1):
# plt.savefig(f'vis_pts_{id}.png')
return valid_indices
-def ray_box_intersection_batch(origins, dirs, bounds):
- '''
- @origins: (N,3) origin and directions. In the same coordinate frame as the bounding box
- @bounds: (2,3) xyz_min and max
- '''
- if not torch.is_tensor(origins):
- origins = torch.tensor(origins)
- dirs = torch.tensor(dirs)
- if not torch.is_tensor(bounds):
- bounds = torch.tensor(bounds)
-
- dirs = dirs/(torch.norm(dirs,dim=-1,keepdim=True)+1e-10)
- inv_dirs = 1/dirs
- bounds = bounds[None].expand(len(dirs),-1,-1) #(N,2,3)
-
- sign = torch.zeros((len(dirs),3)).long().to(dirs.device) #(N,3)
- sign[:,0] = (inv_dirs[:,0] < 0)
- sign[:,1] = (inv_dirs[:,1] < 0)
- sign[:,2] = (inv_dirs[:,2] < 0)
-
- tmin = (torch.gather(bounds[...,0],dim=1,index=sign[:,0].reshape(-1,1)).reshape(-1) - origins[:,0]) * inv_dirs[:,0] #(N)
- tmin[tmin<0] = 0
- tmax = (torch.gather(bounds[...,0],dim=1,index=1-sign[:,0].reshape(-1,1)).reshape(-1) - origins[:,0]) * inv_dirs[:,0]
- tymin = (torch.gather(bounds[...,1],dim=1,index=sign[:,1].reshape(-1,1)).reshape(-1) - origins[:,1]) * inv_dirs[:,1]
- tymin[tymin<0] = 0
- tymax = (torch.gather(bounds[...,1],dim=1,index=1-sign[:,1].reshape(-1,1)).reshape(-1) - origins[:,1]) * inv_dirs[:,1]
-
- ishit = torch.ones(len(dirs)).bool().to(dirs.device)
- ishit[(tmin > tymax) | (tymin > tmax)] = 0
- tmin[tymin>tmin] = tymin[tymin>tmin]
-  tmax[tymax<tmax] = tymax[tymax<tmax]
-
-  tzmin = (torch.gather(bounds[...,2],dim=1,index=sign[:,2].reshape(-1,1)).reshape(-1) - origins[:,2]) * inv_dirs[:,2]
-  tzmin[tzmin<0] = 0
-  tzmax = (torch.gather(bounds[...,2],dim=1,index=1-sign[:,2].reshape(-1,1)).reshape(-1) - origins[:,2]) * inv_dirs[:,2]
-
-  ishit[(tmin > tzmax) | (tzmin > tmax)] = 0
-  tmin[tzmin>tmin] = tzmin[tzmin>tmin] #(N)
-  tmax[tzmax<tmax] = tzmax[tzmax<tmax]
+  def __next__(self):
+    # For infinite mode, recycle the rays: reshuffle once a pass is exhausted
+    if self.infinite:
+      if self.pos + self.batch_size >= len(self.ids):
+ self.reset()
+
+ self.batch_ray_ids = self.ids[self.pos:self.pos + self.batch_size]
+ self.pos += self.batch_size
+ return self.rays[self.batch_ray_ids].cuda()
+
+ # For finite mode, stop after one complete pass
+ else:
+ if self.pos >= len(self.ids):
+ raise StopIteration
+
+ if self.pos + self.batch_size >= len(self.ids):
+ self.batch_ray_ids = self.ids[self.pos:]
+ self.pos = len(self.ids) # Mark as complete
+ else:
+ self.batch_ray_ids = self.ids[self.pos:self.pos + self.batch_size]
+ self.pos += self.batch_size
+
+ return self.rays[self.batch_ray_ids].cuda()
+
+ def __len__(self):
+ """Return the number of batches in an epoch."""
+ return (len(self.rays) + self.batch_size - 1) // self.batch_size
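+
+# Example DataLoader usage (a sketch; `rays` is an (N, D) tensor and the
+# recycling behavior depends on the infinite/finite mode chosen at
+# construction):
+#   loader = DataLoader(rays=rays, batch_size=2048)
+#   batch = next(loader)  # (batch_size, D) rays moved to the GPU
+#   # In finite mode, next() raises StopIteration after one full pass;
+#   # len(loader) gives the number of batches per pass.
+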
class NerfRunner:
- def __init__(self,cfg,images,depths,masks,normal_maps,poses,K,_run=None,occ_masks=None,build_octree_pcd=None):
+ def __init__(self, cfg, images, depths, masks, normal_maps, poses, K,
+ _run=None, occ_masks=None, build_octree_pcd=None,
+ store_sdf_pts=True, store_slices=True, store_cnet_pts=True,
+ ps=None, sdfs=None, vs=None, sdf_bounds=None, ):
+    """Neural object field (NeRF + SDF) runner.
+
+    Args:
+      cfg: A NeRF configuration dictionary.
+      images, depths, masks, normal_maps, poses, K: Per-frame observations,
+        camera-to-world poses, and camera intrinsics.
+      ps, sdfs, vs, sdf_bounds: Optional per-frame support points with SDF
+        values and sampled points with SDF bounds from PLL/ContactNets,
+        expressed in the camera frame.
+    """
set_seed(0)
self.cfg = cfg
self.cfg['tv_loss_weight'] = eval(str(self.cfg['tv_loss_weight']))
@@ -125,14 +173,115 @@ def __init__(self,cfg,images,depths,masks,normal_maps,poses,K,_run=None,occ_mask
self.mesh = None
self.train_pose = False
self.N_iters = self.cfg['n_step']+1
+ self.geom_dir = self.cfg['geometry_dir']
+ self.octree_with_cnet = self.cfg['octree_with_cnet']
+ self.octree_convex = self.cfg['octree_convex']
+ self.octree_contact_filter = self.cfg['octree_contact_filter']
+ self.store_sdf_pts = store_sdf_pts
+ self.store_slices = store_slices
+ # store_slices can be enabled even when contact points are not used (geom_dir is None)
+ self.store_cnet_pts = store_cnet_pts
+
self.build_octree_pts = np.asarray(build_octree_pcd.points).copy() # Make it pickable
+ # Check for and incorporate geometry information from PLL.
+ self.contact_in_cam = False
+ if ps is not None and sdfs is not None:
+ self.contact_in_cam = True
+ self.ps_in_cam = ps
+ self.sdfs_in_cam = sdfs
+ self.vs_in_cam = vs
+ self.sdf_bounds_in_cam = sdf_bounds
+
+ if self.geom_dir is not None:
+ self.ps = file_utils.load_geometry_support_points(self.geom_dir)
+ self.sdfs = file_utils.load_geometry_sdfs(self.geom_dir)
+ self.vs = file_utils.load_geometry_sdf_bounded_mesh_points(self.geom_dir)
+ self.sdf_bounds = file_utils.load_geometry_sdf_bounds(self.geom_dir)
+ self.ws = file_utils.load_geometry_sdf_gradient_mesh_points(self.geom_dir)
+ self.w_normals = file_utils.load_geometry_sdf_gradients(self.geom_dir)
+ print(f'>>>>>>>>>>>> {self.ps.shape=}, {self.sdfs.shape=}')
+ print(f'>>>>>>>>>>>> {self.vs.shape=}, {self.sdf_bounds.shape=}')
+ print(f'>>>>>>>>>>>> {self.ws.shape=}, {self.w_normals.shape=}')
+
+ # Load any previously stored offsets generated from the parent tracking
+ # experiment or this child's shape reconstruction experiment.
+ self.offset = file_utils.get_nerf_offset_transform(
+ self.cfg['debug_dir'], self.cfg['bundlesdf_run_id'])
+ self.ob_init_cam = file_utils.load_first_pose_in_cam(self.cfg['debug_dir'])
+
+ if self.octree_with_cnet or self.octree_contact_filter:
+ print(f'>>>>>>>>>>>> {self.build_octree_pts.shape=}')
+ print(f'>>>>>>>>>>>> {np.linalg.norm(self.build_octree_pts,axis=1).mean()}')
+
+ # Convert the support points and mesh points to normalized space
+ support_pts = \
+ contact_loss_utils.convert_meter_points_to_normalized_space_for_sdf(
+ points=self.ps, sc_factor=self.cfg['sc_factor'],
+ offset=self.offset, translation=self.cfg['translation'],
+ bsdf_init_pose=self.ob_init_cam
+ )
+ hpp_points = \
+ contact_loss_utils.convert_meter_points_to_normalized_space_for_sdf(
+ points=self.vs, sc_factor=self.cfg['sc_factor'],
+ offset=self.offset, translation=self.cfg['translation'],
+ bsdf_init_pose=self.ob_init_cam
+ )
+
+ support_pts_for_octree = support_pts.cpu().numpy().copy()
+ print(f'>>>>>>>>>>>> {support_pts_for_octree.shape=}')
+ print(f'>>>>>>>>>>>> {np.linalg.norm(support_pts_for_octree,axis=1).mean()}')
+
+ hpp_pts_for_octree = hpp_points.cpu().numpy().copy()
+ print(f'>>>>>>>>>>>> {hpp_pts_for_octree.shape=}')
+ print(f'>>>>>>>>>>>> {np.linalg.norm(hpp_pts_for_octree,axis=1).mean()}')
+
+      # Build a vision-only octree and filter out the contact points that
+      # already fall inside it
+ if self.octree_contact_filter:
+ self.octree_vision_m, self.ray_trace_level = self.build_octree_custom_points(self.build_octree_pts)
+ idx_support_pts_in_octree = self.octree_vision_m.get_center_ids(support_pts.cuda(), self.octree_vision_m.max_level)
+ idx_hpp_pts_in_octree = self.octree_vision_m.get_center_ids(hpp_points.cuda(), self.octree_vision_m.max_level)
+ ### filter out points that are in the octree (with index not -1)
+ idx_support_pts_in_octree_np = idx_support_pts_in_octree.cpu().numpy()
+ idx_hpp_pts_in_octree_np = idx_hpp_pts_in_octree.cpu().numpy()
+ support_pts_for_octree = support_pts_for_octree[idx_support_pts_in_octree_np==-1]
+ hpp_pts_for_octree = hpp_pts_for_octree[idx_hpp_pts_in_octree_np==-1]
+ ### filter self.ps and self.vs, self.sdfs, self.sdf_bounds as well
+ self.ps = self.ps[idx_support_pts_in_octree==-1]
+ self.sdfs = self.sdfs[idx_support_pts_in_octree==-1]
+ self.vs = self.vs[idx_hpp_pts_in_octree==-1]
+ self.sdf_bounds = self.sdf_bounds[idx_hpp_pts_in_octree==-1]
+ print('After filtering points in octree')
+ print(f'>>>>>>>>>>>> {support_pts_for_octree.shape=}')
+ print(f'>>>>>>>>>>>> {np.linalg.norm(support_pts_for_octree,axis=1).mean()}')
+ print(f'>>>>>>>>>>>> {hpp_pts_for_octree.shape=}')
+ print(f'>>>>>>>>>>>> {np.linalg.norm(hpp_pts_for_octree,axis=1).mean()}')
+ print(f'>>>>>>>>>>>> {self.ps.shape=}, {self.sdfs.shape=}')
+ print(f'>>>>>>>>>>>> {self.vs.shape=}, {self.sdf_bounds.shape=}')
+
+ if self.octree_with_cnet:
+ # plot_point_cloud(self.build_octree_pts, support_pts_for_octree)
+ self.build_octree_pts = np.concatenate((self.build_octree_pts, support_pts_for_octree, hpp_pts_for_octree),axis=0)
+
+ if self.octree_convex:
+ # Add points on the convex hull
+ mesh = trimesh.Trimesh(vertices=self.build_octree_pts)
+ hull = mesh.convex_hull
+ hull.process()
+ points, _ = trimesh.sample.sample_surface(hull, 10000)
+
+ pts_interp_octree = convex_interp(points, num_samples=10000)
+ points = np.concatenate((points, pts_interp_octree), axis=0)
+
+ self.build_octree_pts = np.concatenate((self.build_octree_pts, points), axis=0)
+
down_scale_ratio = cfg['down_scale_ratio']
+ assert down_scale_ratio == 1, "Down scale ratio other than 1 is not implemented for contact_in_cam"
self.down_scale = np.ones((2),dtype=np.float32)
if down_scale_ratio!=1:
H,W = images[0].shape[:2]
- ############## No interpolatio nto keep consistency
+ ############## No interpolation to keep consistency
down_scale_ratio = int(down_scale_ratio)
self.images = images[:, ::down_scale_ratio, ::down_scale_ratio]
self.depths = depths[:, ::down_scale_ratio, ::down_scale_ratio]
@@ -161,8 +310,8 @@ def __init__(self,cfg,images,depths,masks,normal_maps,poses,K,_run=None,occ_mask
self.global_step = 0
- print("sc_factor",self.cfg['sc_factor'])
- print("translation",self.cfg['translation'])
+ print("sc_factor", self.cfg['sc_factor'])
+ print("translation", self.cfg['translation'])
self.c2w_array = torch.tensor(poses).float().cuda()
@@ -175,10 +324,39 @@ def __init__(self,cfg,images,depths,masks,normal_maps,poses,K,_run=None,occ_mask
rays_.append(rays)
rays = np.concatenate(rays_, axis=0)
+ if self.contact_in_cam:
+ ps_sdf_ = []
+ vs_sdf_ = []
+ for i_ps in range(len(self.ps_in_cam)):
+ ps_sdf_i, vs_sdf_i = self.make_frame_contacts(i_ps)
+ if ps_sdf_i is not None:
+ ps_sdf_.append(ps_sdf_i)
+ if vs_sdf_i is not None:
+ vs_sdf_.append(vs_sdf_i)
+
+ self.ps_batch_size = 10000
+ self.vs_batch_size = 10000
+
+ if len(ps_sdf_)>0:
+ ps_sdf = torch.cat(ps_sdf_, dim=0)
+ self.ps_sdf = ps_sdf
+ self.data_loader_ps = DataLoader(rays=self.ps_sdf, batch_size=self.ps_batch_size)
+ else:
+ self.ps_sdf = None
+ self.data_loader_ps = None
+
+ if len(vs_sdf_)>0:
+ vs_sdf = torch.cat(vs_sdf_, dim=0)
+ self.vs_sdf = vs_sdf
+ self.data_loader_vs = DataLoader(rays=self.vs_sdf, batch_size=self.vs_batch_size)
+ else:
+ self.vs_sdf = None
+ self.data_loader_vs = None
if self.cfg['denoise_depth_use_octree_cloud']:
logging.info("denoise cloud")
- mask = (rays[:,self.ray_mask_slice]>0) & (rays[:,self.ray_depth_slice]<=self.cfg['far']*self.cfg['sc_factor'])
+ mask = (rays[:,self.ray_mask_slice]>0) & \
+ (rays[:,self.ray_depth_slice]<=self.cfg['far']*self.cfg['sc_factor'])
rays_dir = rays[mask][:,self.ray_dir_slice]
rays_depth = rays[mask][:,self.ray_depth_slice]
pts3d = rays_dir*rays_depth.reshape(-1,1)
@@ -187,7 +365,7 @@ def __init__(self,cfg,images,depths,masks,normal_maps,poses,K,_run=None,occ_mask
logging.info(f"Denoising rays based on octree cloud")
kdtree = cKDTree(self.build_octree_pts)
- dists,indices = kdtree.query(pts3d_w,k=1,workers=-1)
+ dists, indices = kdtree.query(pts3d_w,k=1,workers=-1)
bad_mask = dists>0.02*self.cfg['sc_factor']
bad_ids = np.arange(len(rays))[mask][bad_mask]
rays[bad_ids,self.ray_depth_slice] = BAD_DEPTH*self.cfg['sc_factor']
@@ -198,46 +376,11 @@ def __init__(self,cfg,images,depths,masks,normal_maps,poses,K,_run=None,occ_mask
rays = torch.tensor(rays, dtype=torch.float).cuda()
self.rays = rays
- print("rays",rays.shape)
+ print("rays", rays.shape)
self.data_loader = DataLoader(rays=self.rays, batch_size=self.cfg['N_rand'])
- if self.cfg['support_pts'] and self.cfg['sdfs_from_cnets']:
- self.support_pts = torch.load(self.cfg['support_pts'])
- self.sdfs_from_cnets = torch.load(self.cfg['sdfs_from_cnets']).float().cuda()
- print(f'>>>>>>>>>>>> {self.support_pts.shape=},{self.sdfs_from_cnets.shape=}')
- if self.cfg['sampled_pts'] and self.cfg['sdf_bounds_from_cnets']:
- self.sampled_pts = torch.load(self.cfg['sampled_pts'])
- self.sdf_bounds_from_cnets = torch.load(self.cfg['sdf_bounds_from_cnets']).float().cuda()
- # self.offset = np.loadtxt(self.cfg['debug_dir']+'offset.txt')
- print(f'>>>>>>>>>>>> {self.sampled_pts.shape=},{self.sdf_bounds_from_cnets.shape=}')
- if not os.path.exists(self.cfg['debug_dir']+'offset.txt'):
- self.offset = None
- else:
- self.offset = np.loadtxt(self.cfg['debug_dir']+'offset.txt')
- self.annotated_poses_dir = self.cfg['data_dir']+'/annotated_poses/'
- self.normalized_mesh_path = './assets/mesh_cleaned_cube2.obj'
- # self.normalized_mesh_path = 'output/cube_2/mesh_cleaned.obj'
-
- if self.cfg['support_pts'] and self.cfg['sdfs_from_cnets']:
- cnets_mesh_dir = './assets/test_004.obj' # pll output mesh from 23 trajectories
- self.mesh_nearby_pts, self.mesh_nearby_sdfs, _, self.surface_normals = generate_mesh_pts(self.offset,self.cfg['translation'],self.cfg['sc_factor'],self.annotated_poses_dir,cnets_mesh_dir,num_surface=50000, num_inner=10, num_outer=10, dist=0.2)
- self.mesh_nearby_pts = self.mesh_nearby_pts.float().cuda()
- self.mesh_nearby_sdfs = self.mesh_nearby_sdfs.float().cuda()
- self.surface_normals = self.surface_normals.float().cuda()
- self.truncation_scale = self.get_truncation()
- self.mesh_nearby_sdfs = self.mesh_nearby_sdfs / self.truncation_scale
- print(f'{self.mesh_nearby_pts.shape=}, {self.mesh_nearby_sdfs.shape=}, {self.surface_normals.shape=}')
- # torch.save(mesh_nearby_pts.cpu(), 'mesh_nearby_pts.pt')
- # torch.save(mesh_nearby_sdfs.cpu(), 'mesh_nearby_sdfs.pt')
- # print(f"{mesh_nearby_sdfs.amax()=}, {mesh_nearby_sdfs.amin()=}")
-
- if self.cfg['support_pts'] and self.cfg['sdfs_from_cnets']:
- if os.path.exists(self.cfg['debug_dir']+'T_support_points.pt'):
- logging.info('Loading icp transformation')
- self.T_support_points = torch.load(self.cfg['debug_dir']+'T_support_points.pt').numpy()
- else:
- num_samples = 10000
- _, self.T_support_points = post_process_generated_points(self.support_pts, self.offset, self.cfg['translation'], self.cfg['sc_factor'], self.annotated_poses_dir, self.normalized_mesh_path, num_samples, T=None)
- torch.save(torch.tensor(self.T_support_points), self.cfg['debug_dir']+'T_support_points.pt')
+
+
+ # Store some PLL data-related loss information for inspection.
self.all_cps_sdf = []
self.all_cps_empty = []
self.all_cps_sdf_predsdf = []
@@ -253,34 +396,67 @@ def __init__(self,cfg,images,depths,masks,normal_maps,poses,K,_run=None,occ_mask
self.cps_finished = 0
self.hps_finished = 0
+ self.slice_cps = []
+ self.slice_cps_anchor = []
+ self.slice_cps_predsdf = []
+
+ self.free_pts = []
+ self.free_pts_predsdf = []
+
+ self.near_pts = []
+ self.near_pts_predsdf = []
+
+ self.uncertain_pts = []
+ self.uncertain_pts_predsdf = []
+
+ self.interp_pts = []
+ self.interp_pts_predsdf = []
+ self.interp_pts_sdfub = []
+ self.interp_end_pts = []
+ self.interp_end_pts_sdf = []
+
+ self.interp_pts_vision = []
+ self.interp_pts_predsdf_vision = []
+ self.interp_pts_sdfub_vision = []
+
def create_nerf(self,device=torch.device("cuda")):
"""Instantiate NeRF's MLP model.
"""
models = {}
- embed_fn, input_ch = get_embedder(self.cfg['multires'], self.cfg, i=self.cfg['i_embed'], octree_m=self.octree_m)
+ embed_fn, input_ch = get_embedder(
+ self.cfg['multires'], self.cfg, i=self.cfg['i_embed'],
+ octree_m=self.octree_m)
embed_fn = embed_fn.to(device)
models['embed_fn'] = embed_fn
input_ch_views = 0
embeddirs_fn = None
if self.cfg['use_viewdirs']:
- embeddirs_fn, input_ch_views = get_embedder(self.cfg['multires_views'], self.cfg, i=self.cfg['i_embed_views'], octree_m=self.octree_m)
+ embeddirs_fn, input_ch_views = get_embedder(
+ self.cfg['multires_views'], self.cfg, i=self.cfg['i_embed_views'],
+ octree_m=self.octree_m)
models['embeddirs_fn'] = embeddirs_fn
output_ch = 4
skips = [4]
- model = NeRFSmall(num_layers=2,hidden_dim=64,geo_feat_dim=15,num_layers_color=3,hidden_dim_color=64,input_ch=input_ch, input_ch_views=input_ch_views+self.cfg['frame_features']).to(device)
+ model = NeRFSmall(
+ num_layers=2, hidden_dim=64, geo_feat_dim=15, num_layers_color=3,
+ hidden_dim_color=64, input_ch=input_ch,
+ input_ch_views=input_ch_views+self.cfg['frame_features']).to(device)
model = model.to(device)
models['model'] = model
model_fine = None
if self.cfg['N_importance'] > 0:
if not self.cfg['share_coarse_fine']:
- model_fine = NeRFSmall(num_layers=2,hidden_dim=64,geo_feat_dim=15,num_layers_color=3,hidden_dim_color=64,input_ch=input_ch, input_ch_views=input_ch_views).to(device)
+ model_fine = NeRFSmall(
+ num_layers=2, hidden_dim=64, geo_feat_dim=15, num_layers_color=3,
+ hidden_dim_color=64, input_ch=input_ch, input_ch_views=input_ch_views
+ ).to(device)
models['model_fine'] = model_fine
- model_sdf = NeRFSmallSDF(num_layers=2,hidden_dim=64,input_ch=32)
+ model_sdf = NeRFSmallSDF(num_layers=2, hidden_dim=64, input_ch=32)
model_sdf = model_sdf.to(device)
models['model_sdf'] = model_sdf
@@ -288,16 +464,38 @@ def create_nerf(self,device=torch.device("cuda")):
num_training_frames = len(self.images)
feature_array = None
if self.cfg['frame_features'] > 0:
- feature_array = FeatureArray(num_training_frames, self.cfg['frame_features']).to(device)
+ feature_array = FeatureArray(num_training_frames,
+ self.cfg['frame_features']).to(device)
models['feature_array'] = feature_array
# Create pose array
pose_array = None
if self.cfg['optimize_poses']:
- pose_array = PoseArray(num_training_frames,max_trans=self.cfg['max_trans']*self.cfg['sc_factor'],max_rot=self.cfg['max_rot']).to(device)
+ pose_array = PoseArray(
+ num_training_frames,
+ max_trans=self.cfg['max_trans']*self.cfg['sc_factor'],
+ max_rot=self.cfg['max_rot']).to(device)
models['pose_array'] = pose_array
self.models = models
+ def make_frame_contacts(self,frame_id):
+ ### concatenate contact points together with their frame_id information
+ ps = self.ps_in_cam[frame_id]
+ sdfs = self.sdfs_in_cam[frame_id]
+ vs = self.vs_in_cam[frame_id]
+ sdf_bounds = self.sdf_bounds_in_cam[frame_id]
+
+ ps_sdf = None
+ vs_sdf = None
+ if ps is not None:
+ ps_sdf = torch.cat([ps, sdfs.reshape(-1,1).to(torch.float), frame_id*torch.ones(ps.shape[0],1)], axis=-1)
+ if vs is not None:
+ vs_sdf = torch.cat([vs, sdf_bounds.reshape(-1,1).to(torch.float), frame_id*torch.ones(vs.shape[0],1)], axis=-1)
+ self.contact_ps_slice = [0,1,2]
+ self.contact_sdfs_slice = 3
+ self.contact_frame_ids_slice = 4
+ return ps_sdf, vs_sdf
+
def make_frame_rays(self,frame_id):
mask = self.masks[frame_id,...,0].copy()
@@ -309,7 +507,10 @@ def make_frame_rays(self,frame_id):
rays = np.concatenate([rays, self.normal_maps[frame_id]], -1) # [H, W, 11]
rays = np.concatenate([rays, frame_id*np.ones(self.depths[frame_id].shape)], -1) # [H, W, 12]
ray_types = np.zeros((self.H,self.W,1)) # 0 is good; 1 is invalid depth (uncertain)
-    invalid_depth = ((self.depths[frame_id,...,0]<0.1) | (self.depths[frame_id,...,0]>self.cfg['far']*self.cfg['sc_factor'])) & (mask>0)
+    invalid_depth = (
+        (self.depths[frame_id,...,0]<0.1) | \
+        (self.depths[frame_id,...,0]>self.cfg['far']*self.cfg['sc_factor'])) & \
+        (mask>0)
ray_types[invalid_depth] = 1
rays = np.concatenate((rays,ray_types), axis=-1)
self.ray_dir_slice = [0,1,2]
@@ -347,7 +548,8 @@ def make_frame_rays(self,frame_id):
vs,us = np.where(mask>0)
cur_rays = rays[vs,us].reshape(-1,n)
cur_rays = cur_rays[cur_rays[:,self.ray_type_slice]==0]
- cur_rays = compute_near_far_and_filter_rays(self.poses[frame_id],cur_rays,self.cfg)
+ cur_rays = compute_near_far_and_filter_rays(self.poses[frame_id], cur_rays,
+ self.cfg)
if self.normal_maps is not None:
self.ray_near_slice = 13
self.ray_far_slice = 14
@@ -356,15 +558,18 @@ def make_frame_rays(self,frame_id):
self.ray_far_slice = 11
if self.cfg['use_octree']:
- rays_o_world = (self.poses[frame_id]@to_homo(np.zeros((len(cur_rays),3))).T).T[:,:3]
+ rays_o_world = (self.poses[frame_id] @ \
+ to_homo(np.zeros((len(cur_rays),3))).T).T[:,:3]
rays_o_world = torch.from_numpy(rays_o_world).cuda().float()
- rays_unit_d_cam = cur_rays[:,:3]/np.linalg.norm(cur_rays[:,:3],axis=-1).reshape(-1,1)
+ rays_unit_d_cam = cur_rays[:,:3] / \
+ np.linalg.norm(cur_rays[:,:3],axis=-1).reshape(-1,1)
rays_d_world = (self.poses[frame_id][:3,:3]@rays_unit_d_cam.T).T
rays_d_world = torch.from_numpy(rays_d_world).cuda().float()
vox_size = self.cfg['octree_raytracing_voxel_size']*self.cfg['sc_factor']
level = int(np.floor(np.log2(2.0/vox_size)))
- near,far,_,ray_depths_in_out = self.octree_m.ray_trace(rays_o_world,rays_d_world,level=level)
+ near, far, _, ray_depths_in_out = self.octree_m.ray_trace(
+ rays_o_world, rays_d_world, level=level)
near = near.cpu().numpy()
valid = (near>0).reshape(-1)
cur_rays = cur_rays[valid]
@@ -405,7 +610,8 @@ def compute_rays_z_in_out(self):
self.z_in_out = z_in_out
- def add_new_frames(self,images,depths,masks,normal_maps,poses,occ_masks=None, new_pcd=None, reuse_weights=False):
+ def add_new_frames(self,images,depths,masks,normal_maps,poses,occ_masks=None, new_pcd=None, reuse_weights=False,
+ ps=None, sdfs=None, vs=None, sdf_bounds=None):
'''Add new frames and continue training
@images: (N,H,W,3) new images
@poses: All frames, they need to reset
@@ -425,6 +631,12 @@ def add_new_frames(self,images,depths,masks,normal_maps,poses,occ_masks=None, ne
self.depths = np.concatenate((self.depths, depths), axis=0)
self.masks = np.concatenate((self.masks, masks), axis=0)
+ if self.contact_in_cam:
+ self.ps_in_cam = self.ps_in_cam + ps
+ self.sdfs_in_cam = self.sdfs_in_cam + sdfs
+ self.vs_in_cam = self.vs_in_cam + vs
+ self.sdf_bounds_in_cam = self.sdf_bounds_in_cam + sdf_bounds
+
self.poses = poses.copy()
self.c2w_array = torch.tensor(poses, dtype=torch.float).cuda()
@@ -464,6 +676,37 @@ def add_new_frames(self,images,depths,masks,normal_maps,poses,occ_masks=None, ne
rays_.append(rays)
rays = np.concatenate(rays_, axis=0)
+ if self.contact_in_cam:
+ ps_sdf_ = []
+ vs_sdf_ = []
+ for i_ps in range(prev_n_image, len(self.ps_in_cam)):
+ ps_sdf_i, vs_sdf_i = self.make_frame_contacts(i_ps)
+ if ps_sdf_i is not None:
+ ps_sdf_.append(ps_sdf_i)
+ if vs_sdf_i is not None:
+ vs_sdf_.append(vs_sdf_i)
+
+ if len(ps_sdf_)>0:
+ ps_sdf = torch.cat(ps_sdf_, dim=0)
+ if self.ps_sdf is not None:
+ self.ps_sdf = torch.cat((self.ps_sdf, ps_sdf), dim=0)
+ else:
+ self.ps_sdf = ps_sdf
+
+ if len(vs_sdf_)>0:
+ vs_sdf = torch.cat(vs_sdf_, dim=0)
+ if self.vs_sdf is not None:
+ self.vs_sdf = torch.cat((self.vs_sdf, vs_sdf), dim=0)
+ else:
+ self.vs_sdf = vs_sdf
+
+ self.ps_batch_size = 1000
+ self.vs_batch_size = 5000
+ if self.ps_sdf is not None:
+ self.data_loader_ps = DataLoader(rays=self.ps_sdf, batch_size=self.ps_batch_size)
+ if self.vs_sdf is not None:
+ self.data_loader_vs = DataLoader(rays=self.vs_sdf, batch_size=self.vs_batch_size)
+
if self.cfg['denoise_depth_use_octree_cloud']:
logging.info("denoise cloud")
mask = (rays[:,self.ray_mask_slice]>0) & (rays[:,self.ray_depth_slice]<=self.cfg['far']*self.cfg['sc_factor'])
@@ -488,10 +731,47 @@ def add_new_frames(self,images,depths,masks,normal_maps,poses,occ_masks=None, ne
self.data_loader = DataLoader(rays=self.rays, batch_size=self.cfg['N_rand'])
+ def build_octree_custom_points(self, build_octree_pts):
+ pts = torch.tensor(build_octree_pts).cuda().float() # Must be within [-1,1]
+ octree_smallest_voxel_size = self.cfg['octree_smallest_voxel_size']*self.cfg['sc_factor']
+ finest_n_voxels = 2.0/octree_smallest_voxel_size
+ max_level = int(np.ceil(np.log2(finest_n_voxels)))
+ octree_smallest_voxel_size = 2.0/(2**max_level)
+
+ #################### Dilate
+ dilate_radius = int(np.ceil(self.cfg['octree_dilate_size']/self.cfg['octree_smallest_voxel_size']))
+ dilate_radius = max(1, dilate_radius)
+ logging.info(f"Octree voxel dilate_radius:{dilate_radius}")
+ shifts = []
+ for dx in [-1,0,1]:
+ for dy in [-1,0,1]:
+ for dz in [-1,0,1]:
+ shifts.append([dx,dy,dz])
+ shifts = torch.tensor(shifts).cuda().long() # (27,3)
+ coords = torch.floor((pts+1)/octree_smallest_voxel_size).long() #(N,3)
+ dilated_coords = coords.detach().clone()
+ for iter in range(dilate_radius):
+ dilated_coords = (dilated_coords[None].expand(shifts.shape[0],-1,-1) + shifts[:,None]).reshape(-1,3)
+ dilated_coords = torch.unique(dilated_coords,dim=0)
+ pts = (dilated_coords+0.5) * octree_smallest_voxel_size - 1
+ pts = torch.clip(pts,-1,1)
+
+ assert pts.min()>=-1 and pts.max()<=1
+ octree_m = OctreeManager(pts, max_level)
+
+ vox_size = self.cfg['octree_raytracing_voxel_size']*self.cfg['sc_factor']
+ ray_trace_level = int(np.floor(np.log2(2.0/vox_size)))
+ print(f"ray_trace_level={ray_trace_level}") # 2
+ print(f'vox_size={vox_size}') # 0.26
+ print('max_level=', max_level) # 3
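+    # Worked example with the values printed above: vox_size = 0.26 gives
+    # ray_trace_level = floor(log2(2/0.26)) = 2, and an effective smallest
+    # voxel size in [0.25, 0.5) gives max_level = ceil(log2(2/size)) = 3.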
+
+ dir = f"{self.cfg['nerf_temp_dir']}/octree_custom_boxes_ray_tracing_level.ply"
+ octree_m.draw_boxes(level=ray_trace_level,outfile=dir)
+ return octree_m, ray_trace_level
def build_octree(self):
if self.cfg['save_octree_clouds']:
- dir = f"{self.cfg['save_dir']}/build_octree_cloud.ply"
+ dir = f"{self.cfg['nerf_temp_dir']}/build_octree_cloud.ply"
pcd = toOpen3dCloud(self.build_octree_pts)
o3d.io.write_point_cloud(dir,pcd)
if self._run is not None:
@@ -522,7 +802,7 @@ def build_octree(self):
if self.cfg['save_octree_clouds']:
pcd = toOpen3dCloud(pts.data.cpu().numpy())
- dir = f"{self.cfg['save_dir']}/build_octree_cloud_dilated.ply"
+ dir = f"{self.cfg['nerf_temp_dir']}/build_octree_cloud_dilated.ply"
o3d.io.write_point_cloud(dir,pcd)
if self._run is not None:
self._run.add_artifact(dir)
@@ -532,14 +812,14 @@ def build_octree(self):
self.octree_m = OctreeManager(pts, max_level)
if self.cfg['save_octree_clouds']:
- dir = f"{self.cfg['save_dir']}/octree_boxes_max_level.ply"
+ dir = f"{self.cfg['nerf_temp_dir']}/octree_boxes_max_level.ply"
self.octree_m.draw_boxes(level=max_level,outfile=dir)
if self._run is not None:
self._run.add_artifact(dir)
vox_size = self.cfg['octree_raytracing_voxel_size']*self.cfg['sc_factor']
level = int(np.floor(np.log2(2.0/vox_size)))
if self.cfg['save_octree_clouds']:
- dir = f"{self.cfg['save_dir']}/octree_boxes_ray_tracing_level.ply"
+ dir = f"{self.cfg['nerf_temp_dir']}/octree_boxes_ray_tracing_level.ply"
self.octree_m.draw_boxes(level=level,outfile=dir)
if self._run is not None:
self._run.add_artifact(dir)
@@ -601,7 +881,7 @@ def load_weights(self,ckpt_path):
self.optimizer.load_state_dict(ckpt['optimizer'])
- def save_weights(self,out_file,models):
+ def save_weights(self, out_file, models):
data = {
'global_step': self.global_step,
'model': models['model'].state_dict(),
@@ -625,7 +905,7 @@ def save_weights(self,out_file,models):
if self._run is not None:
self._run.add_artifact(dir)
dir1 = copy.deepcopy(dir)
- dir = f'{os.path.dirname(out_file)}/model_latest.pth'
+ dir = f'{op.dirname(out_file)}/model_latest.pth'
if dir1!=dir:
os.system(f'cp {dir1} {dir}')
if self._run is not None:
@@ -732,431 +1012,32 @@ def get_truncation(self):
return truncation
- def train_loop(self,batch):
- target_s = batch[:, self.ray_rgb_slice] # Color (N,3)
- target_d = batch[:, self.ray_depth_slice] # Normalized scale (N)
-
- target_mask = batch[:,self.ray_mask_slice].bool().reshape(-1)
- frame_ids = batch[:,self.ray_frame_id_slice]
-
- ##### Calculate sdf from 3d points
- contact_pts_loss = torch.tensor(0)
- if False: #Skip below for now
- offset = np.loadtxt(self.cfg['debug_dir']+'offset.txt')
- translation = self.cfg['translation']
- sc_factor = self.cfg['sc_factor']
- annotated_poses_dir = self.cfg['data_dir']+'/annotated_poses/'
- contact_pts, contact_and_near_surface_sdf, self.T_support_points = generate_contact_pts(offset,translation,sc_factor,annotated_poses_dir,num_samples=3000)
- contact_pts = torch.from_numpy(contact_pts).float().cuda()
- torch.save(torch.tensor(self.T_support_points), 'T_support_points.pt')
- print('T_support_points saved!!!!!!!!!!!')
- torch.save(contact_pts.cpu(), 'contact_pts_new.pt')
- print('contact_pts_new saved!!!!!!!!!!!')
- contact_and_near_surface_sdf = torch.from_numpy(contact_and_near_surface_sdf).float().cuda()
- contact_sigma = []
- chunk = self.cfg['netchunk']
- for i in range(0,contact_pts.shape[0],chunk):
- inputs = contact_pts[i:i+chunk]
- outputs,valid_samples_ = self.run_network_density(inputs=inputs)
- # outputs = outputs.detach()
- contact_sigma.append(outputs)
- contact_sigma = torch.cat(contact_sigma,dim=0).float()
- contact_sigma = np.squeeze(contact_sigma)
- # print(f'avg sdf: {torch.mean(torch.abs(contact_sigma),dim=0)}')
- contact_pts_loss = get_pts_sdf_loss(contact_sigma, contact_and_near_surface_sdf) * self.cfg['contact_pts_weight']
- # logging.info(f"contact_pts_loss: {contact_pts_loss}")
- loss = contact_pts_loss
- #######
-
- rgb, extras = self.render(rays=batch, ray_ids=self.data_loader.batch_ray_ids, frame_ids=frame_ids,depth=target_d,lindisp=False,perturb=True,raw_noise_std=self.cfg['raw_noise_std'], near=batch[:,self.ray_near_slice], far=batch[:,self.ray_far_slice], get_normals=False)
- valid_samples = extras['valid_samples'] #(N_ray,N_samples)
- z_vals = extras['z_vals'] # [N_rand, N_samples + N_importance]
- sdf = extras['raw'][..., -1]
- if False:
- pts = extras['pts']
- torch.save(pts.cpu(), 'depth_pts_new.pt')
- print('depth_pts_new saved!!!!!!!!!!!')
-
- N_rays,N_samples = sdf.shape[:2]
- valid_rays = (valid_samples>0).any(dim=-1).bool().reshape(N_rays) & (batch[:,self.ray_type_slice]==0)
-
- ray_type = batch[:,self.ray_type_slice].reshape(-1)
- ray_weights = torch.ones((N_rays), device=rgb.device, dtype=torch.float32)
- ray_weights[(frame_ids==0).view(-1)] = self.cfg['first_frame_weight']
- ray_weights = ray_weights*valid_rays.view(-1)
- sample_weights = ray_weights.view(N_rays,1).expand(-1,N_samples) * valid_samples
- img_loss = (((rgb-target_s)**2 * ray_weights.view(-1,1))).mean()
- rgb_loss = self.cfg['rgb_weight'] * img_loss
- loss = rgb_loss
-
- rgb0_loss = torch.tensor(0)
- if 'rgb0' in extras:
- img_loss0 = (((extras['rgb0']-target_s)**2 * ray_weights.view(-1,1))).mean()
- rgb0_loss = img_loss0*self.cfg['rgb_weight']
- loss += rgb0_loss
-
- depth_loss = torch.tensor(0)
- depth_loss0 = torch.tensor(0)
- if self.cfg['depth_weight']>0:
- signs = sdf[:, 1:] * sdf[:, :-1]
- mask = signs<0
- inds = torch.argmax(mask.float(), axis=1)
- inds = inds[..., None]
- z_min = torch.gather(z_vals,dim=1,index=inds)
- weights = ray_weights * (depth<=self.cfg['far']*self.cfg['sc_factor']) * (mask.any(dim=-1))
- depth_loss = ((z_min*weights-depth.view(-1,1)*weights)**2).mean() * self.cfg['depth_weight']
- loss = loss+depth_loss
-
- truncation = self.get_truncation()
- sample_weights[ray_type==1] = 0
- fs_loss, sdf_loss,front_mask,sdf_mask = get_sdf_loss(z_vals, target_d.reshape(-1,1).expand(-1,N_samples), sdf, truncation, self.cfg,return_mask=True, sample_weights=sample_weights, rays_d=batch[:,self.ray_dir_slice])
- fs_loss = fs_loss*self.cfg['fs_weight']
- sdf_loss = sdf_loss*self.cfg['trunc_weight']
- # print(f"{self.cfg['fs_weight']=}") # 100
- # print(f"{self.cfg['trunc_weight']=}") # 6000
- loss = loss + fs_loss + sdf_loss
-
- # ### Mesh loss
- # idxs = torch.randperm(self.mesh_nearby_pts.shape[0])[:15000]
- # inputs = self.mesh_nearby_pts[idxs]
- # gts = self.mesh_nearby_sdfs[idxs]
- # target_normals_batch = self.surface_normals[idxs]
-
- # outputs, _ = self.run_network_density(inputs=inputs, get_normals=True)
-
- # mesh_hcsdf_loss = torch.tensor(0)
- # mesh_eikonal_loss = torch.tensor(0)
- # mesh_hessian_loss = torch.tensor(0)
- # mesh_normal_direction_loss = torch.tensor(0)
- # mesh_minimal_surface_loss = torch.tensor(0)
-
- # pred_sdf = outputs[..., 0]
-
- # mesh_hcsdf_loss = get_pretrain_sdf_loss(outputs, gts, self.cfg)
- # # mesh_hcsdf_loss, _, _ = get_support_point_loss(outputs[..., 0], gts, self.cfg)
- # mesh_eikonal_loss = get_pretrain_eikonal_loss(outputs, gts, self.cfg, self.truncation_scale)
- # mesh_normal_direction_loss = get_pretrain_normal_direction_loss(outputs, target_normals_batch, gts, self.cfg, inputs)
-
- # if self.global_step % 2000 == 0 and self.global_step > 0:
- # visualize_normals(inputs, outputs[..., -3:] / (torch.norm(outputs[..., -3:], dim=-1, keepdim=True)+1e-6), target_normals_batch/ (torch.norm(target_normals_batch, dim=-1, keepdim=True)+1e-6), )
-
- # mesh_minimal_surface_loss = get_pretrain_minimal_surface_loss(outputs, self.cfg)
-
- # loss = loss + mesh_hcsdf_loss
- # loss = loss + mesh_eikonal_loss
- # loss = loss + mesh_normal_direction_loss
- # loss = loss + mesh_minimal_surface_loss
-
-
- ##### Support point loss
- support_point_loss = torch.tensor(0)
- if self.cfg['support_pts'] and self.cfg['sdfs_from_cnets']:
- num_samples = 10000
-
- steps = int(self.support_pts.size(0) / num_samples) + 2
- start_step = 2000 - steps
- effect_step = self.global_step - start_step
- if effect_step < 0 or self.cps_finished == 1:
- indices = torch.randperm(self.support_pts.size(0))[:num_samples]
- else:
- if (effect_step+1)*num_samples >= len(self.sdfs_from_cnets):
- end_idx = len(self.sdfs_from_cnets)
- self.cps_finished = 1
- else:
- end_idx = (effect_step+1)*num_samples
- self.cps_finished = 0
- if effect_step*num_samples >= len(self.sdfs_from_cnets):
- start_idx = 0
- else:
- start_idx = effect_step*num_samples
- indices = torch.arange(start_idx, end_idx)
-
- support_pts = self.support_pts[indices]
- sdfs_from_cnets = self.sdfs_from_cnets[indices]
- sdfs_from_cnets *= self.cfg['sc_factor']/truncation
-
- support_pts, _ = post_process_generated_points(support_pts, self.offset, self.cfg['translation'], self.cfg['sc_factor'], self.annotated_poses_dir, self.normalized_mesh_path, num_samples, self.T_support_points)
- support_pts_flatten = support_pts.view(-1,3)
- # torch.save(support_pts_flatten.cpu(), './support_pts_processed.pt')
- # torch.save(sdfs_from_cnets.cpu(), './sdfs_from_cnets.pt')
- bundle_sdfs, _ = self.run_network_density(inputs=support_pts_flatten)
- bundle_sdfs = bundle_sdfs.squeeze(1)
- support_point_loss, empty_mask, sdf_mask = get_support_point_loss(bundle_sdfs, sdfs_from_cnets, self.cfg)
- support_point_loss *= self.cfg['support_pts_weight']
- # logging.info(f'{support_point_loss=}')
- # print(f'{bundle_sdfs.shape=}, {sdfs_from_cnets.shape=}')
- # print(f'{empty_mask.shape=},{sdf_mask.shape=}')
- empty_pts_support = support_pts_flatten[empty_mask]
- sdf_pts_support = support_pts_flatten[sdf_mask]
- # logging.info(f'{empty_pts_support.shape=}, {sdf_pts_support.shape=}')
- if effect_step >= 0:
- self.all_cps_sdf.append(sdf_pts_support.cpu().detach())
- self.all_cps_empty.append(empty_pts_support.cpu().detach())
- self.all_cps_sdf_predsdf.append(bundle_sdfs[sdf_mask].cpu().detach())
- self.all_cps_empty_predsdf.append(bundle_sdfs[empty_mask].cpu().detach())
- self.all_cps_sdf_gtsdf.append(sdfs_from_cnets[sdf_mask].cpu().detach())
- self.all_cps_empty_gtsdf.append(sdfs_from_cnets[empty_mask].cpu().detach())
- # torch.save(empty_pts_support.cpu(), 'empty_pts1.pt')
- # torch.save(sdf_pts_support.cpu(), 'sdf_pts1.pt')
- if self.cfg['dry_cnets']:
- pass
- else:
- loss+=support_point_loss
-
- ##### Hyperplane-constrained SDF loss
- hc_sdf_loss = torch.tensor(0)
- if self.cfg['sampled_pts'] and self.cfg['sdf_bounds_from_cnets']:
- num_samples = 10000
-
- # print(f"{self.global_step=}")
- steps = int(self.sampled_pts.size(0) / num_samples) + 2
- start_step = 2000 - steps
- effect_step = self.global_step - start_step
- if effect_step < 0 or self.hps_finished == 1:
- indices = torch.randperm(self.sampled_pts.size(0))[:num_samples]
- else:
- if (effect_step+1)*num_samples >= len(self.sampled_pts):
- end_idx = len(self.sampled_pts)
- self.hps_finished = 1
- else:
- end_idx = (effect_step+1)*num_samples
- self.hps_finished = 0
- # print(effect_step*num_samples, end_idx)
- indices = torch.arange(effect_step*num_samples, end_idx)
-
- sampled_pts = self.sampled_pts[indices]
- sdf_bounds_from_cnets = self.sdf_bounds_from_cnets[indices]
- sdf_bounds_from_cnets *= self.cfg['sc_factor']/truncation
- sampled_pts, _ = post_process_generated_points(sampled_pts, self.offset, self.cfg['translation'], self.cfg['sc_factor'], self.annotated_poses_dir, self.normalized_mesh_path, num_samples, self.T_support_points)
- sampled_pts_flatten = sampled_pts.view(-1,3)
- # torch.save(sampled_pts_flatten.cpu(), './sampled_pts_processed.pt')
- # torch.save(sdf_bounds_from_cnets.cpu(), './sdf_bounds_from_cnets.pt')
- bundle_sdfs, _ = self.run_network_density(inputs=sampled_pts_flatten)
- bundle_sdfs = bundle_sdfs.squeeze(1)
- hc_sdf_loss, empty_mask, sdf_mask = get_hcsdf_loss(bundle_sdfs, sdf_bounds_from_cnets, self.cfg)
- # logging.info(f'{hc_sdf_loss=}')
- empty_pts_hc = sampled_pts_flatten[empty_mask]
- sdf_pts_hc = sampled_pts_flatten[sdf_mask]
- if effect_step >= 0:
- self.all_hps_sdf.append(sdf_pts_hc.cpu().detach())
- self.all_hps_empty.append(empty_pts_hc.cpu().detach())
- self.all_hps_sdf_predsdf.append(bundle_sdfs[sdf_mask].cpu().detach())
- self.all_hps_empty_predsdf.append(bundle_sdfs[empty_mask].cpu().detach())
- self.all_hps_sdf_gtsdflb.append(sdf_bounds_from_cnets[sdf_mask].cpu().detach())
- self.all_hps_empty_gtsdflb.append(sdf_bounds_from_cnets[empty_mask].cpu().detach())
- # logging.info(f'{empty_pts_hc.shape=}, {sdf_pts_hc.shape=}')
- # torch.save(empty_pts_hc.cpu(), './empty_pts2.pt')
- # torch.save(sdf_pts_hc.cpu(), 'sdf_pts2.pt')
- if self.cfg['dry_cnets']:
- pass
- else:
- loss+=hc_sdf_loss
-
- if self.cps_finished:
- if os.path.exists(os.path.join(self.cfg['debug_dir'], 'cps_near_pcd.pt')):
- pass
- else:
- print(f"{self.global_step=}")
- all_cps_sdf = torch.cat(self.all_cps_sdf, 0)
- all_cps_empty = torch.cat(self.all_cps_empty, 0)
- all_cps_sdf_predsdf = torch.cat(self.all_cps_sdf_predsdf, 0)
- all_cps_empty_predsdf = torch.cat(self.all_cps_empty_predsdf, 0)
- all_cps_sdf_gtsdf = torch.cat(self.all_cps_sdf_gtsdf, 0)
- all_cps_empty_gtsdf = torch.cat(self.all_cps_empty_gtsdf, 0)
- # 1: cps (contact points), 2: hps (hyperplane points)
- # sdf: near space, empty: empty space (truncated)
- torch.save(all_cps_sdf, os.path.join(self.cfg['debug_dir'], 'cps_near_pcd.pt'))
- torch.save(all_cps_empty, os.path.join(self.cfg['debug_dir'], 'cps_empty_pcd.pt'))
- torch.save(all_cps_sdf_predsdf, os.path.join(self.cfg['debug_dir'], 'cps_near_sdf_pred.pt'))
- torch.save(all_cps_empty_predsdf, os.path.join(self.cfg['debug_dir'], 'cps_empty_sdf_pred.pt'))
- torch.save(all_cps_sdf_gtsdf, os.path.join(self.cfg['debug_dir'], 'cps_near_sdf_gt.pt'))
- torch.save(all_cps_empty_gtsdf, os.path.join(self.cfg['debug_dir'], 'cps_empty_sdf_gt.pt'))
- print("Stopped: cps!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
-
- if self.hps_finished:
- if os.path.exists(os.path.join(self.cfg['debug_dir'], 'hps_near_pcd.pt')):
- pass
- else:
- print(f"{self.global_step=}")
- all_hps_sdf = torch.cat(self.all_hps_sdf, 0)
- all_hps_empty = torch.cat(self.all_hps_empty, 0)
- all_hps_sdf_predsdf = torch.cat(self.all_hps_sdf_predsdf, 0)
- all_hps_empty_predsdf = torch.cat(self.all_hps_empty_predsdf, 0)
- all_hps_sdf_gtsdflb = torch.cat(self.all_hps_sdf_gtsdflb, 0)
- all_hps_empty_gtsdflb = torch.cat(self.all_hps_empty_gtsdflb, 0)
- torch.save(all_hps_sdf, os.path.join(self.cfg['debug_dir'], 'hps_near_pcd.pt'))
- torch.save(all_hps_empty, os.path.join(self.cfg['debug_dir'], 'hps_empty_pcd.pt'))
- torch.save(all_hps_sdf_predsdf, os.path.join(self.cfg['debug_dir'], 'hps_near_sdf_pred.pt'))
- torch.save(all_hps_empty_predsdf, os.path.join(self.cfg['debug_dir'], 'hps_empty_sdf_pred.pt'))
- torch.save(all_hps_sdf_gtsdflb, os.path.join(self.cfg['debug_dir'], 'hps_near_sdf_gt.pt'))
- torch.save(all_hps_empty_gtsdflb, os.path.join(self.cfg['debug_dir'], 'hps_empty_sdf_gt.pt'))
- print("Stopped: hps!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
-
- fs_rgb_loss = torch.tensor(0)
- if self.cfg['fs_rgb_weight']>0:
- fs_rgb_loss = ((((torch.sigmoid(extras['raw'][...,:3])-1)*front_mask[...,None])**2) * sample_weights[...,None]).mean()
- loss += fs_rgb_loss*self.cfg['fs_rgb_weight']
-
- eikonal_loss = torch.tensor(0)
- if self.cfg['eikonal_weight']>0:
- nerf_normals = extras['normals']
- eikonal_loss = ((torch.norm(nerf_normals[sdf<1], dim=-1)-1)**2).mean() * self.cfg['eikonal_weight']
- loss += eikonal_loss
-
- point_cloud_loss = torch.tensor(0)
- point_cloud_normal_loss = torch.tensor(0)
-
-
- reg_features = torch.tensor(0)
- if self.models['feature_array'] is not None:
- reg_features = self.cfg['feature_reg_weight'] * (self.models['feature_array'].data**2).mean()
- loss += reg_features
-
- if self.models['pose_array'] is not None:
- pose_array = self.models['pose_array']
- pose_reg = self.cfg['pose_reg_weight']*pose_array.data[1:].norm()
- loss += pose_reg
-
- variation_loss = torch.tensor(0)
-
- self.optimizer.zero_grad()
-
- self.amp_scaler.scale(loss).backward()
-
- self.amp_scaler.step(self.optimizer)
- self.amp_scaler.update()
- if self.global_step%10==0 and self.global_step>0:
- self.schedule_lr()
-
- if self.global_step%self.cfg['i_weights']==0 and self.global_step>0:
- self.save_weights(out_file=os.path.join(self.cfg['save_dir'], f'model_latest.pth'), models=self.models)
-
- if self.global_step % self.cfg['i_img'] == 0 and self.global_step>0:
- ids = torch.unique(self.rays[:, self.ray_frame_id_slice]).data.cpu().numpy().astype(int).tolist()
- ids.sort()
- last = ids[-1]
- ids = ids[::max(1,len(ids)//5)]
- if last not in ids:
- ids.append(last)
- canvas = []
- for frame_idx in ids:
- rgb, depth, ray_mask, gt_rgb, gt_depth, _ = self.render_images(frame_idx)
- mask_vis = (rgb*255*0.2 + ray_mask*0.8).astype(np.uint8)
- mask_vis = np.clip(mask_vis,0,255)
- rgb = np.concatenate((rgb,gt_rgb),axis=1)
- far = self.cfg['far']*self.cfg['sc_factor']
- gt_depth = np.clip(gt_depth, self.cfg['near']*self.cfg['sc_factor'], far)
- depth_vis = np.concatenate((to8b(depth / far), to8b(gt_depth / far)), axis=1)
- depth_vis = np.tile(depth_vis[...,None],(1,1,3))
- row = np.concatenate((to8b(rgb),depth_vis,mask_vis),axis=1)
- canvas.append(row)
- canvas = np.concatenate(canvas,axis=0).astype(np.uint8)
- dir = f"{self.cfg['save_dir']}/image_step_{self.global_step:07d}.png"
- imageio.imwrite(dir,canvas)
- if self._run is not None:
- self._run.add_artifact(dir)
-
-
- if self.global_step%self.cfg['i_print']==0:
- msg = f"Iter: {self.global_step}, valid_samples: {valid_samples.sum()}/{torch.numel(valid_samples)}, valid_rays: {valid_rays.sum()}/{torch.numel(valid_rays)}, "
- metrics = {
- 'loss':loss.item(),
- 'contact_pts_loss': contact_pts_loss.item(),
- 'support_point_loss': support_point_loss.item(),
- 'hc_sdf_loss': hc_sdf_loss.item(),
- # 'mesh_hc_sdf_loss': mesh_hcsdf_loss.item(),
- # 'mesh_eikonal_loss': mesh_eikonal_loss.item(),
- # 'mesh_hessian_loss': mesh_hessian_loss.item(),
- # 'mesh_normal_direction_loss': mesh_normal_direction_loss.item(),
- # 'mesh_minimal_surface_loss': mesh_minimal_surface_loss.item(),
- 'rgb_loss':rgb_loss.item(),
- 'rgb0_loss':rgb0_loss.item(),
- 'fs_rgb_loss': fs_rgb_loss.item(),
- 'depth_loss':depth_loss.item(),
- 'depth_loss0':depth_loss0.item(),
- 'fs_loss':fs_loss.item(),
- 'point_cloud_loss': point_cloud_loss.item(),
- 'point_cloud_normal_loss':point_cloud_normal_loss.item(),
- 'sdf_loss':sdf_loss.item(),
- 'eikonal_loss': eikonal_loss.item(),
- "variation_loss": variation_loss.item(),
- 'truncation(meter)': self.get_truncation()/self.cfg['sc_factor'],
- }
- if self.models['pose_array'] is not None:
- metrics['pose_reg'] = pose_reg.item()
- if 'feature_array' in self.models:
- metrics['reg_features'] = reg_features.item()
- for k in metrics.keys():
- msg += f"{k}: {metrics[k]:.7f}, "
- msg += "\n"
- logging.info(msg)
-
- if self._run is not None:
- for k in metrics.keys():
- self._run.log_scalar(k,metrics[k],self.global_step)
-
- if self.global_step % self.cfg['i_mesh'] == 0 and self.global_step > 0:
- with torch.no_grad():
- model = self.models['model_fine'] if self.models['model_fine'] is not None else self.models['model']
- mesh = self.extract_mesh(isolevel=0, voxel_size=self.cfg['mesh_resolution'])
- self.mesh = copy.deepcopy(mesh)
- if mesh is not None:
- dir = os.path.join(self.cfg['save_dir'], f'step_{self.global_step:07d}_mesh_normalized_space.obj')
- mesh.export(dir)
- if self._run is not None:
- self._run.add_artifact(dir)
- dir = os.path.join(self.cfg['save_dir'], f'step_{self.global_step:07d}_mesh_real_world.obj')
- if self.models['pose_array'] is not None:
- _,offset = get_optimized_poses_in_real_world(self.poses,self.models['pose_array'],translation=self.cfg['translation'],sc_factor=self.cfg['sc_factor'])
- else:
- offset = np.eye(4)
- mesh = mesh_to_real_world(mesh,offset,translation=self.cfg['translation'],sc_factor=self.cfg['sc_factor'])
- mesh.export(dir)
- if self._run is not None:
- self._run.add_artifact(dir)
-
- if self.global_step % self.cfg['i_pose'] == 0 and self.global_step > 0:
- if self.models['pose_array'] is not None:
- optimized_poses,offset = get_optimized_poses_in_real_world(self.poses,self.models['pose_array'],translation=self.cfg['translation'],sc_factor=self.cfg['sc_factor'])
- else:
- optimized_poses = self.poses
- dir = os.path.join(self.cfg['save_dir'], f'step_{self.global_step:07d}_optimized_poses.txt')
- np.savetxt(dir,optimized_poses.reshape(-1,4))
- if self._run is not None:
- self._run.add_artifact(dir)
-
-
def train(self):
set_seed(0)
- # if self.cfg['support_pts'] and self.cfg['sdfs_from_cnets']:
- # cnets_mesh_dir = './assets/test_004.obj' # pll output mesh from 23 trajectories
- # mesh_pts, mesh_sdfs, _, mesh_normals, near_surface_pts, near_surface_sdf, near_surface_normals = generate_mesh_pts(self.offset,self.cfg['translation'],self.cfg['sc_factor'],self.annotated_poses_dir,cnets_mesh_dir,num_surface=50000, num_inner=10, num_outer=10, dist=0.2, separate_on_and_near=True)
- # mesh_pts = mesh_pts.float().cuda()
- # mesh_sdfs = mesh_sdfs.float().cuda()
- # mesh_normals = mesh_normals.float().cuda()
-
- # near_surface_pts = near_surface_pts.float().cuda()
- # near_surface_sdf = near_surface_sdf.float().cuda()
- # near_surface_normals = near_surface_normals.float().cuda()
-
- # truncation_scale = self.get_truncation()
- # mesh_sdfs = mesh_sdfs / truncation_scale
- # near_surface_sdf = near_surface_sdf / truncation_scale
- # print(f'{mesh_pts.shape=}, {mesh_sdfs.shape=}, {mesh_normals.shape=}')
- # torch.save(mesh_pts.cpu(), 'mesh_pts.pt')
- # torch.save(mesh_sdfs.cpu(), 'mesh_sdfs.pt')
- # print(f"{mesh_sdfs.amax()=}, {mesh_sdfs.amin()=}")
- # self.pretrain_network(mesh_pts, mesh_sdfs, mesh_normals, truncation_scale, near_surface_pts, near_surface_sdf, near_surface_normals)
for iter in range(self.N_iters):
- if iter%(self.N_iters//10)==0:
+ if iter % (self.N_iters//10) == 0:
logging.info(f'train progress {iter}/{self.N_iters}')
batch = next(self.data_loader)
- self.train_loop(batch.cuda())
+ if self.contact_in_cam:
+ if self.data_loader_ps is not None:
+ batch_ps = next(self.data_loader_ps)
+ batch_ps = batch_ps.cuda()
+ else:
+ batch_ps = None
+ if self.data_loader_vs is not None:
+ batch_vs = next(self.data_loader_vs)
+ batch_vs = batch_vs.cuda()
+ else:
+ batch_vs = None
+ self.train_loop(batch.cuda(), batch_ps, batch_vs)
+ else:
+ self.train_loop(batch.cuda())
self.global_step += 1
-
+    print(f'Training done. The last iteration has global step {self.global_step-1}.')
+    print(f"{self.cfg['n_step']=}, {self.N_iters=}")
+
def make_key_ray_ids(self):
- with gzip.open(f"{self.cfg['datadir']}/matches_all.pkl",'rb') as ff:
+ with gzip.open(f"{self.cfg['nerf_temp_dir']}/matches_all.pkl",'rb') as ff:
matches_table = pickle.load(ff)
key_ray_ids = []
kpts_vox_ids = []
@@ -1243,8 +1124,8 @@ def rays_to_pts_world(rays):
pts = (tf@to_homo_torch(pts)[...,None])[:,:3,0]
return pts,rgb
- ptsA,rgbA = rays_to_pts_world(rayA)
- ptsB,rgbB = rays_to_pts_world(rayB)
+ ptsA, rgbA = rays_to_pts_world(rayA)
+ ptsB, rgbB = rays_to_pts_world(rayB)
loss = (ptsA-ptsB).norm(dim=-1)
loss = loss[loss<0.02*self.cfg['sc_factor']].mean()
@@ -1262,7 +1143,7 @@ def rays_to_pts_world(rays):
optimized_poses,offset = get_optimized_poses_in_real_world(self.poses,self.models['pose_array'],translation=self.cfg['translation'],sc_factor=self.cfg['sc_factor'])
else:
optimized_poses = self.poses
- dir = os.path.join(self.cfg['save_dir'], f'step_{self.global_step}_optimized_poses.txt')
+ dir = op.join(self.cfg['nerf_temp_dir'], f'step_{self.global_step}_optimized_poses.txt')
np.savetxt(dir,optimized_poses.reshape(-1,4))
if self._run is not None:
self._run.add_artifact(dir)
@@ -1302,6 +1183,68 @@ def sample_rays_uniform_occupied_voxels(self,ray_ids,rays_d,depths_in_out,lindis
return z_vals,z_vals_continous
+ def track_mesh_visibility(self, mesh):
+        """Estimate the visibility of the mesh by ray tracing.
+
+        Check the normal direction of each mesh vertex: a point is invisible
+        if its normal has a non-positive dot product with every ray.
+        Check ray-mesh intersection: a point is visible if the first
+        intersection between a ray and the mesh lies on a triangle that
+        contains the point. The implementation below relies on the ray-mesh
+        intersection test.
+
+        Returns a boolean visibility mask over mesh vertices and the indices
+        of the triangles that no ray hit.
+        """
+ data_loader = DataLoader(rays=self.rays, batch_size=self.cfg['N_rand'], infinite=False)
+ indices_triangles_hit = []
+ for i, batch in enumerate(tqdm(data_loader, desc='track_mesh_visibility')):
+ indices_triangle = self.get_visible_triangles_from_rays(batch, mesh)
+ indices_triangles_hit.append(indices_triangle)
+ indices_triangles_hit = np.concatenate(indices_triangles_hit, axis=0)
+        # indices_triangles_hit may contain duplicates and -1 entries (rays
+        # that missed the mesh); drop both before building the mask.
+        indices_triangles_hit = np.unique(indices_triangles_hit)
+        indices_triangles_hit = indices_triangles_hit[indices_triangles_hit >= 0]
+ triangles_hit_mask = np.zeros(mesh.faces.shape[0], dtype=bool)
+ triangles_hit_mask[indices_triangles_hit] = True
+ indices_triangles_unhit = np.asarray(triangles_hit_mask == False).nonzero()[0]
+
+ indices_vertices_hit = mesh.faces[indices_triangles_hit].reshape(-1)
+ indices_vertices_hit = np.unique(indices_vertices_hit)
+
+ vertices_hit_mask = np.zeros(mesh.vertices.shape[0], dtype=bool)
+ vertices_hit_mask[indices_vertices_hit] = True
+ # indices_vertices_unhit = np.asarray(vertices_hit_mask == False).nonzero()[0]
+
+ return vertices_hit_mask, indices_triangles_unhit
+
+ def get_connected_vertices(self, mesh, vertices):
+ """Get all vertices that share an edge with given vertices"""
+ connected = set()
+
+ # For each face
+ for face in mesh.faces:
+ # If any vertex in face is in our target set
+ if any(v in vertices for v in face):
+ # Add all vertices of this face
+ connected.update(face)
+
+ return connected
+
+ def get_visible_triangles_from_rays(self, rays, mesh):
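+        """Return, for every ray in `rays`, the index of the first mesh
+        triangle it hits. Ray origins and directions are moved into the
+        normalized world frame using the current optimized poses before
+        intersecting with trimesh's pyembree RayMeshIntersector; rays that
+        miss the mesh get index -1 (the intersects_first convention).
+        """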
+ N_rays = rays.shape[0]
+ rays_d = rays[:,self.ray_dir_slice]
+ rays_o = torch.zeros_like(rays_d)
+ viewdirs = rays_d/rays_d.norm(dim=-1,keepdim=True)
+
+ frame_ids = rays[:,self.ray_frame_id_slice].long()
+ tf = self.c2w_array[frame_ids]
+ if self.models['pose_array'] is not None:
+ tf = self.models['pose_array'].get_matrices(frame_ids)@tf
+
+ rays_o_w = transform_pts(rays_o,tf)
+ viewdirs_w = (tf[:,:3,:3]@viewdirs[:,None].permute(0,2,1))[:,:3,0]
+ rays_o_w = rays_o_w.detach().cpu().numpy()
+ viewdirs_w = viewdirs_w.detach().cpu().numpy()
+
+ intersector = trimesh.ray.ray_pyembree.RayMeshIntersector(mesh)
+ indices_triangle = intersector.intersects_first(rays_o_w,viewdirs_w)
+ return indices_triangle
+
def render_rays(self,ray_batch,retraw=True,lindisp=False,perturb=False,raw_noise_std=0.,depth=None, get_normals=False, ray_ids=None):
"""Volumetric rendering.
@@ -1377,6 +1320,7 @@ def render_rays(self,ray_batch,retraw=True,lindisp=False,perturb=False,raw_noise
deformation = None
raw,normals,valid_samples = self.run_network(pts, viewdirs, frame_ids, tf=tf, valid_samples=valid_samples, get_normals=get_normals) # [N_rays, N_samples, 4]
+ # pdb.set_trace()
rgb_map, weights = self.raw2outputs(raw, z_vals, rays_d, raw_noise_std=raw_noise_std, valid_samples=valid_samples, depth=depth)
if self.cfg['N_importance'] > 0:
@@ -1468,7 +1412,9 @@ def sdf2weights(sdf):
return rgb_map, weights
- def render(self, rays, ray_ids=None, frame_ids=None,depth=None,lindisp=False,perturb=False,raw_noise_std=0.0, get_normals=False, near=None, far=None):
+ def render(self, rays, ray_ids=None, frame_ids=None, depth=None,
+ lindisp=False, perturb=False, raw_noise_std=0.0, get_normals=False,
+ near=None, far=None):
"""Render rays
Args:
H: int. Height of image in pixels.
@@ -1488,7 +1434,9 @@ def render(self, rays, ray_ids=None, frame_ids=None,depth=None,lindisp=False,per
acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
extras: dict with everything.
"""
- all_ret = self.batchify_rays(rays,depth=depth,lindisp=lindisp,perturb=perturb,raw_noise_std=raw_noise_std, get_normals=get_normals, ray_ids=ray_ids)
+ all_ret = self.batchify_rays(rays, depth=depth, lindisp=lindisp,
+ perturb=perturb, raw_noise_std=raw_noise_std,
+ get_normals=get_normals, ray_ids=ray_ids)
k_extract = ['rgb_map']
ret_list = [all_ret[k] for k in k_extract]
@@ -1601,13 +1549,16 @@ def run_network(self, inputs, viewdirs, frame_ids, tf, latent_code=None, valid_s
def run_network_density(self, inputs, get_normals=False):
- """Directly query the network w/o pose transformations or deformations (inputs are already in normalized [-1,1]); Particularly used for mesh extraction
+ """Directly query the network w/o pose transformations or deformations
+ (inputs are already in normalized [-1,1]); Particularly used for mesh
+ extraction
+
@inputs: (N,3) sampled points on rays in GL camera's frame
"""
inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])
inputs_flat = torch.clip(inputs_flat,-1,1)
- valid_samples = torch.ones((len(inputs_flat)),device=inputs.device).bool()
+ valid_samples = torch.ones((len(inputs_flat)), device=inputs.device).bool()
if not inputs_flat.requires_grad:
inputs_flat.requires_grad = True
@@ -1632,7 +1583,7 @@ def run_network_density(self, inputs, get_normals=False):
for i in range(0,embedded.shape[0],chunk):
alpha = self.models['model'].forward_sdf(embedded[i:i+chunk]) #(N,1)
outputs_flat.append(alpha.reshape(-1,1))
- outputs_flat = torch.cat(outputs_flat,dim=0).float()
+ outputs_flat = torch.cat(outputs_flat, dim=0).float()
outputs = torch.reshape(outputs_flat, list(inputs.shape[:-1]) + [outputs_flat.shape[-1]])
if get_normals:
@@ -1640,7 +1591,7 @@ def run_network_density(self, inputs, get_normals=False):
normal = torch.autograd.grad(outputs=outputs,inputs=inputs_flat,grad_outputs=d_output,create_graph=True,retain_graph=True)[0] # ,allow_unused=True,only_inputs=True
outputs = torch.cat((outputs, normal), dim=-1)
- return outputs,valid_samples
+ return outputs, valid_samples
def pretrain_network(self, pts, sdfs, target_normals, truncation_scale, near_surface_pts, near_surface_sdf, near_surface_normals):
"""pts, N*3, sdfs: N, target_normals: N*3, near_surface_pts: N'*2M*3, near_surface_sdf: N'*2M*1, near_surface_normals: N'*2M*3
@@ -1686,19 +1637,19 @@ def pretrain_network(self, pts, sdfs, target_normals, truncation_scale, near_sur
hcsdf_loss = torch.tensor(0)
# # Sanity Check
- # idxs_sp = torch.randperm(self.support_pts.shape[0])[:batch_size]
- # inputs_sp = self.support_pts[idxs_sp]
+ # idxs_sp = torch.randperm(self.ps.shape[0])[:batch_size]
+ # inputs_sp = self.ps[idxs_sp]
# inputs_sp = inputs_sp.float().cuda()
- # gts_sp = self.sdfs_from_cnets[idxs_sp]
+ # gts_sp = self.sdfs[idxs_sp]
# gts_sp = gts_sp.float().cuda()
# outputs, _ = self.run_network_density(inputs=inputs_sp)
# support_point_loss = torch.tensor(0)
# support_point_loss,_,_ = get_support_point_loss(outputs, gts_sp, self.cfg)
# loss = loss + support_point_loss
- # idxs_hc = torch.randperm(self.sampled_pts.shape[0])[:batch_size]
- # inputs_hc = self.sampled_pts[idxs_hc]
- # gts_hc = self.sdf_bounds_from_cnets[idxs_hc]
+ # idxs_hc = torch.randperm(self.vs.shape[0])[:batch_size]
+ # inputs_hc = self.vs[idxs_hc]
+ # gts_hc = self.sdf_bounds[idxs_hc]
# inputs_hc = inputs_hc.float().cuda()
# gts_hc = gts_hc.float().cuda()
# outputs, _ = self.run_network_density(inputs=inputs_hc)
@@ -1763,9 +1714,40 @@ def pretrain_network(self, pts, sdfs, target_normals, truncation_scale, near_sur
mesh.export('pretrained.obj')
print('Finished Pretraining')
+ @torch.no_grad()
+ def extract_grid(self, level=None, voxel_size=0.003, isolevel=0.0):
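+        """Query the SDF network on a dense voxel grid inside the configured
+        bounding box and return the predicted SDF values together with the
+        bounds spanned by the first and last voxel centers, the scaled voxel
+        size, and the number of voxels along x.
+        """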
+ # Query network on dense 3d grid of points
+ voxel_size *= self.cfg['sc_factor']
+ bounds = np.array(self.cfg['bounding_box']).reshape(2,3)
+ x_min, x_max = bounds[0,0], bounds[1,0]
+ y_min, y_max = bounds[0,1], bounds[1,1]
+ z_min, z_max = bounds[0,2], bounds[1,2]
+ tx = np.arange(x_min+0.5*voxel_size, x_max, voxel_size)
+ ty = np.arange(y_min+0.5*voxel_size, y_max, voxel_size)
+ tz = np.arange(z_min+0.5*voxel_size, z_max, voxel_size)
+ N = len(tx)
+ query_pts = torch.tensor(
+ np.stack(np.meshgrid(tx, ty, tz, indexing='ij'), -1
+ ).astype(np.float32).reshape(-1,3)).float().cuda()
+
+ flat = query_pts
+ logging.info(f'query_pts:{query_pts.shape}')
+ sigma = []
+ chunk = self.cfg['netchunk']
+ for i in range(0,flat.shape[0],chunk):
+ inputs = flat[i:i+chunk]
+ with torch.no_grad():
+ outputs,valid_samples = self.run_network_density(inputs=inputs)
+ sigma.append(outputs)
+ sigma = torch.cat(sigma, dim=0)
+ sigma = sigma.data.cpu().numpy()
+ bounds_first_last_voxel_center = \
+ np.array([tx[0], ty[0], tz[0], tx[-1], ty[-1], tz[-1]]).reshape(2,3)
+ return sigma, bounds_first_last_voxel_center, voxel_size, N
@torch.no_grad()
- def extract_mesh(self, level=None, voxel_size=0.003, isolevel=0.0, return_sigma=False):
+ def extract_mesh(self, level=None, voxel_size=0.003, isolevel=0.0,
+ return_sigma=False):
# Query network on dense 3d grid of points
voxel_size *= self.cfg['sc_factor'] # in "network space"
@@ -1777,7 +1759,9 @@ def extract_mesh(self, level=None, voxel_size=0.003, isolevel=0.0, return_sigma=
ty = np.arange(y_min+0.5*voxel_size, y_max, voxel_size)
tz = np.arange(z_min+0.5*voxel_size, z_max, voxel_size)
N = len(tx)
- query_pts = torch.tensor(np.stack(np.meshgrid(tx, ty, tz, indexing='ij'), -1).astype(np.float32).reshape(-1,3)).float().cuda()
+ query_pts = torch.tensor(
+ np.stack(np.meshgrid(tx, ty, tz, indexing='ij'), -1
+ ).astype(np.float32).reshape(-1,3)).float().cuda()
if self.octree_m is not None:
vox_size = self.cfg['octree_raytracing_voxel_size']*self.cfg['sc_factor']
@@ -1805,7 +1789,8 @@ def extract_mesh(self, level=None, voxel_size=0.003, isolevel=0.0, return_sigma=
from skimage import measure
try:
- vertices, triangles, normals, values = measure.marching_cubes(sigma, isolevel)
+ vertices, triangles, normals, values = measure.marching_cubes(
+ sigma, isolevel)
except Exception as e:
logging.info(f"ERROR Marching Cubes {e}")
return None
@@ -1813,16 +1798,25 @@ def extract_mesh(self, level=None, voxel_size=0.003, isolevel=0.0, return_sigma=
logging.info(f'done V:{vertices.shape}, F:{triangles.shape}')
# np.save(f'./vertices.npy', vertices)
# print(f'Saved vertices!')
- # Rescale and translate
- voxel_size_ndc = np.array([tx[-1] - tx[0], ty[-1] - ty[0], tz[-1] - tz[0]]) / np.array([[tx.shape[0] - 1, ty.shape[0] - 1, tz.shape[0] - 1]])
+
+ # Rescale and translate: The marching cubes algorithm has no notion of
+ # where the signed distance values were generated, so the vertex locations
+ # are in index-space.
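+    # For example, a marching-cubes vertex at index (0, 0, 0) maps to the
+    # first voxel center (tx[0], ty[0], tz[0]) after this rescale and offset,
+    # and the last index maps to (tx[-1], ty[-1], tz[-1]).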
+ voxel_size_ndc = np.array(
+ [tx[-1] - tx[0], ty[-1] - ty[0], tz[-1] - tz[0]]) / \
+ np.array([[tx.shape[0] - 1, ty.shape[0] - 1, tz.shape[0] - 1]])
offset = np.array([tx[0], ty[0], tz[0]])
- vertices[:, :3] = voxel_size_ndc.reshape(1,3) * vertices[:, :3] + offset.reshape(1,3)
+ vertices[:, :3] = voxel_size_ndc.reshape(1,3)*vertices[:, :3] + \
+ offset.reshape(1,3)
# Create mesh
mesh = trimesh.Trimesh(vertices, triangles, process=False)
if return_sigma:
- return mesh,sigma,query_pts
+ bounds_first_last_voxel_center = \
+ np.array([tx[0], ty[0], tz[0], tx[-1], ty[-1], tz[-1]]).reshape(2,3)
+ return mesh, sigma, bounds_first_last_voxel_center, voxel_size, N
+ # return mesh, sigma, query_pts
return mesh
@@ -1883,7 +1877,7 @@ def mesh_vertex_color_from_train_images(self,mesh):
cnt = np.zeros(len(mesh.vertices))
for i in range(len(self.images)):
print(f'mesh_vertex_color_from_train_images {i}/{len(self.images)}')
- cvcam_in_ob = tf[i]@np.linalg.inv(glcam_in_cvcam)
+ cvcam_in_ob = tf[i]@np.linalg.inv(GLCAM_IN_CVCAM)
_, depth = renderer.render([np.linalg.inv(cvcam_in_ob)])
xyz_map = depth2xyzmap(depth, self.K)
valid = (depth>=0.1*self.cfg['sc_factor']) & (self.masks[i].reshape(self.H,self.W).astype(bool))
@@ -1909,13 +1903,14 @@ def mesh_texture_from_train_images(self, mesh, rgbs_raw, train_texture=False, te
'''
assert len(self.images)==len(rgbs_raw)
+    print('Getting pose matrices')
frame_ids = torch.arange(len(self.images)).long().cuda()
tf = self.c2w_array[frame_ids]
if self.models['pose_array'] is not None:
tf = self.models['pose_array'].get_matrices(frame_ids)@tf
tf = tf.data.cpu().numpy()
- from offscreen_renderer import ModelRendererOffscreen
+    print('Postprocessing mesh')  # It takes a long time for bad meshes
tex_image = torch.zeros((tex_res,tex_res,3)).cuda().float()
weight_tex_image = torch.zeros(tex_image.shape[:-1]).cuda().float()
mesh.merge_vertices()
@@ -1924,9 +1919,14 @@ def mesh_texture_from_train_images(self, mesh, rgbs_raw, train_texture=False, te
H,W = tex_image.shape[:2]
uvs_tex = (mesh.visual.uv*np.array([W-1,H-1]).reshape(1,2)) #(n_V,2)
- renderer = ModelRendererOffscreen([], cam_K=self.K, H=self.H, W=self.W, zfar=self.cfg['far']*self.cfg['sc_factor'])
+    print('Creating renderer')
+ from offscreen_renderer import ModelRendererOffscreen
+ renderer = ModelRendererOffscreen(
+ [], cam_K=self.K, H=self.H, W=self.W,
+ zfar=self.cfg['far']*self.cfg['sc_factor'])
renderer.add_mesh(mesh)
+    print('Mesh to CUDA')
vertices_cuda = torch.from_numpy(mesh.vertices).float().cuda()
faces_cuda = torch.from_numpy(mesh.faces).long().cuda()
face_vertices = torch.zeros((len(faces_cuda),3,3))
@@ -1937,7 +1937,7 @@ def mesh_texture_from_train_images(self, mesh, rgbs_raw, train_texture=False, te
print(f'project train_images {i}/{len(rgbs_raw)}')
############# Raterization
- cvcam_in_ob = tf[i]@np.linalg.inv(glcam_in_cvcam)
+ cvcam_in_ob = tf[i]@np.linalg.inv(GLCAM_IN_CVCAM)
_, render_depth = renderer.render([np.linalg.inv(cvcam_in_ob)])
xyz_map = depth2xyzmap(render_depth, self.K)
mask = self.masks[i].reshape(self.H,self.W).astype(bool)
@@ -2036,4 +2036,956 @@ def render_rays_sdf(self, points, tf, frame_ids, retraw=True):
if retraw:
ret['raw'] = raw
- return ret
\ No newline at end of file
+ return ret
+
+
+    # Helper methods used by the refactored train_loop below.
+ def compute_weights(self, batch, sdf, valid_samples, frame_ids):
+ """Compute ray and sample weights."""
+
+ N_rays, N_samples = sdf.shape[:2]
+ valid_rays = (valid_samples>0).any(dim=-1).bool().reshape(N_rays) & \
+ (batch[:,self.ray_type_slice]==0)
+
+ ray_type = batch[:,self.ray_type_slice].reshape(-1)
+ ray_weights = torch.ones((N_rays), device=sdf.device, dtype=torch.float32)
+ ray_weights[(frame_ids==0).view(-1)] = self.cfg['first_frame_weight']
+ ray_weights = ray_weights*valid_rays.view(-1)
+ sample_weights = ray_weights.view(N_rays,1).expand(-1,N_samples) * \
+ valid_samples
+ sample_weights[ray_type==1] = 0
+ return ray_weights, sample_weights, valid_rays
+
+ def compute_rgb_loss(self, rgb, target_s, ray_weights, extras):
+ """Compute RGB loss."""
+ img_loss = (((rgb - target_s) ** 2 * ray_weights.view(-1, 1))).mean()
+
+ img_loss0 = torch.tensor(0).to(rgb.device)
+ if 'rgb0' in extras:
+ img_loss0 = (((extras['rgb0'] - target_s) ** 2 * ray_weights.view(-1, 1))).mean()
+
+ loss_weighted = {
+ 'rgb_loss': img_loss * self.cfg['rgb_weight'],
+ 'rgb0_loss': img_loss0 * self.cfg['rgb_weight'],
+ }
+ loss_unweighted = {
+ 'rgb_loss': img_loss,
+ 'rgb0_loss': img_loss0,
+ }
+
+ return loss_weighted, loss_unweighted
+
+ def compute_depth_loss(self, sdf, z_vals, target_d, ray_weights):
+ """Compute depth loss."""
+ depth_loss = torch.tensor(0).to(sdf.device)
+ if self.cfg['depth_weight'] > 0:
+ # Compute depth loss
+ signs = sdf[:, 1:] * sdf[:, :-1]
+ mask = signs < 0
+ inds = torch.argmax(mask.float(), axis=1)[..., None]
+ z_min = torch.gather(z_vals, dim=1, index=inds)
+ weights = ray_weights * (target_d <= self.cfg['far'] * self.cfg['sc_factor']) * mask.any(dim=-1)
+ depth_loss = ((z_min * weights - target_d.view(-1, 1) * weights) ** 2).mean()
+ loss_weighted = {
+ 'depth_loss': depth_loss * self.cfg['depth_weight'],
+ 'depth_loss0': torch.tensor(0).to(sdf.device),
+ }
+ loss_unweighted = {
+ 'depth_loss': depth_loss,
+ 'depth_loss0': torch.tensor(0).to(sdf.device),
+ }
+ return loss_weighted, loss_unweighted
+
+ def compute_sdf_losses(self, sdf, z_vals, target_d, sample_weights, batch, truncation):
+ """Compute SDF-related losses."""
+ uncertain_fs_loss, empty_loss, sdf_loss, front_mask, empty_space_mask, \
+ sdf_mask, uncertain_fs_mask, uncertain_fs_mask_far, gt_sdf = \
+ get_sdf_loss(
+ z_vals, target_d.reshape(-1, 1).expand(-1, sdf.shape[1]), sdf,
+ truncation, self.cfg, return_mask=True, sample_weights=sample_weights,
+ rays_d=batch[:, self.ray_dir_slice]
+ )
+ losses_weighted = {
+ 'uncertain_fs_loss': uncertain_fs_loss * self.cfg['fs_weight'],
+ 'empty_loss': empty_loss * self.cfg['fs_weight'],
+ 'sdf_loss': sdf_loss * self.cfg['trunc_weight'],
+ }
+ losses_unweighted = {
+ 'uncertain_fs_loss': uncertain_fs_loss,
+ 'empty_loss': empty_loss,
+ 'sdf_loss': sdf_loss,
+ }
+ extras = {
+ 'masks': [front_mask, empty_space_mask, sdf_mask, uncertain_fs_mask, uncertain_fs_mask_far],
+ 'gt_sdf': gt_sdf
+ }
+ return losses_weighted, losses_unweighted, extras
+
+ def compute_convexity_vision_loss(self, near_pts, near_sdf, near_sdf_gt):
+ """Compute convexity loss."""
+ convexity_vision_loss = torch.tensor(0).to(near_sdf.device)
+ if self.cfg['convexity_vision_weight'] > 0:
+ ### Convexity sdf loss (without involving contacts)
+ near_sdf_neg_vision = near_sdf_gt[near_sdf_gt<0]
+ near_pts_neg_vision = near_pts[near_sdf_gt<0]
+ if len(near_sdf_neg_vision)>0:
+ pts_interp_vision, sdfs_interp_vision = convex_interp(near_pts_neg_vision, near_sdf_neg_vision,
+ num_samples=10000)
+ predicted_sdfs_interp_vision, _ = self.run_network_density(inputs=pts_interp_vision)
+ ### compute the loss (l1 upper bound)
+ convexity_vision_loss = torch.mean(torch.clamp(predicted_sdfs_interp_vision - sdfs_interp_vision, min=0))
+ self.log_cvv_pts(pts_interp_vision, predicted_sdfs_interp_vision, sdfs_interp_vision)
+ loss_weighted = {
+ 'convexity_vision_loss': convexity_vision_loss * self.cfg['convexity_vision_weight'],
+ }
+ loss_unweighted = {
+ 'convexity_vision_loss': convexity_vision_loss,
+ }
+ return loss_weighted, loss_unweighted
+
+ def log_cvv_pts(self, pts_interp_vision, predicted_sdfs_interp_vision, sdfs_interp_vision):
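+        """Every 200 steps, cache the convexity-vision interpolated points
+        together with their predicted SDFs and interpolated SDF upper bounds
+        for later inspection (only when store_sdf_pts is enabled).
+        """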
+ if self.store_sdf_pts:
+ if self.global_step % 200 == 0:
+ self.interp_pts_vision.append(pts_interp_vision.cpu().detach())
+ self.interp_pts_sdfub_vision.append(sdfs_interp_vision.cpu().detach())
+ self.interp_pts_predsdf_vision.append(predicted_sdfs_interp_vision.cpu().detach())
+
+ def log_pts(self, pts, sdf, masks):
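+        """Every 200 steps, cache free-space, near-surface, and uncertain ray
+        samples (and their predicted SDFs) for later inspection (only when
+        store_sdf_pts is enabled).
+        """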
+ front_mask, empty_space_mask, sdf_mask, uncertain_fs_mask, uncertain_fs_mask_far = masks
+ if self.store_sdf_pts:
+ if self.global_step % 200 == 0:
+ print(f"front_truncated: {front_mask.sum()}, front_truncated_badpred: {empty_space_mask.sum()}, near_surface: {sdf_mask.sum()}, far_truncated_badreg: {uncertain_fs_mask.sum()}, far_truncated: {uncertain_fs_mask_far.sum()}")
+ # expand the shape
+ self.free_pts.append(pts[front_mask].cpu().detach())
+ self.free_pts_predsdf.append(sdf[front_mask].cpu().detach())
+ self.near_pts.append(pts[sdf_mask].cpu().detach())
+ self.near_pts_predsdf.append(sdf[sdf_mask].cpu().detach())
+ self.uncertain_pts.append(pts[uncertain_fs_mask_far].cpu().detach())
+ self.uncertain_pts_predsdf.append(sdf[uncertain_fs_mask_far].cpu().detach())
+
+ def log_slices_wo_support(self):
+ # Saving slices when not using contact points.
+ if self.global_step == self.N_iters - 2 and self.cfg['n_step'] > 500:
+ if self.store_slices:
+ center = torch.tensor([0, 0, 0], device='cuda').float().reshape(1, 3)
+ slices = create_2d_slice_around_point(center, 0, (-1, 1), 200)
+ predicted_sdfs_slices, _ = self.run_network_density(inputs=slices.view(-1, 3))
+ predicted_sdfs_slices = predicted_sdfs_slices.reshape(slices.shape[:-1])
+ print(f"slices: {slices.shape}")
+ print(f"predicted_sdfs_slices: {predicted_sdfs_slices.shape}")
+ self.slice_cps.append(slices.cpu().detach())
+ self.slice_cps_predsdf.append(predicted_sdfs_slices.cpu().detach())
+
+ all_cps_slices = torch.cat(self.slice_cps, 0)
+ all_cps_slices_predsdf = torch.cat(self.slice_cps_predsdf, 0)
+
+ print(f"all_cps_slices: {all_cps_slices.shape}")
+ print(f"all_cps_slices_predsdf: {all_cps_slices_predsdf.shape}")
+
+ folder = file_utils.nerf_results_sdf_inspection_subdir(
+ self.cfg['debug_dir'], self.cfg['bundlesdf_run_id'])
+
+ torch.save(all_cps_slices, op.join(folder, 'cps_slices.pt'))
+ torch.save(all_cps_slices_predsdf, op.join(folder, 'cps_slices_predsdf.pt'))
+
+ def support_point_loss_cic(self, batch_ps, truncation):
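+        """Support-point loss in contact_in_cam mode.
+
+        Rows of `batch_ps` hold contact points expressed in the camera frame,
+        their SDF targets (in meters) from the contact nets, and the frame id
+        used to look up the optimized pose; the points are transformed into
+        the normalized world space and the targets into the normalized SDF
+        range before querying the network.
+        """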
+        # Schedule a logging window covering roughly the last
+        # 50000/ps_batch_size iterations (only when n_step > 500); once it has
+        # elapsed or training ends, cps_finished is set so the contact-point
+        # diagnostics are saved exactly once.
+ steps = int(50000 / self.ps_batch_size) + 2
+ start_step = self.cfg['n_step'] - steps
+ effect_step = self.global_step - start_step if self.cfg['n_step'] > 500 else -1
+ if effect_step < 0 or self.cps_finished == 1:
+ pass
+ else:
+ if (effect_step+1)*self.ps_batch_size >= 50000 or self.global_step == self.cfg['n_step']:
+ self.cps_finished = 1
+ else:
+ self.cps_finished = 0
+
+ support_point_loss = torch.tensor(0).to(batch_ps.device)
+ ps_in_cam = batch_ps[:, self.contact_ps_slice]
+ ps_in_cam_sdf = batch_ps[:, self.contact_sdfs_slice]
+ ps_frame_ids = batch_ps[:, self.contact_frame_ids_slice].long()
+ ps_tf = self.c2w_array[ps_frame_ids]
+ if self.models['pose_array'] is not None:
+ ps_tf = self.models['pose_array'].get_matrices(ps_frame_ids)@ps_tf
+
+ glcam_in_cvcam = torch.tensor(GLCAM_IN_CVCAM, device=ps_in_cam.device, dtype=torch.float32).unsqueeze(0)
+ ps_tf = ps_tf @ torch.linalg.inv(glcam_in_cvcam)
+
+ ps_in_world = transform_pts(ps_in_cam, ps_tf)
+ valid_samples_ps = (torch.abs(ps_in_world)<=1).all(dim=-1).bool()
+
+ ### filter contact points based on octree
+ if self.octree_contact_filter:
+ idx_support_pts_in_octree = self.octree_vision_m.get_center_ids(ps_in_world, self.octree_vision_m.max_level)
+ valid_samples_ps = valid_samples_ps & (idx_support_pts_in_octree==-1)
+ # if valid_samples_ps.sum() > 0:
+ # pdb.set_trace()
+
+ ps_in_world_valid = ps_in_world[valid_samples_ps]
+ ps_in_cam_sdf_valid = ps_in_cam_sdf[valid_samples_ps]
+
+ support_pts_flatten = ps_in_world_valid.view(-1,3).float().cuda()
+
+ # Convert sdf target to regression space
+ sdfs_from_cnets = copy.deepcopy(ps_in_cam_sdf_valid)
+ sdfs_from_cnets = \
+ contact_loss_utils.convert_meter_sdfs_to_normalized_sdfs(
+ sdfs=sdfs_from_cnets, sc_factor=self.cfg['sc_factor'],
+ truncation=truncation
+ )
+
+ if valid_samples_ps.sum() == 0:
+ empty_mask = torch.tensor([], device=ps_in_world.device, dtype=torch.bool)
+ sdf_mask = torch.tensor([], device=ps_in_world.device, dtype=torch.bool)
+ predicted_sdfs = torch.tensor([], device=ps_in_world.device, dtype=torch.float32)
+ on_surface_gt_mask = torch.tensor([], device=ps_in_world.device, dtype=torch.bool)
+ on_surface_pred_mask = torch.tensor([], device=ps_in_world.device, dtype=torch.bool)
+ else:
+ predicted_sdfs, _ = self.run_network_density(inputs=support_pts_flatten)
+ predicted_sdfs = predicted_sdfs.squeeze(1)
+
+ support_point_loss, empty_mask, sdf_mask, on_surface_gt_mask, on_surface_pred_mask = get_support_point_loss(
+ predicted_sdf=predicted_sdfs, support_point_sdf=sdfs_from_cnets,
+ cfg=self.cfg)
+ # support_point_loss = support_point_loss * self.cfg['support_pts_weight']
+
+ empty_pts_support = support_pts_flatten[empty_mask]
+ sdf_pts_support = support_pts_flatten[sdf_mask]
+ sdf_pts_support_sdf_gt = sdfs_from_cnets[sdf_mask]
+ extras = [empty_pts_support, sdf_pts_support, sdf_pts_support_sdf_gt, effect_step,
+ on_surface_gt_mask, on_surface_pred_mask, predicted_sdfs, sdfs_from_cnets, support_pts_flatten, empty_mask, sdf_mask]
+ return support_point_loss, extras
+
+ def support_point_loss_global(self, truncation):
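+        """Support-point loss using the globally accumulated contact points
+        (self.ps) and their SDF targets (self.sdfs), which are stored in
+        meters and converted to the normalized space before querying the
+        network.
+        """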
+ num_samples = 10000
+
+ # print(f"self.ps.shape: {self.ps.shape}") # 18000
+
+ # Generate random indices, ensuring all samples are used first before
+ # repeating.
+ steps = int(self.ps.size(0) / num_samples) + 2
+ start_step = self.cfg['n_step'] - steps
+ effect_step = self.global_step - start_step if self.cfg['n_step'] > 500 else -1
+ if effect_step < 0 or self.cps_finished == 1:
+ indices = torch.randperm(self.ps.size(0))[:num_samples]
+ else:
+ if (effect_step+1)*num_samples >= len(self.sdfs) or self.global_step == self.cfg['n_step']:
+ end_idx = len(self.sdfs)
+ self.cps_finished = 1
+ else:
+ end_idx = (effect_step+1)*num_samples
+ self.cps_finished = 0
+ if effect_step*num_samples >= len(self.sdfs):
+ start_idx = 0
+ else:
+ start_idx = effect_step*num_samples
+ indices = torch.arange(start_idx, end_idx)
+
+ # Load PLL data.
+ sdfs_from_cnets = copy.deepcopy(self.sdfs[indices])
+ sdfs_from_cnets = \
+ contact_loss_utils.convert_meter_sdfs_to_normalized_sdfs(
+ sdfs=sdfs_from_cnets, sc_factor=self.cfg['sc_factor'],
+ truncation=truncation
+ )
+
+ support_pts = copy.deepcopy(self.ps[indices])
+ support_pts = \
+ contact_loss_utils.convert_meter_points_to_normalized_space_for_sdf(
+ points=support_pts, sc_factor=self.cfg['sc_factor'],
+ offset=self.offset, translation=self.cfg['translation'],
+ bsdf_init_pose=self.ob_init_cam
+ )
+
+ support_pts_flatten = support_pts.view(-1,3).float().cuda()
+
+ predicted_sdfs, _ = self.run_network_density(inputs=support_pts_flatten)
+ # predicted_sdfs, _ = self.run_network_density(inputs=support_pts_flatten, get_normals=True)
+ # predicted_sdfs, predicted_normals = predicted_sdfs[..., :-3], predicted_sdfs[..., -3:]
+ # # eikonal_loss = torch.tensor(0)
+ # # if self.cfg['support_eikonal_weight']>0:
+ # # nerf_normals = extras['normals']
+ # eikonal_loss = ((torch.norm(predicted_normals, dim=-1)-1)**2).mean() #* self.cfg['eikonal_weight']
+ # loss += eikonal_loss
+
+ predicted_sdfs = predicted_sdfs.squeeze(1)
+
+ support_point_loss, empty_mask, sdf_mask, on_surface_gt_mask, on_surface_pred_mask = get_support_point_loss(
+ predicted_sdf=predicted_sdfs, support_point_sdf=sdfs_from_cnets,
+ cfg=self.cfg)
+ # support_point_loss *= self.cfg['support_pts_weight']
+
+ empty_pts_support = support_pts_flatten[empty_mask]
+ sdf_pts_support = support_pts_flatten[sdf_mask]
+ sdf_pts_support_sdf_gt = sdfs_from_cnets[sdf_mask]
+
+ extras = [empty_pts_support, sdf_pts_support, sdf_pts_support_sdf_gt, effect_step,
+ on_surface_gt_mask, on_surface_pred_mask, predicted_sdfs, sdfs_from_cnets, support_pts_flatten, empty_mask, sdf_mask]
+ return support_point_loss, extras
+
+ def log_slices_w_support(self, extras_sup):
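+        """Accumulate contact-point diagnostics from `extras_sup` and,
+        optionally, 2D SDF slices anchored at the on-surface contact points
+        (when store_slices is enabled).
+        """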
+ if len(extras_sup) == 0:
+ return
+ empty_pts_support, sdf_pts_support, sdf_pts_support_sdf_gt, effect_step, \
+ on_surface_gt_mask, on_surface_pred_mask, predicted_sdfs, sdfs_from_cnets, support_pts_flatten, empty_mask, sdf_mask = extras_sup
+ if effect_step >= 0:
+ self.all_cps_sdf.append(sdf_pts_support.cpu().detach())
+ self.all_cps_empty.append(empty_pts_support.cpu().detach())
+ self.all_cps_sdf_predsdf.append(predicted_sdfs[sdf_mask].cpu().detach())
+ self.all_cps_empty_predsdf.append(
+ predicted_sdfs[empty_mask].cpu().detach())
+ self.all_cps_sdf_gtsdf.append(sdfs_from_cnets[sdf_mask].cpu().detach())
+ self.all_cps_empty_gtsdf.append(
+ sdfs_from_cnets[empty_mask].cpu().detach())
+
+ if self.store_slices:
+ on_surface_gt_pts = support_pts_flatten[on_surface_gt_mask]
+ on_surface_pred_pts = support_pts_flatten[on_surface_pred_mask]
+ if on_surface_gt_pts.shape[0] > 0:
+ ### create a 2D grid YZ slice around the on surface points, scope: [-1, 1]
+ ### generate the sdf prediction for the slice and create a 2D map to view the sdf map
+ slices = create_2d_slice_around_point(on_surface_gt_pts, 0, (-1, 1), 200)
+ predicted_sdfs_slices, _ = self.run_network_density(inputs=slices.view(-1, 3))
+ predicted_sdfs_slices = predicted_sdfs_slices.reshape(slices.shape[:-1])
+ print(f"on_surface_gt_pts: {on_surface_gt_pts.shape}")
+ print(f"slices: {slices.shape}")
+ print(f"predicted_sdfs_slices: {predicted_sdfs_slices.shape}")
+ self.slice_cps_anchor.append(on_surface_gt_pts.cpu().detach())
+ self.slice_cps.append(slices.cpu().detach())
+ self.slice_cps_predsdf.append(predicted_sdfs_slices.cpu().detach())
+
+ def compute_convexity_loss(self, extra_support, near_sdf_gt, near_pts, batch_ps):
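+        """Convexity prior: interpolate between negative-SDF contact points
+        and negative-SDF vision points, treat the interpolated SDF as an
+        upper bound, and penalize network predictions that exceed it.
+        """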
+ convexity_loss = torch.tensor(0).to(near_pts.device)
+ if self.cfg['convexity_weight'] > 0:
+ if self.contact_in_cam and batch_ps is None:
+ pass
+ else:
+ empty_pts_support, sdf_pts_support, sdf_pts_support_sdf_gt, effect_step, \
+ on_surface_gt_mask, on_surface_pred_mask, predicted_sdfs, sdfs_from_cnets, support_pts_flatten, empty_mask, sdf_mask = extra_support
+ ### interpolate the sdf points and the interpolated sdf as upper bound (convexity prior)
+ # sdf_pts_support_sdf = predicted_sdfs[sdf_mask]
+ # sdf_pts_support_sdf_gt = sdfs_from_cnets[sdf_mask]
+ sdf_pts_support_sdf_neg = sdf_pts_support_sdf_gt[sdf_pts_support_sdf_gt<0]#.detach()
+ sdf_pts_support_neg = sdf_pts_support[sdf_pts_support_sdf_gt<0]#.detach()
+
+ near_sdf_neg = near_sdf_gt[near_sdf_gt<0]#.detach()
+ near_pts_neg = near_pts[near_sdf_gt<0]#.detach()
+
+ len_cnet_pts = sdf_pts_support_neg.shape[0]
+ len_bsdf_pts = near_pts_neg.shape[0]
+ if len_bsdf_pts > 0 and len_cnet_pts > 0:
+ pts_end_1_1, idx_end_1_1 = sample_point_based_on_distance_kdtree(sdf_pts_support_neg, near_pts_neg, 20000)
+ pts_end_1_2, idx_end_1_2 = sample_point_based_on_density_voxel(sdf_pts_support_neg, voxel_size=0.3, num_samples=30000)
+ pts_end_1 = torch.cat([pts_end_1_1, pts_end_1_2], dim=0)
+ sdfs_end_1 = torch.cat([sdf_pts_support_sdf_neg[idx_end_1_1], sdf_pts_support_sdf_neg[idx_end_1_2]], dim=0)
+
+ idx_end_2_1 = torch.randint(0, len(near_pts_neg), (40000,)) # near_pts_neg is on the magnitude of 10^5
+ _, idx_end_2_2 = sample_point_based_on_density_voxel(sdf_pts_support_neg, voxel_size=0.3, num_samples=10000)
+ pts_end_2 = torch.cat([near_pts_neg[idx_end_2_1], sdf_pts_support_neg[idx_end_2_2]], dim=0)
+ sdfs_end_2 = torch.cat([near_sdf_neg[idx_end_2_1], sdf_pts_support_sdf_neg[idx_end_2_2]], dim=0)
+ pts_interp, sdfs_interp = convex_interp(pts_end_1, sdfs_end_1, pts_end_2, sdfs_end_2, power=self.cfg['power_interp']) # 0.3
+
+ if self.cfg['convexity_vision_weight'] > 0:
+ pts_interp_3, sdfs_interp_3 = convex_interp(near_pts_neg, near_sdf_neg, num_samples=10000)
+ pts_interp = torch.cat([pts_interp, pts_interp_3], dim=0)
+ sdfs_interp = torch.cat([sdfs_interp, sdfs_interp_3], dim=0)
+
+ pts_neg = torch.cat([near_pts_neg, sdf_pts_support_neg], dim=0)
+ sdfs_neg = torch.cat([near_sdf_neg, sdf_pts_support_sdf_neg], dim=0)
+
+ ### feed the interpolated points to the network
+ predicted_sdfs_interp, _ = self.run_network_density(inputs=pts_interp)
+ predicted_sdfs_interp = predicted_sdfs_interp.squeeze(1)
+
+ ### compute the loss (l1 upper bound)
+ convexity_loss = torch.mean(torch.clamp(predicted_sdfs_interp - sdfs_interp, min=0))
+
+ if effect_step >= 0:
+ self.interp_pts.append(pts_interp.cpu().detach())
+ self.interp_pts_predsdf.append(predicted_sdfs_interp.cpu().detach())
+ self.interp_pts_sdfub.append(sdfs_interp.cpu().detach())
+ self.interp_end_pts.append(pts_neg.cpu().detach())
+ self.interp_end_pts_sdf.append(sdfs_neg.cpu().detach())
+
+ loss_weighted = {
+ 'convexity_loss': convexity_loss * self.cfg['convexity_weight'],
+ }
+ loss_unweighted = {
+ 'convexity_loss': convexity_loss,
+ }
+ return loss_weighted, loss_unweighted
+
+ def compute_support_point_loss(self, batch_ps, truncation):
+ """Compute support point loss."""
+ support_point_loss = torch.tensor(0) #.to(batch_ps.device)
+ extras = []
+ if self.contact_in_cam:
+ if batch_ps is None:
+ effect_step = -1
+ else:
+ support_point_loss, extras = self.support_point_loss_cic(batch_ps, truncation)
+ else:
+ support_point_loss, extras = self.support_point_loss_global(truncation)
+ loss_weighted = {
+ 'support_point_loss': support_point_loss * self.cfg['support_pts_weight'],
+ }
+ loss_unweighted = {
+ 'support_point_loss': support_point_loss,
+ }
+
+ return loss_weighted, loss_unweighted, extras
+
+ def hyperplane_loss_cic(self, batch_vs, truncation):
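+        """Hyperplane-constrained SDF loss in contact_in_cam mode: rows of
+        `batch_vs` hold sampled points in the camera frame, their SDF lower
+        bounds (in meters), and frame ids; points and bounds are converted to
+        the normalized space before the lower-bound loss is evaluated.
+        """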
+ steps = int(50000 / self.vs_batch_size) + 2
+ start_step = self.cfg['n_step'] - steps
+ effect_step = self.global_step - start_step if self.cfg['n_step'] > 500 else -1
+ if effect_step < 0 or self.hps_finished == 1:
+ pass
+ else:
+ if (effect_step+1)*self.vs_batch_size >= 50000 or self.global_step == self.cfg['n_step']:
+ self.hps_finished = 1
+ else:
+ self.hps_finished = 0
+
+ vs_in_cam = batch_vs[:, self.contact_ps_slice]
+ vs_sdf_lb_in_cam = batch_vs[:, self.contact_sdfs_slice]
+ vs_frame_ids = batch_vs[:, self.contact_frame_ids_slice].long()
+ vs_tf = self.c2w_array[vs_frame_ids]
+ if self.models['pose_array'] is not None:
+ vs_tf = self.models['pose_array'].get_matrices(vs_frame_ids)@vs_tf
+
+ glcam_in_cvcam = torch.tensor(GLCAM_IN_CVCAM, device=vs_in_cam.device, dtype=torch.float32).unsqueeze(0)
+ vs_tf = vs_tf @ torch.linalg.inv(glcam_in_cvcam)
+
+ vs_in_world = transform_pts(vs_in_cam, vs_tf)
+ valid_samples_vs = (torch.abs(vs_in_world)<=1).all(dim=-1).bool()
+
+ vs_in_world = vs_in_world[valid_samples_vs]
+ vs_sdf_lb_in_cam = vs_sdf_lb_in_cam[valid_samples_vs]
+
+ sampled_pts_flatten_hc = vs_in_world.view(-1,3).float().cuda()
+
+ # Convert sdf target to regression space
+ sdf_bounds_from_cnets = copy.deepcopy(vs_sdf_lb_in_cam)
+ sdf_bounds_from_cnets = \
+ contact_loss_utils.convert_meter_sdfs_to_normalized_sdfs(
+ sdfs=sdf_bounds_from_cnets, sc_factor=self.cfg['sc_factor'],
+ truncation=truncation
+ )
+
+ hc_sdf_loss = torch.tensor(0).to(vs_in_world.device)
+ if valid_samples_vs.sum() == 0:
+ empty_mask_hc = torch.tensor([], device=vs_in_world.device, dtype=torch.bool)
+ sdf_mask_hc = torch.tensor([], device=vs_in_world.device, dtype=torch.bool)
+ predicted_sdfs_hc = torch.tensor([], device=vs_in_world.device, dtype=torch.float32)
+ else:
+ predicted_sdfs_hc, _ = self.run_network_density(inputs=sampled_pts_flatten_hc)
+ predicted_sdfs_hc = predicted_sdfs_hc.squeeze(1)
+
+ try:
+ hc_sdf_loss, empty_mask_hc, sdf_mask_hc = get_hcsdf_loss(
+ predicted_sdf=predicted_sdfs_hc, sdf_lower_bound=sdf_bounds_from_cnets,
+ cfg=self.cfg)
+ except Exception as e:
+ print(f"Error in get_hcsdf_loss: {e}")
+ print(f"{valid_samples_vs.sum()=}, {valid_samples_vs.shape=}, {vs_in_world.shape=}, {sampled_pts_flatten_hc.shape=}")
+ # hc_sdf_loss = torch.tensor(0)
+ # empty_mask_hc = torch.tensor([], device=vs_in_world.device, dtype=torch.bool)
+ # sdf_mask_hc = torch.tensor([], device=vs_in_world.device, dtype=torch.bool)
+ raise e
+
+ empty_pts_hc = sampled_pts_flatten_hc[empty_mask_hc]
+ sdf_pts_hc = sampled_pts_flatten_hc[sdf_mask_hc]
+
+ extras = [sdf_pts_hc, empty_pts_hc, predicted_sdfs_hc, sdf_bounds_from_cnets, sdf_mask_hc, empty_mask_hc, effect_step]
+ return hc_sdf_loss, extras
+
+ def hyperplane_loss_global(self, truncation):
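+        """Hyperplane-constrained SDF loss using the globally accumulated
+        samples (self.vs) and their SDF lower bounds (self.sdf_bounds),
+        converted from meters to the normalized space before evaluation.
+        """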
+
+ num_samples = 10000
+
+ # Generate random indices, ensuring all samples are used first before
+ # repeating.
+ steps = int(self.vs.size(0) / num_samples) + 2
+ start_step = self.cfg['n_step'] - steps
+ effect_step = self.global_step - start_step if self.cfg['n_step'] > 500 else -1
+ if effect_step < 0 or self.hps_finished == 1:
+ indices = torch.randperm(self.vs.size(0))[:num_samples]
+ else:
+ if (effect_step+1)*num_samples >= len(self.vs) or self.global_step == self.cfg['n_step']:
+ end_idx = len(self.vs)
+ self.hps_finished = 1
+ else:
+ end_idx = (effect_step+1)*num_samples
+ self.hps_finished = 0
+ indices = torch.arange(effect_step*num_samples, end_idx)
+
+ # Load PLL data.
+ sdf_bounds_from_cnets = copy.deepcopy(self.sdf_bounds[indices])
+ sdf_bounds_from_cnets = \
+ contact_loss_utils.convert_meter_sdfs_to_normalized_sdfs(
+ sdfs=sdf_bounds_from_cnets, sc_factor=self.cfg['sc_factor'],
+ truncation=truncation
+ )
+
+ sampled_pts = copy.deepcopy(self.vs[indices])
+ sampled_pts = \
+ contact_loss_utils.convert_meter_points_to_normalized_space_for_sdf(
+ points=sampled_pts, sc_factor=self.cfg['sc_factor'],
+ offset=self.offset, translation=self.cfg['translation'],
+ bsdf_init_pose=self.ob_init_cam
+ )
+
+ sampled_pts_flatten_hc = sampled_pts.view(-1,3).float().cuda()
+ predicted_sdfs_hc, _ = self.run_network_density(inputs=sampled_pts_flatten_hc)
+ predicted_sdfs_hc = predicted_sdfs_hc.squeeze(1)
+ hc_sdf_loss, empty_mask_hc, sdf_mask_hc = get_hcsdf_loss(
+ predicted_sdf=predicted_sdfs_hc, sdf_lower_bound=sdf_bounds_from_cnets,
+ cfg=self.cfg)
+
+ empty_pts_hc = sampled_pts_flatten_hc[empty_mask_hc]
+ sdf_pts_hc = sampled_pts_flatten_hc[sdf_mask_hc]
+
+ extras = [sdf_pts_hc, empty_pts_hc, predicted_sdfs_hc, sdf_bounds_from_cnets, sdf_mask_hc, empty_mask_hc, effect_step]
+ return hc_sdf_loss, extras
+
+ def log_hpc_pts(self, extras_hpc):
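+        """Accumulate hyperplane-point diagnostics (points, predicted SDFs,
+        and SDF lower bounds) once the scheduled logging window has started
+        (effect_step >= 0).
+        """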
+ if len(extras_hpc) == 0:
+ return
+ sdf_pts_hc, empty_pts_hc, predicted_sdfs_hc, sdf_bounds_from_cnets, sdf_mask_hc, empty_mask_hc, effect_step = extras_hpc
+ if effect_step >= 0:
+ self.all_hps_sdf.append(sdf_pts_hc.cpu().detach())
+ self.all_hps_empty.append(empty_pts_hc.cpu().detach())
+ self.all_hps_sdf_predsdf.append(predicted_sdfs_hc[sdf_mask_hc].cpu().detach())
+ self.all_hps_empty_predsdf.append(
+ predicted_sdfs_hc[empty_mask_hc].cpu().detach())
+ self.all_hps_sdf_gtsdflb.append(
+ sdf_bounds_from_cnets[sdf_mask_hc].cpu().detach())
+ self.all_hps_empty_gtsdflb.append(
+ sdf_bounds_from_cnets[empty_mask_hc].cpu().detach())
+ return
+
+ def save_all_pts(self):
+        """Save contact-point (cps) and hyperplane-point (hps) diagnostics
+        once each of their sampling passes has finished; each set is written
+        only once per run.
+        """
+ if self.cps_finished:
+ folder = file_utils.nerf_results_sdf_inspection_subdir(
+ self.cfg['debug_dir'], self.cfg['bundlesdf_run_id'])
+ if op.exists(op.join(folder, 'cps_near_pcd.pt')):
+ pass
+ else:
+ print(f"{self.global_step=}, saving cps")
+ all_cps_sdf = torch.cat(self.all_cps_sdf, 0)
+ all_cps_empty = torch.cat(self.all_cps_empty, 0)
+ all_cps_sdf_predsdf = torch.cat(self.all_cps_sdf_predsdf, 0)
+ all_cps_empty_predsdf = torch.cat(self.all_cps_empty_predsdf, 0)
+ all_cps_sdf_gtsdf = torch.cat(self.all_cps_sdf_gtsdf, 0)
+ all_cps_empty_gtsdf = torch.cat(self.all_cps_empty_gtsdf, 0)
+
+ if self.store_slices:
+ if len(self.slice_cps) > 0:
+ all_cps_slices = torch.cat(self.slice_cps, 0)
+ all_cps_slices_anchor = torch.cat(self.slice_cps_anchor, 0)
+ all_cps_slices_predsdf = torch.cat(self.slice_cps_predsdf, 0)
+ else:
+ all_cps_slices = torch.tensor([], device='cuda', dtype=torch.float32)
+ all_cps_slices_anchor = torch.tensor([], device='cuda', dtype=torch.float32)
+ all_cps_slices_predsdf = torch.tensor([], device='cuda', dtype=torch.float32)
+
+ print(f"all_cps_slices: {all_cps_slices.shape}")
+ print(f"all_cps_slices_anchor: {all_cps_slices_anchor.shape}")
+ print(f"all_cps_slices_predsdf: {all_cps_slices_predsdf.shape}")
+
+ if len(self.interp_pts) > 0:
+ all_interp_pts = torch.cat(self.interp_pts, 0)
+ all_interp_pts_predsdf = torch.cat(self.interp_pts_predsdf, 0)
+ all_interp_pts_sdfub = torch.cat(self.interp_pts_sdfub, 0)
+ all_interp_end_pts = torch.cat(self.interp_end_pts, 0)
+ all_interp_end_pts_sdf = torch.cat(self.interp_end_pts_sdf, 0)
+ else:
+ all_interp_pts = torch.tensor([], device='cuda', dtype=torch.float32)
+ all_interp_pts_predsdf = torch.tensor([], device='cuda', dtype=torch.float32)
+ all_interp_pts_sdfub = torch.tensor([], device='cuda', dtype=torch.float32)
+ all_interp_end_pts = torch.tensor([], device='cuda', dtype=torch.float32)
+ all_interp_end_pts_sdf = torch.tensor([], device='cuda', dtype=torch.float32)
+
+ print(f"all_interp_pts: {all_interp_pts.shape}")
+
+ # 1: cps (contact points), 2: hps (hyperplane points)
+ # sdf: near space, empty: empty space (truncated)
+ print(f"saving cps to {folder}, {all_cps_sdf.shape=}, {all_cps_empty.shape=}")
+ torch.save(all_cps_sdf,
+ op.join(folder, 'cps_near_pcd.pt'))
+ torch.save(all_cps_empty,
+ op.join(folder, 'cps_empty_pcd.pt'))
+ torch.save(all_cps_sdf_predsdf,
+ op.join(folder, 'cps_near_sdf_pred.pt'))
+ torch.save(all_cps_empty_predsdf,
+ op.join(folder, 'cps_empty_sdf_pred.pt'))
+ torch.save(all_cps_sdf_gtsdf,
+ op.join(folder, 'cps_near_sdf_gt.pt'))
+ torch.save(all_cps_empty_gtsdf,
+ op.join(folder, 'cps_empty_sdf_gt.pt'))
+
+ if self.store_slices:
+ torch.save(all_cps_slices, op.join(folder, 'cps_slices.pt'))
+ torch.save(all_cps_slices_anchor, op.join(folder, 'cps_slices_anchor.pt'))
+ torch.save(all_cps_slices_predsdf, op.join(folder, 'cps_slices_predsdf.pt'))
+
+ torch.save(all_interp_pts, op.join(folder, 'interp_pts.pt'))
+ torch.save(all_interp_pts_predsdf, op.join(folder, 'interp_pts_predsdf.pt'))
+ torch.save(all_interp_pts_sdfub, op.join(folder, 'interp_pts_sdfub.pt'))
+ torch.save(all_interp_end_pts, op.join(folder, 'interp_end_pts.pt'))
+ torch.save(all_interp_end_pts_sdf, op.join(folder, 'interp_end_pts_sdf.pt'))
+
+ if self.store_sdf_pts:
+ torch.save(self.free_pts, op.join(folder, 'free_pts.pt'))
+ torch.save(self.free_pts_predsdf, op.join(folder, 'free_pts_predsdf.pt'))
+ torch.save(self.near_pts, op.join(folder, 'near_pts.pt'))
+ torch.save(self.near_pts_predsdf, op.join(folder, 'near_pts_predsdf.pt'))
+ torch.save(self.uncertain_pts, op.join(folder, 'uncertain_pts.pt'))
+ torch.save(self.uncertain_pts_predsdf, op.join(folder, 'uncertain_pts_predsdf.pt'))
+
+ print("Stopped: cps!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+
+ if self.hps_finished:
+ folder = file_utils.nerf_results_sdf_inspection_subdir(
+ self.cfg['debug_dir'], self.cfg['bundlesdf_run_id'])
+ if op.exists(op.join(folder, 'hps_near_pcd.pt')):
+ pass
+ else:
+ print(f"{self.global_step=}")
+ all_hps_sdf = torch.cat(self.all_hps_sdf, 0)
+ all_hps_empty = torch.cat(self.all_hps_empty, 0)
+ all_hps_sdf_predsdf = torch.cat(self.all_hps_sdf_predsdf, 0)
+ all_hps_empty_predsdf = torch.cat(self.all_hps_empty_predsdf, 0)
+ all_hps_sdf_gtsdflb = torch.cat(self.all_hps_sdf_gtsdflb, 0)
+ all_hps_empty_gtsdflb = torch.cat(self.all_hps_empty_gtsdflb, 0)
+ torch.save(all_hps_sdf,
+ op.join(folder, 'hps_near_pcd.pt'))
+ torch.save(all_hps_empty,
+ op.join(folder, 'hps_empty_pcd.pt'))
+ torch.save(all_hps_sdf_predsdf,
+ op.join(folder, 'hps_near_sdf_pred.pt'))
+ torch.save(all_hps_empty_predsdf,
+ op.join(folder, 'hps_empty_sdf_pred.pt'))
+ torch.save(all_hps_sdf_gtsdflb,
+ op.join(folder, 'hps_near_sdf_gt.pt'))
+ torch.save(all_hps_empty_gtsdflb,
+ op.join(folder, 'hps_empty_sdf_gt.pt'))
+ print("Stopped: hps!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+
+ return
+
+ def compute_hyperplane_constrained_loss(self, batch_vs, truncation):
+ """Compute hyperplane-constrained SDF loss."""
+ hc_sdf_loss = torch.tensor(0) #.to(batch_vs.device)
+ extras = []
+ if self.contact_in_cam:
+ if batch_vs is None:
+ effect_step = -1
+ else:
+ hc_sdf_loss, extras = self.hyperplane_loss_cic(batch_vs, truncation)
+ else:
+ hc_sdf_loss, extras = self.hyperplane_loss_global(truncation)
+ loss_weighted = {
+ 'hc_sdf_loss': hc_sdf_loss,
+ }
+ loss_unweighted = {
+ 'hc_sdf_loss': hc_sdf_loss,
+ }
+ return loss_weighted, loss_unweighted, extras
+
+ def compute_fs_rgb_loss(self, extras, front_mask, sample_weights):
+ fs_rgb_loss = torch.tensor(0).to(front_mask.device)
+ if self.cfg['fs_rgb_weight']>0:
+ fs_rgb_loss = ((
+ ((torch.sigmoid(extras['raw'][...,:3])-1) * front_mask[...,None])**2) \
+ * sample_weights[...,None]).mean()
+
+ loss_weighted = {
+ 'fs_rgb_loss': fs_rgb_loss * self.cfg['fs_rgb_weight'],
+ }
+ loss_unweighted = {
+ 'fs_rgb_loss': fs_rgb_loss,
+ }
+ return loss_weighted, loss_unweighted
+
+ def compute_eikonal_loss(self, extras, sdf):
+ eikonal_loss = torch.tensor(0).to(sdf.device)
+ if self.cfg['eikonal_weight']>0:
+ nerf_normals = extras['normals']
+ eikonal_loss = ((torch.norm(nerf_normals[sdf<1], dim=-1)-1)**2).mean()
+ loss_weighted = {
+ 'eikonal_loss': eikonal_loss * self.cfg['eikonal_weight'],
+ }
+ loss_unweighted = {
+ 'eikonal_loss': eikonal_loss,
+ }
+ return loss_weighted, loss_unweighted
+
+ def compute_regularization_losses(self):
+ """Compute regularization losses."""
+ reg_features = torch.tensor(0)
+ if self.models['feature_array'] is not None:
+ reg_features = (self.models['feature_array'].data ** 2).mean()
+
+ pose_reg = torch.tensor(0)
+ if self.models['pose_array'] is not None:
+ pose_reg = self.models['pose_array'].data[1:].norm()
+
+ loss_weighted = {
+ 'feature_reg': reg_features * self.cfg['feature_reg_weight'],
+ 'pose_reg': pose_reg * self.cfg['pose_reg_weight'],
+ }
+ loss_unweighted = {
+ 'feature_reg': reg_features,
+ 'pose_reg': pose_reg,
+ }
+ return loss_weighted, loss_unweighted
+
+ def save_images(self):
+ """Save rendered images for visualization."""
+ # Implementation for saving images periodically
+ ids = torch.unique(self.rays[:, self.ray_frame_id_slice]
+ ).data.cpu().numpy().astype(int).tolist()
+ ids.sort()
+ last = ids[-1]
+ ids = ids[::max(1,len(ids)//5)]
+ if last not in ids:
+ ids.append(last)
+ canvas = []
+ for frame_idx in ids:
+ rgb, depth, ray_mask, gt_rgb, gt_depth, _ = self.render_images(frame_idx)
+ mask_vis = (rgb*255*0.2 + ray_mask*0.8).astype(np.uint8)
+ mask_vis = np.clip(mask_vis,0,255)
+ rgb = np.concatenate((rgb,gt_rgb),axis=1)
+ far = self.cfg['far']*self.cfg['sc_factor']
+ gt_depth = np.clip(gt_depth, self.cfg['near']*self.cfg['sc_factor'], far)
+ depth_vis = np.concatenate((to8b(depth / far), to8b(gt_depth / far)),
+ axis=1)
+ depth_vis = np.tile(depth_vis[...,None],(1,1,3))
+ row = np.concatenate((to8b(rgb),depth_vis,mask_vis),axis=1)
+ canvas.append(row)
+ canvas = np.concatenate(canvas,axis=0).astype(np.uint8)
+ filename = op.join(self.cfg['nerf_temp_dir'],
+ f'image_step_{self.global_step:07d}.png')
+ imageio.imwrite(filename, canvas)
+ if self._run is not None:
+            self._run.add_artifact(filename)
+ return
+
+ def log_metrics(self, valid_samples, valid_rays, total_loss, loss_unwted, truncation):
+ """Log training metrics."""
+
+ msg = f"Iter: {self.global_step}, valid_samples: " + \
+ f"{valid_samples.sum()}/{torch.numel(valid_samples)}, " + \
+ f"valid_rays: {valid_rays.sum()}/{torch.numel(valid_rays)}, "
+ metrics = {
+ 'loss':total_loss.item(),
+ 'truncation(meter)': truncation/self.cfg['sc_factor'],
+ }
+ for key, value in loss_unwted.items():
+ if key not in metrics:
+ metrics[key] = value.item()
+ for k in metrics.keys():
+ msg += f"{k}: {metrics[k]:.7f}, "
+ msg += "\n"
+ logging.info(msg)
+
+ if self._run is not None:
+ for k in metrics.keys():
+ self._run.log_scalar(k,metrics[k],self.global_step)
+ return
+
+ def save_mesh(self):
+ """Save extracted mesh."""
+ # Implementation for saving mesh periodically
+ with torch.no_grad():
+ model = self.models['model_fine'] if self.models['model_fine'] is \
+ not None else self.models['model']
+ mesh = self.extract_mesh(isolevel=0,
+ voxel_size=self.cfg['mesh_resolution'])
+ self.mesh = copy.deepcopy(mesh)
+ if mesh is not None:
+ dir = op.join(
+ self.cfg['nerf_temp_dir'],
+ f'step_{self.global_step:07d}_mesh_normalized_space.obj')
+ mesh.export(dir)
+ if self._run is not None:
+ self._run.add_artifact(dir)
+ dir = op.join(
+ self.cfg['nerf_temp_dir'],
+ f'step_{self.global_step:07d}_mesh_real_world.obj')
+ if self.models['pose_array'] is not None:
+ _,offset = get_optimized_poses_in_real_world(
+ self.poses, self.models['pose_array'],
+ translation=self.cfg['translation'],
+ sc_factor=self.cfg['sc_factor'])
+ else:
+ offset = np.eye(4)
+ mesh = mesh_to_real_world(
+ mesh, offset, translation=self.cfg['translation'],
+ sc_factor=self.cfg['sc_factor'])
+ mesh.export(dir)
+ if self._run is not None:
+ self._run.add_artifact(dir)
+ return
+
+ def save_optimized_poses(self):
+ """Save optimized poses."""
+ # Implementation for saving optimized poses periodically
+ if self.models['pose_array'] is not None:
+ optimized_poses,offset = get_optimized_poses_in_real_world(
+ self.poses, self.models['pose_array'],
+ translation=self.cfg['translation'], sc_factor=self.cfg['sc_factor'])
+ else:
+ optimized_poses = self.poses
+ dir = op.join(self.cfg['nerf_temp_dir'],
+ f'step_{self.global_step:07d}_optimized_poses.txt')
+ np.savetxt(dir,optimized_poses.reshape(-1,4))
+ if self._run is not None:
+ self._run.add_artifact(dir)
+ return
+
+ def train_loop(self, batch, batch_ps=None, batch_vs=None):
+ """
+ Refactored training loop for NeRFRunner.
+
+ Args:
+ batch (torch.Tensor): Input batch of rays.
+ batch_ps (torch.Tensor, optional): Contact points batch.
+ batch_vs (torch.Tensor, optional): Hyperplane-constrained points batch.
+ """
+ # Extract targets from the batch
+ target_s = batch[:, self.ray_rgb_slice] # Color (N,3)
+ target_d = batch[:, self.ray_depth_slice] # Normalized scale (N)
+ target_mask = batch[:, self.ray_mask_slice].bool().reshape(-1)
+ frame_ids = batch[:, self.ray_frame_id_slice]
+
+ # Pre-computations
+ rgb, extras = self.render(
+ rays=batch, ray_ids=self.data_loader.batch_ray_ids.cuda(), frame_ids=frame_ids,
+ depth=target_d, lindisp=False, perturb=True,
+ raw_noise_std=self.cfg['raw_noise_std'],
+ near=batch[:, self.ray_near_slice], far=batch[:, self.ray_far_slice],
+ get_normals=False
+ )
+ valid_samples = extras['valid_samples'] # (N_ray, N_samples)
+ z_vals = extras['z_vals'] # (N_rand, N_samples + N_importance)
+ sdf = extras['raw'][..., -1]
+ pts = extras['pts']
+
+ # Compute ray and sample weights
+ ray_weights, sample_weights, valid_rays = self.compute_weights(batch, sdf, valid_samples, frame_ids)
+ truncation = self.get_truncation()
+
+ # Initialize total loss
+ total_loss = torch.tensor(0.0).cuda()
+ all_losses = {}
+
+ # Compute individual loss terms
+ loss_wted_rgb, loss_unwted_rgb = self.compute_rgb_loss(rgb, target_s, ray_weights, extras)
+ total_loss += loss_wted_rgb['rgb_loss'] + loss_wted_rgb['rgb0_loss']
+ all_losses.update(loss_unwted_rgb)
+
+ loss_wted_dep, loss_unwted_dep = self.compute_depth_loss(sdf, z_vals, target_d, ray_weights)
+ total_loss += loss_wted_dep['depth_loss']
+ all_losses.update(loss_unwted_dep)
+
+ loss_wted_sdf, loss_unwted_sdf, extras_sdf = self.compute_sdf_losses(sdf, z_vals, target_d, sample_weights, batch, truncation)
+ total_loss += loss_wted_sdf['uncertain_fs_loss'] + loss_wted_sdf['empty_loss'] + loss_wted_sdf['sdf_loss']
+ all_losses.update(loss_unwted_sdf)
+ front_mask, empty_space_mask, sdf_mask, uncertain_fs_mask, uncertain_fs_mask_far = extras_sdf['masks']
+ gt_sdf = extras_sdf['gt_sdf']
+
+ pts = pts.reshape(sdf_mask.shape[0], -1, 3)
+ near_pts = pts[sdf_mask]
+ near_sdf = sdf[sdf_mask]
+ near_sdf_gt = gt_sdf[sdf_mask]
+ loss_wted_cvv, loss_unwted_cvv = self.compute_convexity_vision_loss(near_pts, near_sdf, near_sdf_gt)
+ all_losses.update(loss_unwted_cvv)
+ total_loss += loss_wted_cvv['convexity_vision_loss']
+
+ self.log_pts(pts, sdf, extras_sdf['masks'])
+
+ if hasattr(self, 'ps') and hasattr(self, 'sdfs'):
+ loss_wted_sup, loss_unwted_sup, extras_sup = self.compute_support_point_loss(batch_ps, truncation)
+ loss_wted_cvx, loss_unwted_cvx = self.compute_convexity_loss(extras_sup, near_sdf_gt, near_pts, batch_ps)
+ if self.cfg['use_pll_for_training']:
+ total_loss += loss_wted_sup['support_point_loss'] + loss_wted_cvx['convexity_loss']
+ all_losses.update(loss_unwted_sup)
+ all_losses.update(loss_unwted_cvx)
+ self.log_slices_w_support(extras_sup)
+ else:
+ self.log_slices_wo_support()
+
+ if hasattr(self, 'vs') and hasattr(self, 'sdf_bounds'):
+ loss_wted_hpc, loss_unwted_hpc, extras_hpc = self.compute_hyperplane_constrained_loss(batch_vs, truncation)
+ if self.cfg['use_pll_for_training'] and self.cfg['use_hpc']:
+ total_loss += loss_wted_hpc['hc_sdf_loss']
+ all_losses.update(loss_unwted_hpc)
+ self.log_hpc_pts(extras_hpc)
+
+ self.save_all_pts()
+
+ gradient_loss = torch.tensor(0).to(sdf.device)
+ all_losses['gradient_loss'] = gradient_loss
+
+ loss_wted_fs_rgb, loss_unwted_fs_rgb = self.compute_fs_rgb_loss(extras, front_mask, sample_weights)
+ total_loss += loss_wted_fs_rgb['fs_rgb_loss']
+ all_losses.update(loss_unwted_fs_rgb)
+
+ loss_wted_eik, loss_unwted_eik = self.compute_eikonal_loss(extras, sdf)
+ total_loss += loss_wted_eik['eikonal_loss']
+ all_losses.update(loss_unwted_eik)
+
+ ### Point cloud loss (currently all zeros)
+ point_cloud_loss = torch.tensor(0).to(sdf.device)
+ point_cloud_normal_loss = torch.tensor(0).to(sdf.device)
+ all_losses['point_cloud_loss'] = point_cloud_loss
+ all_losses['point_cloud_normal_loss'] = point_cloud_normal_loss
+
+ # Regularization losses
+ loss_wted_reg, loss_unwted_reg = self.compute_regularization_losses()
+ total_loss += loss_wted_reg['feature_reg'] + loss_wted_reg['pose_reg']
+ all_losses.update(loss_unwted_reg)
+
+ ### Variation loss
+ variation_loss = torch.tensor(0)
+ all_losses['variation_loss'] = variation_loss
+
+ # Backpropagation
+ self.optimizer.zero_grad()
+ self.amp_scaler.scale(total_loss).backward()
+ self.amp_scaler.step(self.optimizer)
+ self.amp_scaler.update()
+
+ # Learning rate scheduling
+ if self.global_step % 10 == 0 and self.global_step > 0:
+ self.schedule_lr()
+
+ # Save weights and images periodically
+ if self.global_step % self.cfg['i_weights'] == 0 and self.global_step > 0:
+ self.save_weights(
+ out_file=op.join(self.cfg['nerf_temp_dir'], f'model_latest.pth'),
+ models=self.models
+ )
+
+ if self.global_step % self.cfg['i_img'] == 0 and self.global_step > 0:
+ self.save_images()
+
+ # Log metrics
+ if self.global_step % self.cfg['i_print'] == 0:
+ self.log_metrics(valid_samples, valid_rays, total_loss, all_losses, truncation)
+
+ # Save mesh periodically
+ if self.global_step % self.cfg['i_mesh'] == 0 and self.global_step > 0:
+ self.save_mesh()
+
+ # Save optimized poses periodically
+ if self.global_step % self.cfg['i_pose'] == 0 and self.global_step > 0:
+ self.save_optimized_poses()
diff --git a/offscreen_renderer.py b/bundlenets/offscreen_renderer.py
similarity index 98%
rename from offscreen_renderer.py
rename to bundlenets/offscreen_renderer.py
index ba55e87..0333aa2 100644
--- a/offscreen_renderer.py
+++ b/bundlenets/offscreen_renderer.py
@@ -10,7 +10,8 @@
import os,sys,time
os.environ["PYOPENGL_PLATFORM"] = "egl"
code_path = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(code_path)
+if code_path not in sys.path:
+ sys.path.append(code_path)
import open3d as o3d
import numpy as np
from PIL import Image
@@ -18,7 +19,7 @@
import time
import trimesh
import pyrender
-from Utils import *
+from bundlenets.Utils import *
from transformations import *
import numpy as np
from PIL import Image
diff --git a/bundlenets/run_custom.py b/bundlenets/run_custom.py
new file mode 100644
index 0000000..14b4451
--- /dev/null
+++ b/bundlenets/run_custom.py
@@ -0,0 +1,610 @@
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+
+from bundlenets.bundlesdf import *
+import click
+import os
+import os.path as op
+import shlex
+
+from bundlenets import mesh_utils, file_utils
+from bundlenets.file_utils import BUNDLENETS_REPO_DIR
+from bundlenets.segmentation_utils import Segmenter
+import sys
+sys.path.append(op.join(op.abspath(op.dirname(__file__)), '..'))
+import importlib
+cnets_math_utils = importlib.import_module("cnets-data-generation.math_utils")
+sys.path.append(op.join(op.abspath(op.dirname(__file__)), '../cnets-data-generation'))
+cnets_file_utils = importlib.import_module("cnets-data-generation.file_utils")
+sys.path.pop()
+
+
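+# Number of keyframes to accumulate before starting NeRF training during
+# tracking; with online NeRF disabled, the large threshold below effectively
+# defers NeRF training to the global refinement stage.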
+START_NERF_KEYFRAMES_INITIAL_TRACKING_ON = 5
+START_NERF_KEYFRAMES_INITIAL_TRACKING_OFF = 2000
+
+START_NERF_KEYFRAMES_THEREAFTER = 5
+
+
+def run_one_video(bundlesdf_run_id: str, video_dir: str, out_dir: str,
+ geometry_dir: str, use_pll: bool, use_hpc: bool, debug_level: int,
+ stride: int, online_nerf: bool, use_segmenter: bool = False,
+ use_gui: bool = False, toss_frames: list = [],
+ contact_in_cam_dir: str = None, cvw: float = 0,
+ octree_convex: bool = False, occluded: str = None,
+ octree_contact_filter: bool = False,
+ convexity_weight: float = 1, offset_frames: int = 1,
+ power_interp: float = 0.5) -> None:
+ """Run the pipeline for one video. First runs the tracker with no geometry
+ interactions, then calls run_one_video_global_nerf to refine the poses and
+ geometry.
+
+ Args:
+ bundlesdf_run_id: experiment name (already tested before calling this
+ function to be unique).
+ video_dir: input video directory, e.g. data/cube_2.
+ out_dir: output results directory, e.g.
+ results/cube_2/bundlesdf_iteration_1/bundlesdf_id_1.
+ geometry_dir: input geometry directory containing PLL results, if any (can
+ be None if no PLL results are to be used).
+ use_pll: whether to use PLL information to regress SDF. If True, the
+ geometry_dir should be provided. If False and the geometry_dir is still
+ provided, then the PLL-related loss terms will be computed but not used
+ for training the SDF.
+ debug_level: logging level.
+ stride: interval of frames to run; 1 means using every frame.
+ use_segmenter: whether to use the segmenter.
+ use_gui: whether to use the GUI.
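+    use_hpc: whether to use hyperplane-constraint information to regress SDF.
+    online_nerf: whether to run NeRF online during tracking.
+    toss_frames: frame indices during tosses, used as contact keyframes if
+      provided.
+    contact_in_cam_dir: directory of PLL contact points expressed in the
+      camera frame, if used.
+    cvw: convexity_vision_weight used in the NeRF configuration.
+    octree_convex: whether to use octree_convex.
+    occluded: occlusion label passed to the data reader, if any.
+    octree_contact_filter: whether to use the vision point octree to filter
+      contact points.
+    convexity_weight: convexity weight used in the NeRF configuration.
+    offset_frames: starting frame to offset from (passed to the data reader).
+    power_interp: power interpolation factor (1 is uniform; <1 emphasizes
+      support points, >1 emphasizes visible points).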
+ """
+ set_seed(0)
+
+ # BundleTrack configuration.
+ cfg_bundletrack = file_utils.load_base_toss_bundlesdf_configuration()
+ cfg_bundletrack['bundlesdf_run_id'] = bundlesdf_run_id
+ cfg_bundletrack['SPDLOG'] = debug_level
+ cfg_bundletrack['debug_dir'] = out_dir + "/" # Needs / for BundleTrack
+ cfg_bundletrack['toss_frames'] = toss_frames
+ file_utils.save_run_bundlesdf_configuration(cfg_bundletrack, out_dir)
+
+ # NeRF configuration.
+ cfg_nerf = file_utils.load_base_toss_nerf_configuration()
+ cfg_nerf['bundlesdf_run_id'] = bundlesdf_run_id
+ cfg_nerf['video_dir'] = video_dir
+ cfg_nerf['debug_dir'] = out_dir + "/" # Needs / for BundleTrack
+ cfg_nerf['nerf_temp_dir'] = file_utils.nerf_temp_subdir(out_dir)
+ cfg_nerf['nerf_dir'] = file_utils.nerf_results_subdir(out_dir,
+ bundlesdf_run_id)
+ cfg_nerf['geometry_dir'] = geometry_dir
+ cfg_nerf['use_pll_for_training'] = use_pll
+ cfg_nerf['use_hpc'] = use_hpc
+ cfg_nerf['contact_in_cam_dir'] = contact_in_cam_dir
+ cfg_nerf['convexity_weight'] = convexity_weight
+ cfg_nerf['convexity_vision_weight'] = cvw
+ cfg_nerf['octree_with_cnet'] = True
+ cfg_nerf['octree_convex'] = octree_convex
+ cfg_nerf['octree_contact_filter'] = octree_contact_filter
+ cfg_nerf['power_interp'] = power_interp
+ file_utils.save_run_outer_nerf_configuration(cfg_nerf, out_dir)
+
+ if use_segmenter:
+ segmenter = Segmenter()
+
+ start_nerf_frame = START_NERF_KEYFRAMES_INITIAL_TRACKING_ON \
+ if online_nerf else START_NERF_KEYFRAMES_INITIAL_TRACKING_OFF
+ tracker = BundleSdf(
+ cfg_track_yaml=file_utils.get_run_bundlesdf_configuration_filepath(out_dir),
+ cfg_nerf_yaml=file_utils.get_run_outer_nerf_configuration_filepath(out_dir),
+ start_nerf_keyframes=start_nerf_frame,
+ use_gui=use_gui,
+ )
+
+ # TODO: is shorter_side doing the right thing? at least it shouldn't be a
+ # magic number
+ reader = YcbineoatReader(video_dir=video_dir, shorter_side=480, occluded=occluded, offset_frames=offset_frames)
+
+ # Run the tracker on the video data.
+ for i in range(0, len(reader.color_files), stride):
+ # Load video frame and resize to match depth resolution.
+ color_file = reader.color_files[i]
+ color = cv2.imread(color_file)
+ depth = reader.get_depth(i)
+ H, W = depth.shape[:2]
+ color = cv2.resize(color, (W, H), interpolation=cv2.INTER_NEAREST)
+ depth = cv2.resize(depth, (W, H), interpolation=cv2.INTER_NEAREST)
+
+ # Load mask, ensuring it's in depth resolution.
+ if use_segmenter:
+      # milk_1-5 masks have values of 60 on the background person, so we
+      # threshold them in the segmenter.
+ mask = segmenter.run(color_file.replace(reader.rgb_folder,reader.mask_folder))
+ else:
+ mask = reader.get_mask(i)
+ mask = cv2.resize(mask, (W, H), interpolation=cv2.INTER_NEAREST)
+
+ # Erode the mask if necessary.
+ if cfg_bundletrack['erode_mask'] > 0:
+ kernel = np.ones((cfg_bundletrack['erode_mask'],
+ cfg_bundletrack['erode_mask']), np.uint8)
+ mask = cv2.erode(mask.astype(np.uint8), kernel)
+
+ # Run the tracker.
+ tracker.run(color=color,
+ depth=depth,
+ K=reader.K.copy(),
+ id_str=reader.id_strs[i],
+ mask=mask,
+ occ_mask=None,
+ pose_in_model=np.eye(4))
+
+ tracker.on_finish()
+
+ # Subsequently train the NeRF using the tracking results.
+ run_one_video_global_nerf(nerf_bundlesdf_run_id=bundlesdf_run_id,
+ video_dir=video_dir, out_dir=out_dir,
+ geometry_dir=geometry_dir, use_pll=use_pll, use_hpc=use_hpc,
+ contact_in_cam_dir=contact_in_cam_dir, cvw=cvw,
+ octree_convex=octree_convex, occluded=occluded,
+ octree_contact_filter=octree_contact_filter,
+ convexity_weight=convexity_weight, offset_frames=offset_frames,
+ power_interp=power_interp)
+
+
+def run_one_video_global_nerf(nerf_bundlesdf_run_id: str, video_dir: str,
+ out_dir: str, geometry_dir: str, use_pll: bool, use_hpc: bool,
+ contact_in_cam_dir: str, cvw: float, octree_convex: bool,
+ occluded: str, octree_contact_filter: bool,
+ convexity_weight: float, offset_frames: int,
+ power_interp: float) -> None:
+ """Run NeRF training given prior tracking results.
+
+ Args:
+ nerf_bundlesdf_run_id: experiment name (already ensured to be unique under
+ the associated tracking experiment encoded in out_dir).
+ video_dir: input video directory, e.g. data/cube_2.
+ out_dir: output results directory, e.g.
+ results/cube_2/bundlesdf_iteration_1/bundlesdf_id_1. Note that the
+ tracking results' associated BundleSDF experiment ID can be different from
+ this NeRF experiment ID.
+ geometry_dir: input geometry directory containing PLL results, if any (can
+ be None if no PLL results are to be used).
+ use_pll: whether to use PLL information to regress SDF. If True, the
+ geometry_dir should be provided. If False and the geometry_dir is still
+ provided, then the PLL-related loss terms will be computed but not used
+ for training the SDF.
+ """
+ set_seed(0)
+
+ # No need to load BundleTrack configuration.
+
+ # NeRF configuration: Incorporate some changes from the original NeRF
+ # configuration used when running run_one_video.
+ cfg_nerf = file_utils.load_run_outer_nerf_configuration(out_dir)
+
+ # Update the run ID and geometry/NeRF directories, which could be different
+ # from the original tracking results' associated NeRF configuration.
+ assert cfg_nerf['debug_dir'] == out_dir + "/", f'Expected cfg_nerf in ' + \
+ f'{file_utils.get_run_outer_nerf_configuration_filepath(out_dir)} to ' + \
+ f'have debug_dir={out_dir} but found {cfg_nerf["debug_dir"]}.'
+ cfg_nerf['bundlesdf_run_id'] = nerf_bundlesdf_run_id
+ cfg_nerf['nerf_dir'] = file_utils.nerf_results_subdir(
+ out_dir, nerf_bundlesdf_run_id)
+ cfg_nerf['geometry_dir'] = geometry_dir
+ cfg_nerf['use_pll_for_training'] = use_pll
+ cfg_nerf['use_hpc'] = use_hpc
+ cfg_nerf['contact_in_cam_dir'] = contact_in_cam_dir
+
+ # Update the NeRF settings.
+ cfg_nerf['n_step'] = 2000
+ cfg_nerf['N_samples'] = 64
+ cfg_nerf['N_samples_around_depth'] = 256
+ cfg_nerf['first_frame_weight'] = 1
+ cfg_nerf['finest_res'] = 256
+ cfg_nerf['num_levels'] = 16
+ cfg_nerf['mesh_resolution'] = 0.002 #0.001 for contact-related tiny mesh extraction
+ cfg_nerf['n_train_image'] = 500
+ cfg_nerf['fs_sdf'] = 0.1
+ cfg_nerf['frame_features'] = 2
+ cfg_nerf['rgb_weight'] = 100
+ cfg_nerf['contact_pts_weight'] = 1
+ cfg_nerf['support_pts_weight'] = 2
+ cfg_nerf['power_interp'] = power_interp
+ cfg_nerf['convexity_weight'] = convexity_weight
+ cfg_nerf['convexity_vision_weight'] = cvw
+ cfg_nerf['octree_with_cnet'] = True
+ cfg_nerf['octree_convex'] = octree_convex
+ cfg_nerf['octree_contact_filter'] = octree_contact_filter
+ cfg_nerf['hc_sdf_weight_lower'] = 1
+ cfg_nerf['hc_sdf_weight_upper'] = 0.1
+ cfg_nerf['pretrain_eikonal_weight'] = 0.1
+ cfg_nerf['pretrain_normal_direction_weight'] = 100
+ cfg_nerf['eps_minimal_surface'] = 0.5
+ cfg_nerf['pretrain_minimal_surface_weight'] = 100
+ cfg_nerf['pretrain_hessian_weight'] = 10
+ cfg_nerf['pretrain_finite_diff_weight'] = 10
+ cfg_nerf['i_img'] = np.inf
+ cfg_nerf['i_mesh'] = cfg_nerf['i_img']
+ cfg_nerf['i_nerf_normals'] = cfg_nerf['i_img']
+ cfg_nerf['i_save_ray'] = cfg_nerf['i_img']
+
+ # Save the NeRF settings for this run.
+ file_utils.save_run_online_nerf_configuration(cfg_nerf, out_dir)
+
+ # Instantiate BundleSDF and run the global NeRF training.
+ tracker = BundleSdf(
+ cfg_track_yaml=file_utils.get_run_bundlesdf_configuration_filepath(out_dir),
+ cfg_nerf_yaml=file_utils.get_run_online_nerf_configuration_filepath(out_dir),
+ start_nerf_keyframes=START_NERF_KEYFRAMES_THEREAFTER
+ )
+
+ reader = YcbineoatReader(video_dir=video_dir, downscale=1, occluded=occluded, offset_frames=offset_frames)
+
+ tracker.run_global_nerf(reader=reader, get_texture=True, tex_res=512)
+ tracker.on_finish()
+
+  print("Done.")
+
+
+def postprocess_mesh(out_folder):
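+  """Load the latest normalized-space mesh, undo the normalization transform
+  (translation and scale), keep the largest connected component, and export
+  the cleaned and smoothed meshes to out_folder/mesh/."""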
+ mesh_files = sorted(glob.glob(f'{out_folder}/**/nerf/*normalized_space.obj',recursive=True))
+ print(f"Using {mesh_files[-1]}")
+ os.makedirs(f"{out_folder}/mesh/",exist_ok=True)
+
+ print(f"\nSaving meshes to {out_folder}/mesh/\n")
+
+ mesh = trimesh.load(mesh_files[-1])
+ with open(f'{os.path.dirname(mesh_files[-1])}/config.yml','r') as ff:
+    cfg = yaml.load(ff, Loader=yaml.FullLoader)
+ tf = np.eye(4)
+ tf[:3,3] = cfg['translation']
+ tf1 = np.eye(4)
+ tf1[:3,:3] *= cfg['sc_factor']
+ tf = tf1@tf
+ mesh.apply_transform(np.linalg.inv(tf))
+ mesh.export(f"{out_folder}/mesh/mesh_real_scale.obj")
+
+ components = trimesh_split(mesh, min_edge=1000)
+ best_component = None
+ best_size = 0
+ for component in components:
+ if len(component.vertices)>best_size:
+ best_size = len(component.vertices)
+ best_component = component
+ mesh = trimesh_clean(best_component)
+
+ mesh.export(f"{out_folder}/mesh/mesh_biggest_component.obj")
+ mesh = trimesh.smoothing.filter_laplacian(mesh,lamb=0.5, iterations=3, implicit_time_integration=False, volume_constraint=True, laplacian_operator=None)
+ mesh.export(f'{out_folder}/mesh/mesh_biggest_component_smoothed.obj')
+
+
+def draw_pose(out_folder: str):
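+  """Overlay the estimated object pose as an oriented 3D bounding box on each
+  color frame and save the visualizations to out_folder/pose_vis/."""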
+ K = np.loadtxt(op.join(out_folder, 'cam_K.txt')).reshape(3,3)
+ color_files = sorted(glob.glob(op.join(out_folder, 'color/*')))
+ mesh = trimesh.load(op.join(out_folder, 'textured_mesh.obj'))
+ to_origin, extents = trimesh.bounds.oriented_bounds(mesh)
+ bbox = np.stack([-extents/2, extents/2], axis=0).reshape(2,3)
+ out_dir = op.join(out_folder, 'pose_vis')
+ os.makedirs(out_dir, exist_ok=True)
+ logging.info(f"Saving to {out_dir}")
+ for color_file in color_files:
+ color = imageio.imread(color_file)
+ pose = np.loadtxt(color_file.replace('.png','.txt').replace('color','ob_in_cam'))
+ pose = pose@np.linalg.inv(to_origin)
+ vis = draw_posed_3d_box(K, color, ob_in_cam=pose, bbox=bbox, line_color=(255,255,0))
+ id_str = os.path.basename(color_file).replace('.png','')
+ imageio.imwrite(f'{out_dir}/{id_str}.png', vis)
+
+
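+# Example invocation (illustrative; the asset and run names are placeholders):
+#   python -m bundlenets.run_custom --vision-asset cube_2 --run-name 00 \
+#     --mode run_video --clear-data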
+@click.command()
+@click.option('--run-name', default="")
+@click.option('--tracking-run-name',
+ type=str,
+ default=None,
+ help="the BundleSDF run ID whose tracking results to use, if " + \
+ "just running NeRF.")
+@click.option('--vision-asset',
+ type=str,
+ default=None,
+ help="directory of the asset folder e.g. cube_2, assumed to " + \
+ "be in a vision_{SYSTEM}/ folder; encodes system and tosses.")
+@click.option('--mode',
+ type=str,
+ default="run_video",
+ help="run_video / global_refine / get_mesh / " + \
+ "combined_learning / draw_pose / init_nerf / test_loss")
+@click.option('--pll-id',
+ type=str,
+ default=None,
+ help="what PLL run ID associated with geometry outputs to use" + \
+ " if provided, otherwise don't use PLL results.")
+@click.option('--cycle-iteration',
+ type=int,
+ default=None,
+ help="the current BundleSDF iteration number (1 means running" + \
+ " for the first time i.e. without any PLL data, 0 means " + \
+ "with PLL data generated from TagSLAM tracking, anything " + \
+ "higher means with PLL data generated from previous " + \
+ "BundleSDF runs).")
+@click.option('--use-pll/--no-pll',
+ default=None,
+ help="whether to use PLL information to regress SDF. If not " + \
+ "provided, will use PLL results to regress SDF if a PLL ID " + \
+ "is provided.")
+@click.option('--use-hpc/--no-hpc',
+ default=True,
+ help="whether to use hyperplane constraint information to regress SDF.")
+@click.option('--clear-data/--keep-data',
+ default=False,
+ help="whether to clear experiment results folder before running")
+@click.option('--use-segmenter',
+ type=bool,
+ default=False)
+@click.option('--use-gui',
+ type=bool,
+ default=False)
+@click.option('--stride',
+ type=int,
+ default=1,
+ help='interval of frames to run; 1 means using every frame')
+@click.option('--debug-level',
+ type=int,
+ default=2,
+ help='higher means more logging')
+@click.option('--online-nerf',
+ is_flag=True,
+ help="whether to use online nerf during tracking")
+@click.option('--cic',
+ is_flag=True,
+ help="whether or not contact_in_cam is used")
+@click.option('--ckf',
+ is_flag=True,
+ help="whether or not use all toss frames as keyframes (contact key frames)")
+@click.option('--convexity-vision-weight', '-cvw',
+ type=float,
+ default=1,
+ help="convexity_vision_weight used in the BundleSDF regardless of the contact info")
+@click.option('--octree-convex/--no-octree-convex',
+ help="whether or not to use octree_convex",
+ default=True)
+@click.option('--share-tracking',
+ is_flag=True,
+ help="whether to share tracking results with the last iteration")
+@click.option('--occluded',
+ type=str,
+ default=None,
+ help="if occluded, specify the label")
+@click.option('--octree-contact-filter', '-ocf',
+ is_flag=True,
+ help="whether to use vision point octree to filter contact points")
+@click.option('--convexity-weight', '-cw',
+ type=float,
+ default=1,
+ help="convexity weight used in the BundleSDF")
+@click.option('--offset-frames',
+ type=int,
+ default=1,
+ help="starting frame to offset from")
+@click.option('-pip', '--power-interp',
+ type=float,
+ default=0.5,
+ help="power interpolation factor. 1 means uniform. " + \
+ "<1 means more emphasis on the support points. " + \
+ ">1 means more emphasis on the visible points.")
+def main_command(run_name: str, tracking_run_name: str, vision_asset: str,
+ mode: str, pll_id: str, cycle_iteration: int, use_pll: bool, use_hpc: bool,
+ clear_data: bool, use_segmenter: bool, use_gui: bool,
+ stride: int, debug_level: int, online_nerf: bool, cic: bool, ckf: bool,
+ convexity_vision_weight: float, octree_convex: bool, share_tracking: bool,
+ occluded: str, octree_contact_filter: bool, convexity_weight: float,
+ offset_frames: int, power_interp: float):
+ # pylint: disable=too-many-arguments
+ # Infer the cycle iteration if not provided.
+ if cycle_iteration is None:
+ cycle_iteration = 1 if pll_id is None else 2
+ assert cycle_iteration >= 0, f'Invalid cycle_iteration: {cycle_iteration}.'
+
+ # Infer whether to use PLL results for training if not provided.
+ if use_pll:
+ assert pll_id is not None, 'PLL ID must be provided if training with ' + \
+ 'PLL data.'
+ elif use_pll is None:
+    use_pll = pll_id is not None
+
+ if use_pll is False:
+ use_hpc = False
+
+ # Set random seed before doing anything else.
+ print('Set random seed before doing anything.')
+ set_seed(0)
+
+ # 1) Create a BundleSDF experiment run ID.
+ bundlesdf_id = file_utils.format_run_id(run_name, is_bundlesdf_not_pll=True)
+ pll_id = file_utils.format_run_id(pll_id, is_bundlesdf_not_pll=False)
+
+ # 2) Determine if running tracker and NeRF or just NeRF only from an existing
+ # tracking run's results.
+ if tracking_run_name is not None:
+ track_bundlesdf_id = file_utils.format_run_id(
+ tracking_run_name, is_bundlesdf_not_pll=True)
+
+ tracking_out_dir = file_utils.results_dir(
+ vision_asset, cycle_iteration, track_bundlesdf_id, create=False)
+ assert op.isdir(tracking_out_dir), f'Invalid tracking run ID: ' + \
+ f'{tracking_run_name} not found in results directory (looked for ' + \
+ f'{tracking_out_dir}).'
+
+ else:
+ track_bundlesdf_id = bundlesdf_id
+ tracking_out_dir = file_utils.results_dir(
+ vision_asset, cycle_iteration, bundlesdf_id, create=False)
+
+ # 3) Decode the provided asset directory to obtain the system and start/end
+ # tosses.
+ assert vision_asset in os.listdir(file_utils.top_video_dir()), \
+ f'Invalid asset directory: {vision_asset} not found in data directory.'
+ assert '_' in vision_asset, f'Invalid asset directory: {vision_asset}.'
+ system = f"vision_{'_'.join(vision_asset.split('_')[:-1])}"
+
+ assert file_utils.check_valid_system(system), f'Invalid vision system ' + \
+ f'in {vision_asset=}.'
+
+ if system == 'vision_2022-11-18-15-10-24':
+ # Bundlesdf demo data, no tosses.
+ pass
+ else:
+ toss_key = vision_asset.split('_')[-1]
+ start_toss = int(toss_key.split('-')[0])
+ end_toss = start_toss if '-' not in toss_key else \
+ int(toss_key.split('-')[-1])
+    assert start_toss <= end_toss, f'Invalid toss range: ' + \
+      f'{start_toss}-{end_toss} inferred from {vision_asset=}.'
+
+ # 4) Directory management for video inputs, geometry inputs, tracking inputs/
+ # outputs.
+ video_dir = file_utils.video_dir(vision_asset)
+ # Subtract one from current cycle iteration to get the geometry directory.
+ if pll_id is None:
+ geometry_cycle_iteration = 0
+ elif share_tracking:
+ geometry_cycle_iteration = cycle_iteration
+ else:
+ geometry_cycle_iteration = cycle_iteration-1
+ geometry_dir = file_utils.geometry_dir(vision_asset, geometry_cycle_iteration,
+ pll_id)
+ out_dir = file_utils.results_dir(vision_asset, cycle_iteration,
+ track_bundlesdf_id, create=False)
+ nerf_dir = file_utils.nerf_results_subdir(out_dir, bundlesdf_id,
+ create=False)
+
+ # Option 1: Read the cnets-data-generation/assets/config.yaml for the interval of toss frames.
+ toss_frames = []
+ contact_in_cam_dir = None
+ if ckf:
+ assert not cic, 'Cannot use both ckf and cic.'
+ obj_name = vision_asset.split('_')[:-1]
+ obj_name = '_'.join(obj_name)
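+    # Map each toss's relative start/end frames (given w.r.t. the toss's ROS
+    # start time) to absolute BundleSDF frame indices via the recorded
+    # timestamps, then mark the frames strictly inside each toss.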
+ relative_start_frames = np.array([cnets_file_utils.load_field_from_yaml(
+ obj_name, toss_i, 'start_frame') for toss_i in range(
+ start_toss, end_toss+1)])
+ relative_end_frames = np.array([cnets_file_utils.load_field_from_yaml(
+ obj_name, toss_i, 'end_frame') for toss_i in range(
+ start_toss, end_toss+1)])
+ start_ros_times = np.array([cnets_file_utils.load_toss_time_from_yaml(
+ obj_name, toss_i, 'start_time', as_ros_time=True) for toss_i \
+ in range(start_toss, end_toss+1)])
+ cnets_data_gen_dir = cnets_file_utils.cnets_data_gen_dataset_dir(
+ vision_asset, check_exists=True)
+ bundlesdf_times = np.loadtxt(
+ op.join(cnets_data_gen_dir, 'bundlesdf_timestamps.txt'))
+
+ toss_start_frames = cnets_math_utils.convert_relative_frames_to_absolute(
+ relative_start_frames, bundlesdf_times, start_ros_times)
+ toss_end_frames = cnets_math_utils.convert_relative_frames_to_absolute(
+ relative_end_frames, bundlesdf_times, start_ros_times)
+ toss_frames = np.concatenate([np.arange(start+1, end) for start, end in zip(
+ toss_start_frames, toss_end_frames)]) # Exclude both start and end frames
+ toss_frames = toss_frames.tolist()
+ print(f"Mode ckf (contact keyframes), using toss frames: {toss_frames}")
+
+ if cic:
+ assert cycle_iteration > 1 or (cycle_iteration >= 1 and share_tracking), \
+ f'Invalid {cycle_iteration=} for contact_in_cam.'
+ # Option 2: Read the folder geometry/toss_id/bundlesdf_iteration_1/pll_id/contact_in_cam
+ # and these frames should be added to the keyframes
+ contact_in_cam_dir = file_utils.get_geometry_contact_in_cam_path(geometry_dir)
+ toss_frames = sorted(os.listdir(os.path.join(contact_in_cam_dir, 'from_support_points')))
+ toss_frames = [int(frame) for frame in toss_frames]
+ print(f"Mode cic (contact_in_cam), using toss frames: {toss_frames}")
+
+ # If doing tracking and NeRF training together, check to clear the full
+ # tracking results directory.
+ if op.exists(out_dir) and track_bundlesdf_id==bundlesdf_id:
+ if clear_data:
+ os.system(f'rm -r {out_dir}')
+ else:
+ print(f'Directory {out_dir} already exists and not set to ' \
+ + f'clear (run with --clear-data next time). Exiting.')
+ exit()
+ file_utils.assure_created(out_dir)
+
+ # If tracking already happened and just running NeRF, check to clear NeRF
+ # directory only.
+ if op.exists(nerf_dir):
+ if clear_data:
+ os.system(f'rm -r {nerf_dir}')
+ else:
+ print(f'Directory {nerf_dir} already exists and not set to ' \
+ + f'clear (run with --clear-data next time). Exiting.')
+ exit()
+ file_utils.assure_created(nerf_dir)
+
+ # Check the mode for subsequent actions.
+ if mode=='run_video':
+ run_one_video(
+ bundlesdf_run_id=bundlesdf_id,
+ video_dir=video_dir,
+ out_dir=out_dir,
+ geometry_dir=geometry_dir,
+ use_pll=use_pll,
+ use_hpc=use_hpc,
+ debug_level=debug_level,
+ stride=stride,
+ online_nerf=online_nerf,
+ use_segmenter=use_segmenter,
+ use_gui=use_gui,
+ toss_frames=toss_frames,
+ contact_in_cam_dir=contact_in_cam_dir,
+ cvw=convexity_vision_weight,
+ octree_convex=octree_convex,
+ occluded=occluded,
+ octree_contact_filter=octree_contact_filter,
+ convexity_weight=convexity_weight,
+ offset_frames=offset_frames,
+ power_interp=power_interp,
+ )
+
+ elif mode=='global_refine':
+    run_one_video_global_nerf(
+      nerf_bundlesdf_run_id=bundlesdf_id, video_dir=video_dir, out_dir=out_dir,
+      geometry_dir=geometry_dir, use_pll=use_pll, use_hpc=use_hpc,
+      contact_in_cam_dir=contact_in_cam_dir, cvw=convexity_vision_weight,
+      octree_convex=octree_convex, occluded=occluded,
+      octree_contact_filter=octree_contact_filter,
+      convexity_weight=convexity_weight, offset_frames=offset_frames,
+      power_interp=power_interp)
+
+ elif mode=='get_mesh':
+ postprocess_mesh(out_folder=out_dir)
+
+ elif mode=='draw_pose':
+ draw_pose(out_folder=out_dir)
+
+ elif mode=='init_nerf':
+    # TODO: run_one_video_global_nerf does not take a shapefile argument, so
+    # shape-initialized NeRF (e.g. from gt_cube_simple.obj) needs reworking.
+    raise NotImplementedError('init_nerf mode is not currently supported.')
+
+ elif mode=='test_loss':
+ run_one_video_global_nerf(
+ nerf_bundlesdf_run_id=bundlesdf_id,
+ video_dir=video_dir,
+ out_dir=out_dir,
+ geometry_dir=geometry_dir,
+ use_pll=use_pll,
+ use_hpc=use_hpc,
+ contact_in_cam_dir=contact_in_cam_dir,
+ cvw=convexity_vision_weight,
+ octree_convex=octree_convex,
+ occluded=occluded,
+ octree_contact_filter=octree_contact_filter,
+ convexity_weight=convexity_weight,
+ offset_frames=offset_frames,
+ power_interp=power_interp,
+ )
+
+ else:
+    raise RuntimeError(f'Unknown mode: {mode}.')
+
+
+if __name__ == '__main__':
+ main_command() # pylint: disable=no-value-for-parameter
+
+
diff --git a/run_ho3d.py b/bundlenets/run_ho3d.py
similarity index 78%
rename from run_ho3d.py
rename to bundlenets/run_ho3d.py
index aa52c50..e5f0829 100644
--- a/run_ho3d.py
+++ b/bundlenets/run_ho3d.py
@@ -7,12 +7,14 @@
# license agreement from NVIDIA CORPORATION is strictly prohibited.
-from bundlesdf import *
+from bundlenets.bundlesdf import *
import argparse
import os,sys
-CODE_DIR = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(f'{CODE_DIR}/BundleTrack/scripts')
from data_reader import *
+from bundlenets import file_utils
+from bundlenets.file_utils import BUNDLENETS_REPO_DIR
+
+sys.path.append(f'{file_utils.BUNDLENETS_REPO_DIR}/BundleTrack/scripts')
def run_one_video(video_dir,out_dir):
@@ -27,28 +29,26 @@ def run_one_video(video_dir,out_dir):
print(f"{out_folder} done before, skip")
return
- os.system(f"rm -rf {out_folder} && mkdir -p {out_folder}")
+ file_utils.remove_and_add_directory(out_folder)
- code_dir = os.path.dirname(os.path.realpath(__file__))
- cfg_bundletrack = yaml.load(open(f"{code_dir}/BundleTrack/config_ho3d.yml",'r'))
- cfg_bundletrack['data_dir'] = video_dir
+ cfg_bundletrack = file_utils.load_base_ho3d_bundlesdf_configuration()
+ cfg_bundletrack['video_dir'] = video_dir
cfg_bundletrack['SPDLOG'] = 2
cfg_bundletrack['depth_processing']["zfar"] = 1
cfg_bundletrack['debug_dir'] = out_folder
cfg_track_dir = f'{out_folder}/config_bundletrack.yml'
yaml.dump(cfg_bundletrack, open(cfg_track_dir,'w'))
- cfg_nerf = yaml.load(open(f"{code_dir}/config.yml",'r'))
+ cfg_nerf = yaml.load(open(f"{BUNDLENETS_REPO_DIR}/config.yml",'r'))
cfg_nerf['trunc_start'] = 0.01
cfg_nerf['trunc'] = 0.01
cfg_nerf['down_scale_ratio'] = 1
cfg_nerf['far'] = cfg_bundletrack['depth_processing']["zfar"]
- cfg_nerf['datadir'] = f"{out_folder}/nerf_with_bundletrack_online"
- cfg_nerf['save_dir'] = copy.deepcopy(cfg_nerf['datadir'])
+ cfg_nerf['nerf_temp_dir'] = f"{out_folder}/nerf_with_bundletrack_online"
cfg_nerf_dir = f'{out_folder}/config_nerf.yml'
yaml.dump(cfg_nerf, open(cfg_nerf_dir,'w'))
- tracker = BundleSdf(cfg_track_dir=cfg_track_dir, cfg_nerf_dir=cfg_nerf_dir, start_nerf_keyframes=5, use_gui=args.use_gui)
+ tracker = BundleSdf(cfg_track_yaml=cfg_track_dir, cfg_nerf_yaml=cfg_nerf_dir, start_nerf_keyframes=5, use_gui=args.use_gui)
for i,color_file in enumerate(reader.color_files):
color = cv2.imread(color_file)
@@ -74,7 +74,7 @@ def run_one_video_global_nerf(video_dir,out_dir):
video_name = reader.get_video_name()
out_folder = f'{out_dir}/{video_name}/' #!NOTE there has to be a / in the end
- tracker = BundleSdf(cfg_track_dir=f"{out_folder}/config_bundletrack.yml", cfg_nerf_dir=f"{out_folder}/config_nerf.yml", start_nerf_keyframes=5, use_gui=False)
+ tracker = BundleSdf(cfg_track_yaml=f"{out_folder}/config_bundletrack.yml", cfg_nerf_yaml=f"{out_folder}/config_nerf.yml", start_nerf_keyframes=5, use_gui=False)
tracker.cfg_nerf['n_step'] = 2000
tracker.cfg_nerf['N_samples'] = 256
tracker.cfg_nerf['N_samples'] = 128
@@ -89,8 +89,7 @@ def run_one_video_global_nerf(video_dir,out_dir):
tracker.cfg_nerf['i_save_ray'] = tracker.cfg_nerf['i_img']
tracker.debug_dir = f'{out_folder}'
- tracker.cfg_nerf['datadir'] = f"{tracker.debug_dir}/nerf_with_bundletrack_online"
- tracker.cfg_nerf['save_dir'] = copy.deepcopy(tracker.cfg_nerf['datadir'])
+ tracker.cfg_nerf['nerf_temp_dir'] = f"{tracker.debug_dir}/nerf_with_bundletrack_online"
tracker.run_global_nerf()
diff --git a/bundlenets/scripts_misc/check_bsdf_results_completed.py b/bundlenets/scripts_misc/check_bsdf_results_completed.py
new file mode 100644
index 0000000..4b4c509
--- /dev/null
+++ b/bundlenets/scripts_misc/check_bsdf_results_completed.py
@@ -0,0 +1,34 @@
+"""Check if the files 'textured_mesh.obj' and 'mesh_cleaned.obj' exist in the specified directories.
+They are the geometry outputs of BundleSDF.
+"""
+
+import os
+
+def check_files(result_paths):
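+    """Walk each result path and report whether textured_mesh.obj and
+    mesh_cleaned.obj were produced anywhere under it."""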
+ for path in result_paths:
+ textured_mesh_exists = False
+ mesh_cleaned_exists = False
+
+ for root, dirs, files in os.walk(path):
+ if 'textured_mesh.obj' in files:
+ textured_mesh_exists = True
+ if 'mesh_cleaned.obj' in files:
+ mesh_cleaned_exists = True
+
+ if textured_mesh_exists and mesh_cleaned_exists:
+ break
+
+ print(f"Checking path: {path}")
+ print(f" textured_mesh.obj exists: {textured_mesh_exists}")
+ print(f" mesh_cleaned.obj exists: {mesh_cleaned_exists}")
+
+# Example usage
+result_paths = [
+ "results/robotocc_oatly_7/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-t11-cic",
+ "results/robotocc_oatly_7/bundlesdf_iteration_2/bundlesdf_id_00-t11-cic/nerf_runs/bundlesdf_id_00-t11-cic",
+ "results/robotocc_bakingbox_12/bundlesdf_iteration_2/bundlesdf_id_00-t11-cic/nerf_runs/bundlesdf_id_00-t11-cic",
+ "results/robotocc_toblerone_9/bundlesdf_iteration_2/bundlesdf_id_00-t11-cic/nerf_runs/bundlesdf_id_00-t11-cic",
+ "results/robotocc_bakingbox_9/bundlesdf_iteration_2/bundlesdf_id_00-t11-cic/nerf_runs/bundlesdf_id_00-t11-cic"
+]
+
+check_files(result_paths)
\ No newline at end of file
diff --git a/bundlenets/scripts_misc/check_eval_results_valid.py b/bundlenets/scripts_misc/check_eval_results_valid.py
new file mode 100644
index 0000000..3b99497
--- /dev/null
+++ b/bundlenets/scripts_misc/check_eval_results_valid.py
@@ -0,0 +1,20 @@
+"""Read the evaluation result of all experiments with a specific tag.
+An example is shown to check a specific metric in the result file.
+"""
+
+import os
+import yaml
+
+EVAL_RESULT_ROOT = "/mnt/data0/minghz/repos/bundlenets/cnets-data-generation/evaluation"
+EXP_TAG = "00-cvwo_00-cvwo_1"
+
+for exp in os.listdir(EVAL_RESULT_ROOT):
+ if EXP_TAG in exp:
+ result_path = os.path.join(EVAL_RESULT_ROOT, exp, "results.yaml")
+ if not os.path.exists(result_path):
+ print(f"Missing result for {exp}")
+ continue
+ with open(result_path, 'r') as f:
+ results = yaml.load(f, Loader=yaml.FullLoader)
+ if results['tracking_metrics']['against_bundlesdf']['penetration_learned_geom_estimated_traj']['full']['auc'] is None:
+ print(f"{exp}")
\ No newline at end of file
diff --git a/bundlenets/scripts_misc/list_folders_size.py b/bundlenets/scripts_misc/list_folders_size.py
new file mode 100644
index 0000000..2062493
--- /dev/null
+++ b/bundlenets/scripts_misc/list_folders_size.py
@@ -0,0 +1,27 @@
+"""This script lists the sizes of all folders in a given directory and writes the results in descending order to a text file.
+"""
+
+import os
+
+def get_folder_size(folder_path):
+ total_size = 0
+ for dirpath, dirnames, filenames in os.walk(folder_path):
+ for f in filenames:
+ fp = os.path.join(dirpath, f)
+ total_size += os.path.getsize(fp)
+ total_size_mb = total_size / (1024 * 1024) # Convert bytes to megabytes
+ print(f"Folder {folder_path} size: {total_size_mb:.2f} MB")
+ return total_size_mb
+
+def list_folders_size(path):
+ folders = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
+ folder_sizes = [(folder, get_folder_size(os.path.join(path, folder))) for folder in folders]
+ folder_sizes.sort(key=lambda x: x[1], reverse=True)
+
+ with open('folder_sizes.txt', 'w') as f:
+ for folder, size in folder_sizes:
+ f.write(f"{folder}: {size:.2f} MB\n")
+
+if __name__ == "__main__":
+ BSDF_RESULT_ROOT = "/mnt/data0/minghz/repos/bundlenets/results"
+ list_folders_size(BSDF_RESULT_ROOT)
\ No newline at end of file
diff --git a/bundlenets/scripts_misc/move_pll_results.py b/bundlenets/scripts_misc/move_pll_results.py
new file mode 100644
index 0000000..1a4f6b8
--- /dev/null
+++ b/bundlenets/scripts_misc/move_pll_results.py
@@ -0,0 +1,20 @@
+"""This script moves all result folders from the source directory to the destination directory,
+except those that start with EXP_PREFIX_TO_EXCLUDE.
+"""
+
+import os
+import shutil
+
+PLL_RESULT_ROOT = '/mnt/data0/minghz/repos/bundlenets/dair_pll/results'
+PLL_RESULT_ROOT_BK = '/mnt/data2/minghz/bundlesdf/dair_pll/results'
+EXP_PREFIX_TO_EXCLUDE = 'vision_robotocc'
+
+# Ensure the destination directory exists
+os.makedirs(PLL_RESULT_ROOT_BK, exist_ok=True)
+
+# Iterate over all items in the source directory
+for item in os.listdir(PLL_RESULT_ROOT):
+ item_path = os.path.join(PLL_RESULT_ROOT, item)
+ if os.path.isdir(item_path) and not item.startswith(EXP_PREFIX_TO_EXCLUDE):
+ shutil.move(item_path, PLL_RESULT_ROOT_BK)
+ print(f'Moved {item_path}')
\ No newline at end of file
diff --git a/bundlenets/scripts_misc/shrink_bsdf_cps_slices.py b/bundlenets/scripts_misc/shrink_bsdf_cps_slices.py
new file mode 100644
index 0000000..87a1910
--- /dev/null
+++ b/bundlenets/scripts_misc/shrink_bsdf_cps_slices.py
@@ -0,0 +1,94 @@
+"""This script processes BundleSDF result folders containing cps_slices.pt and subsamples them to save storage.
+"""
+
+import os
+import torch
+import random
+
+
+# cps_slices.pt and cps_slices_predsdf.pt are saved to visualize the learned SDF
+# around the contact points in 2D slices.
+# They are large and can be subsampled to save space.
+BSDF_RESULT_ROOT = "/mnt/data0/minghz/repos/bundlenets/results"
+N_SLICES_THRESHOLD = 50
+SUBSAMPLE_RATIO = 0.2
+
+def process_files(base_path, print_only=False):
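+    """Walk the BundleSDF results tree and randomly subsample cps_slices.pt and
+    cps_slices_predsdf.pt tensors with at least N_SLICES_THRESHOLD slices by
+    SUBSAMPLE_RATIO, logging the storage saved. With print_only=True, only the
+    sizes are reported and nothing is rewritten."""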
+ total_saved_storage = 0
+ total_original_size = 0
+
+ log_file_path = 'cps_slices_storage_shrink_log.txt'
+ with open(log_file_path, 'w') as log_file:
+ for x in os.listdir(base_path):
+ if not os.path.isdir(os.path.join(base_path, x)):
+ continue
+ for y in os.listdir(os.path.join(base_path, x)):
+ if not os.path.isdir(os.path.join(base_path, x, y)):
+ continue
+ for z in os.listdir(os.path.join(base_path, x, y)):
+ if not os.path.isdir(os.path.join(base_path, x, y, z)):
+ continue
+ for w in os.listdir(os.path.join(base_path, x, y, z, 'nerf_runs')):
+ if not os.path.isdir(os.path.join(base_path, x, y, z, 'nerf_runs', w)):
+ continue
+
+ cps_slices_path = os.path.join(base_path, x, y, z, 'nerf_runs', w, 'sdf_inspection', 'cps_slices.pt')
+ cps_slices_predsdf_path = os.path.join(base_path, x, y, z, 'nerf_runs', w, 'sdf_inspection', 'cps_slices_predsdf.pt')
+
+ if os.path.exists(cps_slices_path) and os.path.exists(cps_slices_predsdf_path):
+ cps_slices = torch.load(cps_slices_path, map_location='cpu')
+ cps_slices_predsdf = torch.load(cps_slices_predsdf_path, map_location='cpu')
+
+ original_size_cps_slices = cps_slices.numel() * cps_slices.element_size()
+ original_size_cps_slices_predsdf = cps_slices_predsdf.numel() * cps_slices_predsdf.element_size()
+
+ total_original_size += (original_size_cps_slices + original_size_cps_slices_predsdf)
+ log_lines = [
+ f"Processed {cps_slices_path} and {cps_slices_predsdf_path}",
+ f"Accumulated original size: {total_original_size / (1024 * 1024):.2f} MB",
+ f"Original size: {(original_size_cps_slices + original_size_cps_slices_predsdf) / (1024 * 1024):.2f} MB"
+ ]
+ for line in log_lines:
+ log_file.write(line + '\n')
+ print(line)
+
+ flag_subsample = True
+ n = cps_slices.shape[0]
+ if n < N_SLICES_THRESHOLD: # or size_original threshold
+ print(f"Skipping {cps_slices_path} and {cps_slices_predsdf_path} due to small size, n = {n}")
+ flag_subsample = False
+ if not print_only:
+ if not flag_subsample:
+ cps_slices_subsampled = cps_slices
+ cps_slices_predsdf_subsampled = cps_slices_predsdf
+ else:
+ indices = random.sample(range(n), int(n * SUBSAMPLE_RATIO))
+ cps_slices_subsampled = cps_slices[indices]
+ cps_slices_predsdf_subsampled = cps_slices_predsdf[indices]
+
+ torch.save(cps_slices_subsampled, cps_slices_path)
+ torch.save(cps_slices_predsdf_subsampled, cps_slices_predsdf_path)
+
+ new_size_cps_slices = cps_slices_subsampled.numel() * cps_slices_subsampled.element_size()
+ new_size_cps_slices_predsdf = cps_slices_predsdf_subsampled.numel() * cps_slices_predsdf_subsampled.element_size()
+
+ saved_storage = (original_size_cps_slices + original_size_cps_slices_predsdf) - (new_size_cps_slices + new_size_cps_slices_predsdf)
+ total_saved_storage += saved_storage
+ log_lines = [
+ f"New size: {(new_size_cps_slices + new_size_cps_slices_predsdf) / (1024 * 1024):.2f} MB",
+ f"Saved storage: {saved_storage / (1024 * 1024):.2f} MB\n"
+ ]
+ for line in log_lines:
+ log_file.write(line + '\n')
+ print(line)
+
+        if not print_only:
+            log_file.write(f"Total saved storage: {total_saved_storage / (1024 * 1024):.2f} MB\n")
+            print(f"Total saved storage: {total_saved_storage / (1024 * 1024):.2f} MB")
+
+        log_file.write(f"Total original size: {total_original_size / (1024 * 1024):.2f} MB\n")
+        print(f"Total original size: {total_original_size / (1024 * 1024):.2f} MB")
+
+if __name__ == "__main__":
+ print_only = False # Set this to True to only print the original sizes without rewriting
+ process_files(BSDF_RESULT_ROOT, print_only)
\ No newline at end of file
diff --git a/bundlenets/scripts_misc/shrink_bsdf_results.py b/bundlenets/scripts_misc/shrink_bsdf_results.py
new file mode 100644
index 0000000..fa6f08f
--- /dev/null
+++ b/bundlenets/scripts_misc/shrink_bsdf_results.py
@@ -0,0 +1,73 @@
+"""Clean up the intermediate files in BundleSDF results to save space.
+"""
+
+import os
+
+def get_size_of_folder(folder_path):
+ total_size = 0
+ for dirpath, dirnames, filenames in os.walk(folder_path):
+ for f in filenames:
+ fp = os.path.join(dirpath, f)
+ total_size += os.path.getsize(fp)
+ return total_size
+
+BSDF_RESULT_ROOT = '/mnt/data0/minghz/repos/bundlenets/results'
+EXP_PREFIX_TO_EXCLUDE = 'robotocc'
+
+# These folders are saved by BundleSDF but are not needed for the final results.
+# They are large and can be removed to save space.
+folders_to_remove = [
+ 'depth',
+ 'depth_filtered',
+ 'color',
+]
+
+# free_pts.pt, uncertain_pts.pt, and their sdf predictions are saved for debugging purposes.
+# They are not related to contacts but they are pretty large.
+# We can remove them to save space.
+remove_sdf_inspections = True
+
+size_released = 0
+for vision_asset in os.listdir(BSDF_RESULT_ROOT):
+ asset_path = os.path.join(BSDF_RESULT_ROOT, vision_asset)
+    if os.path.isdir(asset_path) and not vision_asset.startswith(EXP_PREFIX_TO_EXCLUDE):
+ for iter in os.listdir(asset_path):
+ iter_path = os.path.join(asset_path, iter)
+ if os.path.isdir(iter_path):
+ for run in os.listdir(iter_path):
+ run_path = os.path.join(iter_path, run)
+ if os.path.isdir(run_path):
+ for folder in folders_to_remove:
+ folder_path = os.path.join(run_path, folder)
+ if os.path.isdir(folder_path):
+ # remove the folder
+ size_folder = get_size_of_folder(folder_path)
+ size_mb = size_folder / 1024 / 1024
+ size_released += size_mb
+ print(f'{folder_path} size: {size_mb} MB')
+ os.system(f'rm -r {folder_path}')
+ print(f'removed {folder_path}')
+ if remove_sdf_inspections:
+ nerf_run_path = os.path.join(run_path, 'nerf_runs')
+ for nerf_run in os.listdir(nerf_run_path):
+ nerf_run_full_path = os.path.join(nerf_run_path, nerf_run)
+ if os.path.isdir(nerf_run_full_path):
+ sdf_path = os.path.join(nerf_run_full_path, 'sdf_inspection')
+ free_pts_path = os.path.join(sdf_path, 'free_pts.pt')
+ free_pts_sdf_path = os.path.join(sdf_path, 'free_pts_predsdf.pt')
+ uncertain_pts_path = os.path.join(sdf_path, 'uncertain_pts.pt')
+ uncertain_pts_sdf_path = os.path.join(sdf_path, 'uncertain_pts_predsdf.pt')
+ if os.path.exists(free_pts_path):
+ os.system(f'rm {free_pts_path}')
+ print(f'removed {free_pts_path}')
+ if os.path.exists(free_pts_sdf_path):
+ os.system(f'rm {free_pts_sdf_path}')
+ print(f'removed {free_pts_sdf_path}')
+ if os.path.exists(uncertain_pts_path):
+ os.system(f'rm {uncertain_pts_path}')
+ print(f'removed {uncertain_pts_path}')
+ if os.path.exists(uncertain_pts_sdf_path):
+ os.system(f'rm {uncertain_pts_sdf_path}')
+ print(f'removed {uncertain_pts_sdf_path}')
+
+print(f'Total size released: {size_released} MB')
\ No newline at end of file
diff --git a/bundlenets/scripts_misc/vis_ply.py b/bundlenets/scripts_misc/vis_ply.py
new file mode 100644
index 0000000..7328f99
--- /dev/null
+++ b/bundlenets/scripts_misc/vis_ply.py
@@ -0,0 +1,194 @@
+import open3d as o3d
+import numpy as np
+import os
+
+import matplotlib.pyplot as plt
+from sklearn.cluster import DBSCAN
+
+def read_ply(file_path):
+ # Read the point cloud from the PLY file
+ pcd = o3d.io.read_point_cloud(file_path)
+ return pcd
+
+def plot_point_cloud(points):
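+    """Scatter-plot an (N, 3) point cloud (subsampled to 10k points), colored
+    by x + y + z; the arrow keys rotate the view."""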
+
+ # Subsample if there are more than 10000 points
+ if len(points) > 10000:
+ print(f"Total number of points: {len(points)}")
+ indices = np.random.choice(len(points), 10000, replace=False)
+ points = points[indices]
+
+ # Calculate colors based on x + y + z
+ colors = points[:, 0] + points[:, 1] + points[:, 2]
+
+ # Plot the point cloud
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+ scatter = ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=colors, cmap='viridis', s=1)
+ fig.colorbar(scatter, ax=ax, label='x + y + z')
+
+ # Function to update the view
+ def on_key(event):
+ if event.key == 'up':
+ ax.view_init(elev=ax.elev + 10, azim=ax.azim)
+ elif event.key == 'down':
+ ax.view_init(elev=ax.elev - 10, azim=ax.azim)
+ elif event.key == 'left':
+ ax.view_init(elev=ax.elev, azim=ax.azim - 10)
+ elif event.key == 'right':
+ ax.view_init(elev=ax.elev, azim=ax.azim + 10)
+ fig.canvas.draw_idle()
+
+ fig.canvas.mpl_connect('key_press_event', on_key)
+ plt.show()
+
+def visualize_filter(points, filtered_points, title=None):
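+    """Show the original and filtered point clouds side by side with shared
+    axis limits and equal aspect ratios."""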
+ fig = plt.figure(figsize=(12, 5))
+
+ # Original point cloud
+ ax1 = fig.add_subplot(121, projection='3d')
+ ax1.scatter(points[:, 0], points[:, 1], points[:, 2],
+ c='b', marker='o', s=1, alpha=0.6)
+ ax1.set_title("Original Point Cloud")
+ ax1.set_xlabel('X')
+ ax1.set_ylabel('Y')
+ ax1.set_zlabel('Z')
+
+ # Filtered point cloud
+ ax2 = fig.add_subplot(122, projection='3d')
+ ax2.scatter(filtered_points[:, 0], filtered_points[:, 1], filtered_points[:, 2],
+ c='r', marker='o', s=1, alpha=0.6)
+ ax2.set_title(f"Filtered Point Cloud")
+ ax2.set_xlabel('X')
+ ax2.set_ylabel('Y')
+ ax2.set_zlabel('Z')
+
+ # Set the same limits for both subplots
+ all_points = np.vstack((points, filtered_points))
+ x_limits = [np.min(all_points[:, 0]), np.max(all_points[:, 0])]
+ y_limits = [np.min(all_points[:, 1]), np.max(all_points[:, 1])]
+ z_limits = [np.min(all_points[:, 2]), np.max(all_points[:, 2])]
+
+ ax1.set_xlim(x_limits)
+ ax1.set_ylim(y_limits)
+ ax1.set_zlim(z_limits)
+
+ ax2.set_xlim(x_limits)
+ ax2.set_ylim(y_limits)
+ ax2.set_zlim(z_limits)
+
+ # Set the xyz length scale to be the same
+ ax1.set_box_aspect([np.ptp(x_limits), np.ptp(y_limits), np.ptp(z_limits)])
+ ax2.set_box_aspect([np.ptp(x_limits), np.ptp(y_limits), np.ptp(z_limits)])
+
+ if title is not None:
+ plt.suptitle(title)
+ plt.tight_layout()
+ plt.show()
+
+def filter_dbscan(points, eps=0.1, min_samples=50, visualize=True):
+ """
+ Filter using DBSCAN clustering to identify the main cluster.
+ """
+ # Run DBSCAN
+ db = DBSCAN(eps=eps, min_samples=min_samples).fit(points)
+ labels = db.labels_
+
+ # Find the largest cluster (excluding noise points labeled as -1)
+ unique_labels = np.unique(labels)
+ unique_labels = unique_labels[unique_labels != -1]
+ largest_cluster = max(unique_labels, key=lambda x: np.sum(labels == x))
+
+ # Create mask for points in largest cluster
+ mask = (labels == largest_cluster)
+ filtered_points = points[mask]
+
+ # Print statistics
+ print(f"DBSCAN clustering:")
+ print(f" Epsilon: {eps}")
+ print(f" Min samples: {min_samples}")
+ print(f" Largest cluster: {largest_cluster}")
+ print(f" Points before: {len(points)}")
+ print(f" Points after: {len(filtered_points)}")
+
+ if visualize:
+ visualize_filter(points, filtered_points, "DBSCAN Filtering")
+
+ return filtered_points, mask
+
+def filter_z_statistical(points, n_std=2.0, visualize=True):
+ """
+ Filter points based on z-value statistics.
+
+ Args:
+ points (np.ndarray): Input point cloud of shape (N, 3)
+ n_std (float): Number of standard deviations for filtering threshold
+ visualize (bool): Whether to visualize results
+
+ Returns:
+ np.ndarray: Filtered point cloud
+ np.ndarray: Boolean mask of kept points
+ """
+ # Get z values
+ z_values = points[:, 2]
+
+ # Calculate mean and standard deviation of z values
+ z_mean = np.mean(z_values)
+ z_std = np.std(z_values, ddof=1) # ddof=1 for sample standard deviation
+
+ # Create mask for points within n standard deviations
+ mask = np.abs(z_values - z_mean) <= n_std * z_std
+ filtered_points = points[mask]
+
+ # Print statistics
+ print(f"Z statistics:")
+ print(f" Mean: {z_mean:.6f}")
+ print(f" Std: {z_std:.6f}")
+ print(f" Range kept: {z_mean - n_std*z_std:.6f} to {z_mean + n_std*z_std:.6f}")
+ print(f" Points before: {len(points)}")
+ print(f" Points after: {len(filtered_points)}")
+
+ if visualize:
+ visualize_filter(points, filtered_points, f"Z-Statistical Filtering (±{n_std} std)")
+
+ return filtered_points, mask
+
+if __name__ == "__main__":
+
+ ### Segmented point cloud from rgb mask may contain a few non-object points (e.g., from foreground occlusion)
+ ### We can use DBSCAN or Z-statistical filtering to remove them.
+ ### They are implemented in Frame.cpp. Here are the python versions to visualize the results.
+
+ BSDF_RESULT_ROOT = "/mnt/data0/minghz/repos/bundlenets/results"
+
+ cloud_for_init_coord_file = 'cloud_for_init_coord.ply'
+ cloud_for_init_coord_cluster_file = 'cloud_for_init_coord_cluster.ply'
+ cloud_for_init_coord_2sigma_file = 'cloud_for_init_coord_2sigma.ply'
+
+ file_path = os.path.join(BSDF_RESULT_ROOT, "robotocc_bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-rebuild", cloud_for_init_coord_file)
+ pcd = read_ply(file_path)
+ points = np.asarray(pcd.points)
+ plot_point_cloud(points)
+
+ points_dbscan_py, mask_dbscan = filter_dbscan(points)
+ points_zstats_py, mask_zstats = filter_z_statistical(points)
+
+ file_path = os.path.join(BSDF_RESULT_ROOT, "robotocc_bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-rebuild", cloud_for_init_coord_cluster_file)
+ pcd = read_ply(file_path)
+ points_dbscan_cpp = np.asarray(pcd.points)
+
+ file_path = os.path.join(BSDF_RESULT_ROOT, "robotocc_bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-rebuild", cloud_for_init_coord_2sigma_file)
+ pcd = read_ply(file_path)
+ points_z_stats_cpp = np.asarray(pcd.points)
+
+ # plot_point_cloud(points_dbscan_py)
+ # plot_point_cloud(points_dbscan_cpp)
+ # plot_point_cloud(points_zstats_py)
+ # plot_point_cloud(points_z_stats_cpp)
+
+ ### The following code is for visualizing the point cloud used to build the octree.
+ file_path = os.path.join(BSDF_RESULT_ROOT, "robotocc_oatly_3/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-t09d/nerf/build_octree_cloud.ply")
+ pcd = read_ply(file_path)
+ # Convert the point cloud to numpy array
+ points = np.asarray(pcd.points)
+    plot_point_cloud(points)
\ No newline at end of file
diff --git a/bundlenets/scripts_misc/vis_rgb_depth_mask.py b/bundlenets/scripts_misc/vis_rgb_depth_mask.py
new file mode 100644
index 0000000..03d7a4f
--- /dev/null
+++ b/bundlenets/scripts_misc/vis_rgb_depth_mask.py
@@ -0,0 +1,99 @@
+"""This script visualizes RGB and depth images along with their masks.
+Depth images are scaled to 8-bit for better visualization.
+It overlays the depth information on the RGB image and also shows the masked versions."""
+
+import cv2
+import numpy as np
+import matplotlib.pyplot as plt
+import os
+import matplotlib
+
+# =============================================================================
+# Matplotlib backend configuration
+# If you're using SSH with X11 forwarding, keep the following line enabled:
+matplotlib.use('TkAgg')
+# This helps prevent Qt/X11 errors like:
+# QObject::moveToThread: Current thread (0x55e00d8db800) is not the object's thread (0x55e00db6f700).
+# Cannot move to target thread (0x55e00d8db800)
+
+# qt.qpa.plugin: Could not load the Qt platform plugin "xcb" in "/opt/conda/envs/py38/lib/python3.8/site-packages/cv2/qt/plugins" even though it was found.
+# This application failed to start because no Qt platform plugin could be initialized. Reinstalling the application may fix this problem.
+
+# Comment this line if you're NOT using X11 forwarding or if it causes issues
+# =============================================================================
+
+def read_image(image_path):
+ return cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
+
+def apply_mask(image, mask):
+ return cv2.bitwise_and(image, image, mask=mask)
+
+def scale_depth_image(depth_image):
+ depth_min = np.min(depth_image)
+ depth_max = np.max(depth_image)
+ depth_image_scaled = (depth_image - depth_min) / (depth_max - depth_min) * 255
+ return depth_image_scaled.astype(np.uint8)
+
+def overlay_depth_on_rgb(rgb_image, depth_image):
+ depth_colored = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
+ overlay = cv2.addWeighted(rgb_image, 0.5, depth_colored, 0.5, 0)
+ return overlay
+
+def visualize_images(rgb_image, depth_image, masked_rgb, masked_depth, overlay_image, overlay_image_masked):
+ fig, axs = plt.subplots(2, 3, figsize=(15, 10))
+
+ axs[0, 0].imshow(cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB))
+ axs[0, 0].set_title('Original RGB Image')
+ axs[0, 0].axis('off')
+
+ axs[0, 1].imshow(depth_image, cmap='gray')
+ axs[0, 1].set_title('Original Depth Image')
+ axs[0, 1].axis('off')
+
+ axs[0, 2].imshow(cv2.cvtColor(overlay_image, cv2.COLOR_BGR2RGB))
+ axs[0, 2].set_title('Image overlayed with full depth')
+ axs[0, 2].axis('off')
+
+ axs[1, 0].imshow(cv2.cvtColor(masked_rgb, cv2.COLOR_BGR2RGB))
+ axs[1, 0].set_title('Masked RGB Image')
+ axs[1, 0].axis('off')
+
+ axs[1, 1].imshow(masked_depth, cmap='gray')
+ axs[1, 1].set_title('Masked Depth Image')
+ axs[1, 1].axis('off')
+
+ axs[1, 2].imshow(cv2.cvtColor(overlay_image_masked, cv2.COLOR_BGR2RGB))
+    axs[1, 2].set_title('Image overlaid with masked depth')
+ axs[1, 2].axis('off')
+
+ plt.tight_layout()
+ plt.show()
+
+def main():
+ BSDF_DATA_ROOT = '/mnt/data0/minghz/repos/bundlenets/data'
+ asset = 'robotocc_bakingbox_1-8'
+ files = os.listdir(f'{BSDF_DATA_ROOT}/{asset}/masks')
+    # randomly sample 5 files without replacement so no duplicates are shown
+    files = np.random.choice(files, 5, replace=False)
+ for file in files:
+ mask_path = f'{BSDF_DATA_ROOT}/{asset}/masks/{file}' # 0001.png
+ rgb_image_path = f'{BSDF_DATA_ROOT}/{asset}/rgb/{file}'
+ depth_image_path = f'{BSDF_DATA_ROOT}/{asset}/depth/{file}'
+ print(f'Processing {rgb_image_path}...')
+
+ mask = read_image(mask_path)
+ rgb_image = read_image(rgb_image_path)
+ depth_image = read_image(depth_image_path)
+ depth_image_scaled = scale_depth_image(depth_image)
+
+ masked_rgb = apply_mask(rgb_image, mask)
+ masked_depth = apply_mask(depth_image, mask)
+ masked_depth_scaled = scale_depth_image(masked_depth)
+
+ overlay_image_all = overlay_depth_on_rgb(rgb_image, depth_image_scaled)
+ overlay_image_masked = overlay_depth_on_rgb(rgb_image, masked_depth_scaled)
+
+ visualize_images(rgb_image, depth_image_scaled, masked_rgb, masked_depth_scaled, overlay_image_all, overlay_image_masked)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/segmentation_utils.py b/bundlenets/segmentation_utils.py
similarity index 76%
rename from segmentation_utils.py
rename to bundlenets/segmentation_utils.py
index 737599d..5226694 100644
--- a/segmentation_utils.py
+++ b/bundlenets/segmentation_utils.py
@@ -15,4 +15,5 @@ def __int__(self):
return
def run(self, mask_file=None):
- return (cv2.imread(mask_file, -1)>0).astype(np.uint8)
+        # milk_1-5 masks have values around 60 on the background person, so we
+        # threshold at 200 here in the segmenter.
+        return (cv2.imread(mask_file, -1)>200).astype(np.uint8)
diff --git a/tool.py b/bundlenets/tool.py
similarity index 50%
rename from tool.py
rename to bundlenets/tool.py
index 3832ca6..3008474 100644
--- a/tool.py
+++ b/bundlenets/tool.py
@@ -10,10 +10,10 @@
import joblib,json,gzip,pickle
from sklearn.cluster import DBSCAN
import shutil,re,imageio,pdb,os,sys
-from Utils import *
+from bundlenets.Utils import *
from BundleTrack.scripts.data_reader import *
import pandas as pd
-from contact_loss_utils import generate_contact_pts
+
def find_biggest_cluster(pts, eps=0.06, min_samples=1):
dbscan = DBSCAN(eps=eps,min_samples=min_samples,n_jobs=-1)
@@ -25,21 +25,33 @@ def find_biggest_cluster(pts, eps=0.06, min_samples=1):
return pts_cluster, keep_mask
-def compute_translation_scales(pts,max_dim=2,cluster=True, eps=0.06, min_samples=1):
+def compute_translation_scales(pts, max_dim=2, cluster=True, eps=0.06,
+ min_samples=1, pts_contact=None):
+ # pts: p^{OBJ}
if cluster:
pts, keep_mask = find_biggest_cluster(pts, eps, min_samples)
else:
keep_mask = np.ones((len(pts)), dtype=bool)
+ if pts_contact is not None:
+ pts = np.concatenate([pts,pts_contact],axis=0)
+ # keep_mask is not updated for pts_contact
max_xyz = pts.max(axis=0)
min_xyz = pts.min(axis=0)
center = (max_xyz+min_xyz)/2
sc_factor = max_dim/(max_xyz-min_xyz).max() #Normalize to [-1,1]
sc_factor *= 0.9 # Reserver some space
translation_cvcam = -center
+ # center: t^{OBJ}_{CTR}, translation_cvcam: t^{CTR}_{OBJ}, sc_factor: s^{NORM}_{CTR}
+ # where {OBJ} is the world metric frame fixed at the object tracking origin,
+ # {CTR} is the world metric frame fixed at the object center,
+ # {NORM} is the normalized frame (with object in a unit cube) fixed at the object center.
+ # The SDF shape is defined in the {NORM} frame.
+  # T^{NORM}_{CTR} = [s^{NORM}_{CTR}*I, 0; 0, 1], T^{CTR}_{OBJ} = [I, t^{CTR}_{OBJ}; 0, 1]
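+  # Illustrative example (hypothetical numbers): for a cluster spanning
+  # min_xyz=[-0.1,-0.1,0.0] and max_xyz=[0.3,0.1,0.2] with max_dim=2,
+  # center=[0.1,0.0,0.1], sc_factor=(2/0.4)*0.9=4.5 and
+  # translation_cvcam=[-0.1,0.0,-0.1].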
return translation_cvcam, sc_factor, keep_mask
-def compute_scene_bounds_worker(color_file,K,glcam_in_world,use_mask,rgb=None,depth=None,mask=None):
+def compute_scene_bounds_worker(color_file, K, glcam_in_world, use_mask,
+ rgb=None, depth=None, mask=None):
if rgb is None:
depth_file = color_file.replace('images','depth_filtered')
mask_file = color_file.replace('images','masks')
@@ -55,15 +67,24 @@ def compute_scene_bounds_worker(color_file,K,glcam_in_world,use_mask,rgb=None,de
if len(pts)==0:
return None
colors = rgb[valid].reshape(-1,3)
+ # p^{CVCAM}: point cloud in the {CVCAM} reference frame
pcd = toOpen3dCloud(pts,colors)
pcd = pcd.voxel_down_sample(0.01)
pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=30,std_ratio=2.0)
- cam_in_world = glcam_in_world@glcam_in_cvcam
+  # T^{WORLD}_{CVCAM} = T^{WORLD}_{GLCAM} * T^{GLCAM}_{CVCAM}
+  # (T^{GLCAM}_{CVCAM} equals T^{CVCAM}_{GLCAM} numerically in our specific case)
+  # ({WORLD} is {OBJ}, the tracking origin of the object)
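+  # (Assuming the usual camera conventions: CV has x right, y down, z forward,
+  # GL has x right, y up, z backward, so the two frames differ by a 180-degree
+  # rotation about x, diag(1,-1,-1,1), which is its own inverse.)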
+ cam_in_world = glcam_in_world@GLCAM_IN_CVCAM
+ # p^{WORLD} = T^{WORLD}_{CVCAM} * p^{CVCAM}
pcd.transform(cam_in_world)
return np.asarray(pcd.points).copy(), np.asarray(pcd.colors).copy()
-def compute_scene_bounds(color_files,glcam_in_worlds,K,use_mask=True,base_dir=None,rgbs=None,depths=None,masks=None,cluster=True, translation_cvcam=None, sc_factor=None, eps=0.06, min_samples=1):
+
+def compute_scene_bounds(color_files, glcam_in_worlds, K, use_mask=True,
+ base_dir=None, rgbs=None, depths=None, masks=None,
+ cluster=True, translation_cvcam=None, sc_factor=None,
+ eps=0.06, min_samples=1, ps_contact=None):
assert color_files is None or rgbs is None
if base_dir is None:
@@ -72,110 +93,66 @@ def compute_scene_bounds(color_files,glcam_in_worlds,K,use_mask=True,base_dir=No
args = []
if rgbs is not None:
for i in range(len(rgbs)):
- args.append((None,K,glcam_in_worlds[i],use_mask,rgbs[i],depths[i],masks[i]))
+ args.append((None, K, glcam_in_worlds[i], use_mask, rgbs[i], depths[i],
+ masks[i]))
else:
for i in range(len(color_files)):
args.append((color_files[i],K,glcam_in_worlds[i],use_mask))
logging.info(f"compute_scene_bounds_worker start")
- ret = joblib.Parallel(n_jobs=10, prefer="threads")(joblib.delayed(compute_scene_bounds_worker)(*arg) for arg in args)
+ ret = joblib.Parallel(n_jobs=10, prefer="threads")(
+ joblib.delayed(compute_scene_bounds_worker)(*arg) for arg in args)
logging.info(f"compute_scene_bounds_worker done")
pcd_all = None
for r in ret:
if r is None:
continue
+ # pcd_all is p^{WORLD} from compute_scene_bounds_worker(), and {WORLD} is {OBJ}.
if pcd_all is None:
pcd_all = toOpen3dCloud(r[0],r[1])
else:
pcd_all += toOpen3dCloud(r[0],r[1])
pcd = pcd_all.voxel_down_sample(eps/5)
-
- logging.info(f"merge pcd")
-
- o3d.io.write_point_cloud(f'{base_dir}/naive_fusion.ply',pcd)
pts = np.asarray(pcd.points).copy()
- def make_tf(translation_cvcam, sc_factor):
- tf = np.eye(4)
- tf[:3,3] = translation_cvcam
- tf1 = np.eye(4)
- tf1[:3,:3] *= sc_factor
- tf = tf1@tf
- return tf
-
- if translation_cvcam is None:
- translation_cvcam, sc_factor, keep_mask = compute_translation_scales(pts, cluster=cluster, eps=eps, min_samples=min_samples)
- tf = make_tf(translation_cvcam, sc_factor)
- else:
- tf = make_tf(translation_cvcam, sc_factor)
- tmp = copy.deepcopy(pcd)
- tmp.transform(tf)
- tmp_pts = np.asarray(tmp.points)
- keep_mask = (np.abs(tmp_pts)<1).all(axis=-1)
+ if ps_contact is not None:
+ if isinstance(ps_contact,torch.Tensor):
+ ps_contact = ps_contact.detach().cpu().numpy()
- logging.info(f"compute_translation_scales done")
+ pcd_contact_all = toOpen3dCloud(ps_contact)
+ # print(f'Use {ps_contact.shape} contact points in compute_scene_bounds.')
+ pcd_contact = pcd_contact_all.voxel_down_sample(eps/5)
+ pts_contact = np.asarray(pcd_contact.points).copy()
- pcd = toOpen3dCloud(pts[keep_mask],np.asarray(pcd.colors)[keep_mask])
- o3d.io.write_point_cloud(f"{base_dir}/naive_fusion_biggest_cluster.ply",pcd)
- pcd_real_scale = copy.deepcopy(pcd)
- print(f'translation_cvcam={translation_cvcam}, sc_factor={sc_factor}')
- with open(f'{base_dir}/normalization.yml','w') as ff:
- tmp = {
- 'translation_cvcam':translation_cvcam.tolist(),
- 'sc_factor':float(sc_factor),
- }
- yaml.dump(tmp,ff)
- pcd.transform(tf)
- return sc_factor, translation_cvcam, pcd_real_scale, pcd
-
-def compute_scene_bounds_from_cn(color_files,glcam_in_worlds,K,use_mask=True,base_dir=None,rgbs=None,depths=None,masks=None,cluster=True, translation_cvcam=None, sc_factor=None, eps=0.06, min_samples=1, annotated_poses_dir=None,offset=None):
- assert color_files is None or rgbs is None
-
- if base_dir is None:
- base_dir = os.path.dirname(color_files[0])+'/../'
-
- args = []
- if rgbs is not None:
- for i in range(len(rgbs)):
- args.append((None,K,glcam_in_worlds[i],use_mask,rgbs[i],depths[i],masks[i]))
+ o3d.io.write_point_cloud(f'{base_dir}/naive_fusion.ply',pcd + pcd_contact)
else:
- for i in range(len(color_files)):
- args.append((color_files[i],K,glcam_in_worlds[i],use_mask))
-
- logging.info(f"compute_scene_bounds_worker start")
- ret = joblib.Parallel(n_jobs=10, prefer="threads")(joblib.delayed(compute_scene_bounds_worker)(*arg) for arg in args)
- logging.info(f"compute_scene_bounds_worker done")
-
- pcd_all = None
- for r in ret:
- if r is None:
- continue
- if pcd_all is None:
- pcd_all = toOpen3dCloud(r[0],r[1])
- else:
- pcd_all += toOpen3dCloud(r[0],r[1])
- pcd = pcd_all.voxel_down_sample(eps/5)
+ pts_contact = None
+ o3d.io.write_point_cloud(f'{base_dir}/naive_fusion.ply',pcd)
logging.info(f"merge pcd")
- o3d.io.write_point_cloud(f'{base_dir}/naive_fusion.ply',pcd)
- pts = np.asarray(pcd.points).copy()
-
def make_tf(translation_cvcam, sc_factor):
tf = np.eye(4)
tf[:3,3] = translation_cvcam
tf1 = np.eye(4)
tf1[:3,:3] *= sc_factor
tf = tf1@tf
+ # translation_cvcam: t^{CTR}_{OBJ}, sc_factor: s^{NORM}_{CTR}
+    # T^{NORM}_{CTR} = [s^{NORM}_{CTR}*I, 0; 0, 1], T^{CTR}_{OBJ} = [I, t^{CTR}_{OBJ}; 0, 1]
+    # tf: T^{NORM}_{OBJ} = T^{NORM}_{CTR} * T^{CTR}_{OBJ}
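+    # Equivalently, tf maps a point as p^{NORM} = sc_factor * (p^{OBJ} + translation_cvcam).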
return tf
if translation_cvcam is None:
- translation_cvcam, sc_factor, keep_mask = compute_translation_scales(pts, cluster=cluster, eps=eps, min_samples=min_samples)
+ # translation_cvcam: t^{CTR}_{OBJ}, sc_factor: s^{NORM}_{CTR}
+ translation_cvcam, sc_factor, keep_mask = compute_translation_scales(
+ pts, cluster=cluster, eps=eps, min_samples=min_samples,
+ pts_contact=pts_contact)
tf = make_tf(translation_cvcam, sc_factor)
else:
tf = make_tf(translation_cvcam, sc_factor)
tmp = copy.deepcopy(pcd)
+ # p^{NORM} = T^{NORM}_{OBJ} * p^{OBJ}
tmp.transform(tf)
tmp_pts = np.asarray(tmp.points)
keep_mask = (np.abs(tmp_pts)<1).all(axis=-1)
@@ -188,16 +165,11 @@ def make_tf(translation_cvcam, sc_factor):
print(f'translation_cvcam={translation_cvcam}, sc_factor={sc_factor}')
with open(f'{base_dir}/normalization.yml','w') as ff:
tmp = {
- 'translation_cvcam':translation_cvcam.tolist(),
- 'sc_factor':float(sc_factor),
+ 'translation_cvcam': translation_cvcam.tolist(),
+ 'sc_factor': float(sc_factor),
}
yaml.dump(tmp,ff)
+ # p^{NORM} = T^{NORM}_{OBJ} * p^{OBJ}
pcd.transform(tf)
- ### Concantenate with sampled points ###
- contact_pts, contact_and_near_surface_sdf = generate_contact_pts(offset,translation_cvcam,sc_factor,annotated_poses_dir)
- pts = np.asarray(pcd.points).copy()
- pts = np.concatenate((pts, contact_pts), axis=0)
- pcd = toOpen3dCloud(pts)
- o3d.io.write_point_cloud(f"{base_dir}/concatenated.ply",pcd)
- ########################################
+ # sc_factor: s^{NORM}_{CTR}, translation_cvcam: t^{CTR}_{OBJ}, pcd_real_scale: p^{OBJ}, pcd: p^{NORM}
return sc_factor, translation_cvcam, pcd_real_scale, pcd
diff --git a/bundlenets/vis_depth.py b/bundlenets/vis_depth.py
new file mode 100644
index 0000000..a2b7604
--- /dev/null
+++ b/bundlenets/vis_depth.py
@@ -0,0 +1,44 @@
+import numpy as np
+import cv2
+import os
+import imageio
+
+def get_color(rgb_path):
+ color = imageio.imread(rgb_path)
+ # color = cv2.resize(color, (self.W,self.H), interpolation=cv2.INTER_NEAREST)
+ return color
+
+def get_depth(rgb_path):
+ depth = cv2.imread(rgb_path.replace('rgb','depth'),-1)
+
+ # print(depth.min(), depth.max(), depth.mean())
+    depth = depth / 1e3  # convert depth from millimeters to meters
+ # depth = cv2.resize(depth, (self.W,self.H), interpolation=cv2.INTER_NEAREST)
+ return depth
+
+
+rgb_path = "/mnt/data0/minghz/repos/bundlenets/data/cube_2/rgb/0004.png"
+# rgb_path = "/mnt/data0/minghz/repos/bundlenets_old/data/2022-11-18-15-10-24_milk/rgb/1668813032442256375.png"
+# rgb_path = "/mnt/data0/minghz/repos/bundlenets_old/data/2022-11-18-15-10-24_milk/rgb/1668813029570568196.png"
+
+
+# load the image
+color = cv2.imread(rgb_path)
+print(color.shape)
+depth = get_depth(rgb_path)
+print(depth.shape)
+print(depth.min(), depth.max(), depth.mean())
+H, W = depth.shape[:2]
+# color2 = cv2.resize(color, (W, H), interpolation=cv2.INTER_NEAREST)
+# depth2 = cv2.resize(depth, (W, H), interpolation=cv2.INTER_NEAREST)
+# print((color2 == color).all())
+# print((depth2 == depth).all())
+print(color.shape)
+print(depth.shape)
+
+# Estimate the roughness of the depth using the magnitude of its Laplacian.
+roughness = cv2.Laplacian(depth, cv2.CV_64F)
+roughness = np.abs(roughness)
+# roughness = cv2.normalize(roughness, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
+print(roughness.shape)
+print(roughness.min(), roughness.max(), roughness.mean(), np.median(roughness))
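+
+# A minimal sketch of how one might actually display the roughness map
+# (optional; assumes a GUI-capable matplotlib backend):
+# import matplotlib.pyplot as plt
+# plt.imshow(roughness, cmap='viridis')
+# plt.colorbar(label='|Laplacian of depth|')
+# plt.title('depth roughness')
+# plt.show()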
diff --git a/bundlenets/vis_textured_mesh.py b/bundlenets/vis_textured_mesh.py
new file mode 100644
index 0000000..cfece30
--- /dev/null
+++ b/bundlenets/vis_textured_mesh.py
@@ -0,0 +1,4 @@
+# This script visualizes a reconstructed mesh, e.g. mesh_cleaned.obj (normalized space) or textured_mesh.obj (real-world scale).
+import trimesh
+mesh = trimesh.load_mesh("results/cube_1/bundlesdf_iteration_1/bundlesdf_id_00_far2/nerf_runs/bundlesdf_id_00_far2/mesh_cleaned.obj")
+mesh.show()
\ No newline at end of file
diff --git a/bundlenets/vis_utils.py b/bundlenets/vis_utils.py
new file mode 100644
index 0000000..4dd07f5
--- /dev/null
+++ b/bundlenets/vis_utils.py
@@ -0,0 +1,753 @@
+import os
+import shutil
+from PIL import Image
+import numpy as np
+import matplotlib.pyplot as plt
+import argparse
+import torch
+import trimesh
+import pdb
+
+def plot_point_cloud(points, points_2=None):
+
+ # Subsample if there are more than 10000 points
+ if len(points) > 10000:
+ print(f"Total number of points: {len(points)}")
+ indices = np.random.choice(len(points), 10000, replace=False)
+ points = points[indices]
+
+ # Calculate colors based on x + y + z
+ colors = points[:, 0] + points[:, 1] + points[:, 2]
+
+ # Plot the point cloud
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+ scatter = ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=colors, cmap='viridis', s=1)
+ fig.colorbar(scatter, ax=ax, label='x + y + z')
+
+ if points_2 is not None:
+ if len(points_2) > 10000:
+ print(f"Total number of points: {len(points_2)}")
+ indices = np.random.choice(len(points_2), 10000, replace=False)
+ points_2 = points_2[indices]
+ scatter = ax.scatter(points_2[:, 0], points_2[:, 1], points_2[:, 2], c='r', s=1)
+ points_all = np.concatenate([points, points_2], axis=0)
+ else:
+ points_all = points
+ ax.set_box_aspect([np.ptp(arr) for arr in [points_all[:, 0], points_all[:, 1], points_all[:, 2]]])
+ ax.set_xlabel('X')
+ ax.set_ylabel('Y')
+ ax.set_zlabel('Z')
+
+ # Function to update the view
+ def on_key(event):
+ if event.key == 'up':
+ ax.view_init(elev=ax.elev + 10, azim=ax.azim)
+ elif event.key == 'down':
+ ax.view_init(elev=ax.elev - 10, azim=ax.azim)
+ elif event.key == 'left':
+ ax.view_init(elev=ax.elev, azim=ax.azim - 10)
+ elif event.key == 'right':
+ ax.view_init(elev=ax.elev, azim=ax.azim + 10)
+ fig.canvas.draw_idle()
+
+ fig.canvas.mpl_connect('key_press_event', on_key)
+ plt.show(block=False)
+
+def concatenate_masks():
+ src_base = "./data"
+ dst_dir = os.path.join(src_base, "box_0", "masks")
+ current_num = 1
+
+ # Loop through box_1 to box_10
+ for i in range(1, 11):
+ src_subfolder = os.path.join(src_base, f"box_{i}", "masks")
+
+ # Get the list of png files in the masks subfolder
+ image_files = [f for f in os.listdir(src_subfolder) if f.endswith('.png')]
+ image_files.sort() # To make sure we get them in the right order
+
+ for img_file in image_files:
+ dst_file = os.path.join(dst_dir, f"{current_num:04}.png")
+ src_file = os.path.join(src_subfolder, img_file)
+
+ # Copy the image to the destination
+ shutil.copy(src_file, dst_file)
+
+ # Update the current number
+ current_num += 1
+
+def process_masks(input_folder):
+ '''Convert grey parts in masks to background
+ '''
+ for file_name in os.listdir(input_folder):
+ if file_name.endswith('.png'):
+ file_path = os.path.join(input_folder, file_name)
+ image = Image.open(file_path)
+ image_array = np.array(image)
+ binary_image_array = (image_array >= 255).astype(np.uint8) * 255
+ processed_image = Image.fromarray(binary_image_array)
+ processed_image.save(os.path.join(input_folder, file_name))
+
+def visualize(filename, max_n=10000):
+ if isinstance(filename, str):
+ pts = torch.load(filename)
+ else:
+ pts = filename
+ pts = pts.detach().numpy()
+ if max_n > 0:
+ if pts.shape[0]>max_n:
+ idxs = np.random.permutation(pts.shape[0])
+ pts = pts[idxs[:max_n]]
+ fig = plt.figure(figsize=(8, 8))
+ ax = fig.add_subplot(111, projection='3d')
+ ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=1)
+ print(f"{pts.shape=}")
+ ax.set_xlabel('X-axis')
+ ax.set_ylabel('Y-axis')
+ ax.set_zlabel('Z-axis')
+ ax.legend()
+    # Save before the blocking plt.show() call so the saved figure is not empty.
+    plt.savefig('vis.png')
+    plt.show()
+
+def visualize_two(filename, filename2, max_n=10000):
+ pts = torch.load(filename)
+ pts2 = torch.load(filename2)
+ pts = pts.detach().numpy()
+ pts2 = pts2.detach().numpy()
+ if max_n > 0:
+ if pts.shape[0]>max_n:
+ idxs = np.random.permutation(pts.shape[0])
+ pts = pts[idxs[:max_n]]
+ if pts2.shape[0]>max_n:
+ idxs = np.random.permutation(pts2.shape[0])
+ pts2 = pts2[idxs[:max_n]]
+ fig = plt.figure(figsize=(8, 8))
+ ax = fig.add_subplot(111, projection='3d')
+ ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=1)
+ ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c='red', s=1)
+ print(f"{pts.shape=}")
+ print(f"{pts2.shape=}")
+ ax.set_xlabel('X-axis')
+ ax.set_ylabel('Y-axis')
+ ax.set_zlabel('Z-axis')
+ ax.legend()
+    plt.savefig('vis.png')  # save before the blocking show()
+    plt.show()
+
+def visualize_pts_sdfs(pts, sdf):
+ if isinstance(pts, str):
+ pts = torch.load(pts)
+ if isinstance(sdf, str):
+ sdf = torch.load(sdf)
+ N = pts.shape[0]
+ fig = plt.figure(figsize=(8, 8))
+ ax = fig.add_subplot(111, projection='3d')
+ # pts = torch.load(pts).detach().numpy()
+ # sdf = torch.load(sdf).detach().numpy()
+ idx = np.random.permutation(N)[:5000]
+ pts = pts[idx]
+ sdf = sdf[idx]
+ print(f"{sdf.shape=}")
+ print(f"{pts.shape=}")
+ colored = ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c=sdf,
+ cmap='coolwarm', marker='o', vmin=-1, vmax=1,
+ label='pts', s=2)
+
+ cbar = fig.colorbar(colored)
+ cbar.set_label('sdfs')
+ ax.set_xlabel('X-axis')
+ ax.set_ylabel('Y-axis')
+ ax.set_zlabel('Z-axis')
+ ax.legend()
+    plt.savefig(f'plot_{N}.png')  # save before the blocking show()
+    plt.show()
+
+def visualize_slice_as_image(slice, title):
+ '''
+ slice: (B x) H x W
+ '''
+ if slice.ndim == 3:
+ sample_id = 0
+ slice = slice[sample_id] # H x W
+
+ fig = plt.figure()
+ colored = plt.imshow(slice, cmap='coolwarm',vmin=-1, vmax=1)
+ cbar = fig.colorbar(colored)
+ cbar.set_label('sdf')
+ plt.title(title)
+ plt.show()
+
+def visualize_three_pts_sdfs(pts1, sdf1, pts2, sdf2, pts3, sdf3, max_n=5000,
+ cval_geo1=True, cval_geo2=True, cval_geo3=True,
+ fig_title='', slice_x_coord=1):
+ '''
+ If sdf is provided, use sdf to colorize,
+ else if cval_geo is True, use position to colorize,
+ otherwise use a constant color.
+ '''
+ cval_sdf1 = True
+ if isinstance(pts1, str):
+ pts1 = torch.load(pts1)
+ if isinstance(sdf1, str):
+ sdf1 = torch.load(sdf1)
+ elif sdf1 is None:
+ sdf1 = torch.zeros_like(pts1[:,0])
+ cval_sdf1 = False
+
+ cval_sdf2 = True
+ if isinstance(pts2, str):
+ pts2 = torch.load(pts2)
+ if isinstance(sdf2, str):
+ sdf2 = torch.load(sdf2)
+ elif sdf2 is None:
+ sdf2 = torch.zeros_like(pts2[:,0])
+ cval_sdf2 = False
+
+ cval_sdf3 = True
+ if isinstance(pts3, str):
+ pts3 = torch.load(pts3)
+ if isinstance(sdf3, str):
+ sdf3 = torch.load(sdf3)
+ elif sdf3 is None:
+ sdf3 = torch.zeros_like(pts3[:,0])
+ cval_sdf3 = False
+
+ if isinstance(pts1, list):
+ pts1 = torch.cat(pts1, 0)
+ sdf1 = torch.cat(sdf1, 0)
+
+ if isinstance(pts2, list):
+ pts2 = torch.cat(pts2, 0)
+ sdf2 = torch.cat(sdf2, 0)
+
+ if isinstance(pts3, list):
+ pts3 = torch.cat(pts3, 0)
+ sdf3 = torch.cat(sdf3, 0)
+
+ # sdf3_neg_mask = sdf3 < 0
+ # sdf3 = sdf3[sdf3_neg_mask]
+ # pts3 = pts3[sdf3_neg_mask]
+ # sdf3 = torch.zeros_like(pts3[:,0])
+ # cval_sdf3 = False
+
+ if pts3.ndim == 4:
+ # slice_x_coord = 1
+ assert slice_x_coord is not None, "Please provide slice_x_coord"
+ # slice_x_coord is the x-coordinate of the slice to visualize
+ pts3_id = np.argmin(np.abs(pts3[:, 0, 0, 0] - slice_x_coord))
+ pts3 = pts3[pts3_id].reshape(-1, 3)
+ sdf3 = sdf3[pts3_id].reshape(-1)
+
+ print(f"Before subsample: {pts1.shape=}, {sdf1.shape=}, {pts2.shape=}, {sdf2.shape=}, {pts3.shape=}, {sdf3.shape=}")
+ if max_n > 0:
+ if pts1.shape[0]>max_n:
+ idxs = np.random.permutation(pts1.shape[0])
+ pts1 = pts1[idxs[:max_n]]
+ sdf1 = sdf1[idxs[:max_n]]
+ if pts2.shape[0]>max_n:
+ idxs = np.random.permutation(pts2.shape[0])
+ pts2 = pts2[idxs[:max_n]]
+ sdf2 = sdf2[idxs[:max_n]]
+ if pts3.shape[0]>2000:
+ idxs = np.random.permutation(pts3.shape[0])
+ pts3 = pts3[idxs[:2000]]
+ sdf3 = sdf3[idxs[:2000]]
+
+ print(f"After subsample: {pts1.shape=}, {sdf1.shape=}, {pts2.shape=}, {sdf2.shape=}, {pts3.shape=}, {sdf3.shape=}")
+
+ if cval_sdf1:
+ cmap1 = 'coolwarm'
+ cval1 = sdf1
+ print("using sdf1")
+ elif cval_geo1:
+ cmap1 = 'viridis'
+ # cmap1 = 'Greys'
+ # cval1 = pts1.sum(1)
+ cval1 = pts1[:, 2]
+ print("using z1")
+ else:
+ cmap1 = 'Greens'
+ cval1 = sdf1
+ print("using zero1")
+
+ if cval_sdf2:
+ cmap2 = 'twilight'
+ # cmap2 = 'coolwarm'
+ # cmap2 = 'coolwarm'
+ cval2 = sdf2
+ print("using sdf2")
+ elif cval_geo2:
+ cmap2 = 'plasma'
+ # cval2 = pts2.sum(1)
+ cval2 = pts2[:, 1]
+ print("using y2")
+ else:
+ cmap2 = 'Greys'
+ cval2 = sdf2
+ print("using zero2")
+
+ if cval_sdf3:
+ # cmap3 = 'hsv'
+ # cmap3 = 'twilight'
+ cmap3 = 'coolwarm'
+ cval3 = sdf3
+ print("using sdf3")
+    elif cval_geo3:
+        # cmap3 = 'plasma'
+        cmap3 = 'viridis'
+        # cval3 = pts3.sum(1)
+        cval3 = pts3[:, 0]
+        print("using x3")
+ else:
+ cmap3 = 'Oranges'
+ cval3 = sdf3
+ print("using zero3")
+
+ fig = plt.figure(figsize=(8, 8))
+ ax = fig.add_subplot(111, projection='3d')
+ pts1 = pts1.cpu().detach().numpy()
+ pts2 = pts2.cpu().detach().numpy()
+ pts3 = pts3.cpu().detach().numpy()
+ # ax.scatter(pts1[:,0], pts1[:,1], pts1[:,2], color='red', s=1)
+ colored1 = ax.scatter(pts1[:, 0], pts1[:, 1], pts1[:, 2], c=cval1,
+ cmap=cmap1, marker='o', vmin=-1, vmax=1,
+ label='pts1', s=1)
+ # ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c='green',
+ # marker='.', s=1)
+ colored3 = ax.scatter(pts3[:, 0], pts3[:, 1], pts3[:, 2], c=cval3, cmap=cmap3, vmin=-1, vmax=1,
+ marker='o', label='pts3', s=1)
+ # colored3 = ax.scatter(pts3[:, 0], pts3[:, 1], pts3[:, 2], color='blue',
+ # marker='o', label='pts3', s=1)
+ colored2 = ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c=cval2, cmap=cmap2, vmin=-1, vmax=1,
+ marker='x', label='pts2', s=10)
+ # ax.scatter(rendered_pts[:, 0], rendered_pts[:, 1], rendered_pts[:, 2])
+    # Each scatter series below gets its own colorbar, since the series may
+    # use different color maps.
+ if cval_sdf1:
+ cbar = fig.colorbar(colored1)
+ cbar.set_label('sdf1')
+ elif cval_geo1:
+ cbar = fig.colorbar(colored1)
+ cbar.set_label('pts1')
+
+ if cval_sdf2:
+ cbar = fig.colorbar(colored2)
+ cbar.set_label('sdf2')
+ elif cval_geo2:
+ cbar = fig.colorbar(colored2)
+ cbar.set_label('pts2')
+
+ if cval_sdf3:
+ cbar = fig.colorbar(colored3)
+ cbar.set_label('sdf3')
+ elif cval_geo3:
+ cbar = fig.colorbar(colored3)
+ cbar.set_label('pts3')
+
+ ax.set_xlabel('X-axis')
+ ax.set_ylabel('Y-axis')
+ ax.set_zlabel('Z-axis')
+ ax.legend()
+
+ # Set equal aspect ratio.
+ ax.set_box_aspect([np.ptp(arr) for arr in \
+ [ax.get_xlim(), ax.get_ylim(), ax.get_zlim()]])
+ # Set figure aspect ratio to 2:1.
+ fig.set_size_inches(12, 6)
+ # Set figure title
+ plt.title(fig_title)
+
+ # Initialize view to front view (along y-axis)
+ ax.view_init(elev=0, azim=-90)
+ # Define key press event handler
+ def on_key(event):
+ elev = ax.elev
+ azim = ax.azim
+
+ if event.key == 'left' or event.key == 'a':
+ ax.view_init(elev=elev, azim=azim - 10)
+ elif event.key == 'right' or event.key == 'd':
+ ax.view_init(elev=elev, azim=azim + 10)
+ elif event.key == 'up' or event.key == 'w':
+ ax.view_init(elev=elev + 10, azim=azim)
+ elif event.key == 'down' or event.key == 's':
+ ax.view_init(elev=elev - 10, azim=azim)
+
+ plt.draw()
+ # Connect the key press event to the handler
+ fig.canvas.mpl_connect('key_press_event', on_key)
+
+ plt.show()
+
+def visualize_two_pts_sdfs(pts1, sdf1, pts2, sdf2, max_n=5000, cval_geo1=True, cval_geo2=True):
+ '''
+ If sdf is provided, use sdf to colorize,
+ else if cval_geo is True, use position to colorize,
+ otherwise use a constant color.
+ '''
+ cval_sdf1 = True
+ if isinstance(pts1, str):
+ pts1 = torch.load(pts1)
+ if isinstance(sdf1, str):
+ sdf1 = torch.load(sdf1)
+ elif sdf1 is None:
+ sdf1 = torch.zeros_like(pts1[:,0])
+ cval_sdf1 = False
+
+ cval_sdf2 = True
+ if isinstance(pts2, str):
+ pts2 = torch.load(pts2)
+ if isinstance(sdf2, str):
+ sdf2 = torch.load(sdf2)
+ elif sdf2 is None:
+ sdf2 = torch.zeros_like(pts2[:,0])
+ cval_sdf2 = False
+
+ if isinstance(pts1, list):
+ pts1 = torch.cat(pts1, 0)
+ sdf1 = torch.cat(sdf1, 0)
+
+ if isinstance(pts2, list):
+ pts2 = torch.cat(pts2, 0)
+ sdf2 = torch.cat(sdf2, 0)
+
+ if pts2.ndim == 4:
+ sample_id = 0 #150
+ pts2 = pts2[sample_id].reshape(-1, 3)
+ sdf2 = sdf2[sample_id].reshape(-1)
+
+ print(f"Before subsample: {pts1.shape=}, {sdf1.shape=}, {pts2.shape=}, {sdf2.shape=}")
+ if max_n > 0:
+ if pts1.shape[0]>max_n:
+ idxs = np.random.permutation(pts1.shape[0])
+ pts1 = pts1[idxs[:max_n]]
+ sdf1 = sdf1[idxs[:max_n]]
+ if pts2.shape[0]>max_n:
+ idxs = np.random.permutation(pts2.shape[0])
+ pts2 = pts2[idxs[:max_n]]
+ sdf2 = sdf2[idxs[:max_n]]
+
+ print(f"After subsample: {pts1.shape=}, {sdf1.shape=}, {pts2.shape=}, {sdf2.shape=}")
+
+ if cval_sdf1:
+ # cmap1 = 'coolwarm'
+ cmap1 = 'twilight'
+ cval1 = sdf1
+ elif cval_geo1:
+ cmap1 = 'viridis'
+ # cval1 = pts1.sum(1)
+ cval1 = pts1[:, 2]
+ else:
+ cmap1 = 'Greens'
+ cval1 = sdf1
+
+ if cval_sdf2:
+ # cmap2 = 'twilight'
+ cmap2 = 'coolwarm'
+ cval2 = sdf2
+ elif cval_geo2:
+ cmap2 = 'plasma'
+ # cval2 = pts2.sum(1)
+ cval2 = pts2[:, 1]
+ else:
+ cmap2 = 'Greys'
+ cval2 = sdf2
+
+ fig = plt.figure(figsize=(8, 8))
+ ax = fig.add_subplot(111, projection='3d')
+ pts1 = pts1.cpu().detach().numpy()
+ pts2 = pts2.cpu().detach().numpy()
+ # ax.scatter(pts1[:,0], pts1[:,1], pts1[:,2], color='red', s=1)
+ colored1 = ax.scatter(pts1[:, 0], pts1[:, 1], pts1[:, 2], c=cval1,
+ cmap=cmap1, marker='o', vmin=-1, vmax=1,
+ label='pts1', s=1)
+ # ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c='green',
+ # marker='.', s=1)
+ colored2 = ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c=cval2, cmap=cmap2, vmin=-1, vmax=1,
+ marker='x', label='pts2', s=1)
+ # ax.scatter(rendered_pts[:, 0], rendered_pts[:, 1], rendered_pts[:, 2])
+    # Each scatter series below gets its own colorbar, since the series may
+    # use different color maps.
+ if cval_sdf1:
+ cbar = fig.colorbar(colored1)
+ cbar.set_label('sdf1')
+ elif cval_geo1:
+ cbar = fig.colorbar(colored1)
+ cbar.set_label('pts1')
+
+ if cval_sdf2:
+ cbar = fig.colorbar(colored2)
+ cbar.set_label('sdf2')
+ elif cval_geo2:
+ cbar = fig.colorbar(colored2)
+ cbar.set_label('pts2')
+
+ ax.set_xlabel('X-axis')
+ ax.set_ylabel('Y-axis')
+ ax.set_zlabel('Z-axis')
+ ax.legend()
+
+ # Set equal aspect ratio.
+ ax.set_box_aspect([np.ptp(arr) for arr in \
+ [ax.get_xlim(), ax.get_ylim(), ax.get_zlim()]])
+ plt.show()
+
+def convert_to_grayscale(folder_path):
+ # Iterate over each file in the folder
+ for filename in os.listdir(folder_path):
+ file_path = os.path.join(folder_path, filename)
+
+ # Open the image file
+ image = Image.open(file_path)
+
+ # Convert the image to grayscale
+ image_gray = image.convert("L")
+ print(np.array(image_gray).shape)
+ # Save the grayscale image
+ image_gray.save(file_path)
+
+def uniform_sample(tensor, num_samples):
+ N = tensor.size(0)
+ if num_samples > N:
+ raise ValueError("num_samples must be less than or equal to the number of points in the tensor")
+ indices = torch.randperm(N)[:num_samples]
+ return tensor[indices]
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--vision-asset',
+ type=str,
+ default=None,
+ help="directory of the asset folder e.g. cube_2; encodes " + \
+ "system and tosses.")
+ parser.add_argument('--bundlesdf-id',
+ type=str,
+ default=None,
+ help="what BundleSDF run ID associated with pose outputs to use.")
+ parser.add_argument('--nerf-bundlesdf-id',
+ type=str,
+ default=None,
+ help="what BundleSDF run ID associated with NeRF outputs to use.")
+ parser.add_argument('--cycle-iteration',
+ type=int,
+ default=1,
+ help="BundleSDF iteration number (can't choose 0 since that " + \
+ "means use TagSLAM poses).")
+ parser.add_argument('--slice-x-coord', '-x',
+ type=float,
+ default=1,
+ help="x-coordinate of the slice to visualize.")
+ args = parser.parse_args()
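+    # Example invocation (paths and IDs below are hypothetical):
+    #   python bundlenets/vis_utils.py --vision-asset cube_1-3 --bundlesdf-id 00 \
+    #     --nerf-bundlesdf-id 00 --cycle-iteration 1 -x 1.0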
+ # toss_id = args.toss_id
+ # type = args.type
+ # folder = f'./data/{type}_{toss_id}/Annotations'
+
+ # obj_file = 'results/cube_2/mesh_cleaned.obj' # normalized space. textured_mesh.obj is in real-world.
+ # mesh_cleaned = trimesh.load(obj_file, force='mesh')
+ # mesh_cleaned_pts = mesh_cleaned.sample(10000)
+ # mesh_cleaned_pts = torch.tensor(mesh_cleaned_pts)
+ # convert_to_grayscale(folder)
+ # rendered_pts = torch.load('rendered_pts.pt')
+ # rendered_pts = uniform_sample(rendered_pts, num_samples=2000)
+ # rendered_pts = rendered_pts.detach().numpy()
+ # print(rendered_pts.shape)
+ # visualize_pts_locally('support_pts_processed.pt','sdfs_from_cnets.pt','sampled_pts_processed.pt', 'sdf_bounds_from_cnets.pt')
+ # visualize_pts_locally('sdf_ptsall_2.pt', 'sdf_ptsall_2_predsdf.pt', 'contact_pts_new.pt', None) # 'sdf_pts2.pt')
+ # visualize_pts_locally('sdf_ptsall_1.pt', 'sdf_ptsall_1_predsdf.pt', mesh_cleaned_pts, None, 5000) # 'sdf_pts2.pt')
+ # visualize_pts_locally('contact_pts_gt.pt', 'contact_and_near_surface_sdf.pt', 'empty_pts2.pt', 'sdf_pts2.pt')
+ ### support_pts.pt: support points and the sampled points along the querying direction near the support points
+ ### sdfs_from_cnets: the sdf values of the above points
+ ### sampled_pts.pt: points sampled inside and outside of the hyperplanes
+ ### sdf_bounds_from_cnets: the sdf lower bound of these points (i.e., signed distance to the hyperplane)
+ ### 1: support points; 2: sampled points
+ ### empty: empty mask; sdf_: sdf mask
+ ### contact_pts_new: ground truth mesh after icp with bundlesdf output mesh
+ ### depth_pts_new: bundlesdf sampled points in occupied octree voxels along the rays
+
+ # # obj_file = 'assets/mesh_cleaned_cube2.obj'
+ # obj_file = 'results/cube_1/tagslam/bundlesdf_id_mt01/nerf_runs/bundlesdf_id_mt01/mesh_cleaned.obj'
+ # obj_file = 'results/cube_1/tagslam/bundlesdf_id_01/nerf_runs/bundlesdf_id_01/mesh_cleaned.obj'
+ # obj_file = 'results/cube_1/tagslam/bundlesdf_id_01/nerf_runs/bundlesdf_id_01/mesh_cleaned.obj'
+ # obj_file = 'results/cube_2/bundlesdf_iteration_2/bundlesdf_id_02/nerf_runs/bundlesdf_id_02/mesh_cleaned.obj'
+ # obj_file = 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02_corners/nerf_runs/bundlesdf_id_02_corners/mesh_cleaned.obj'
+ # obj_file = 'results/cube_1-3/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/mesh_cleaned.obj'
+
+ # results/bottle_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-mhc
+ # nerf_path = 'results/cube_1-4/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-sphc'
+ # nerf_path = 'results/milk_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-mhc'
+ # nerf_path = 'results/bakingbox_1-5/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-nhc'
+ # nerf_path = 'results/bakingbox_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-mhc'
+ # nerf_path = 'results/bakingbox_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00'
+ # nerf_path = 'results/cube_1-3/bundlesdf_iteration_2/bundlesdf_id_00-robot2/nerf_runs/bundlesdf_id_00-robot2'
+ # nerf_path = 'results/cube_1-3/bundlesdf_iteration_2/bundlesdf_id_00-nrobot/nerf_runs/bundlesdf_id_00-nrobot'
+ # nerf_path_mesh = 'results/cube_1-3/bundlesdf_iteration_2/bundlesdf_id_00-nrobot/nerf_runs/bundlesdf_id_00-nrobot'
+ # nerf_path = 'results/bakingbox_1/bundlesdf_iteration_2/bundlesdf_id_02-cvwo2/nerf_runs/bundlesdf_id_02-cvwo2'
+ # nerf_path_mesh = 'results/bakingbox_1/bundlesdf_iteration_2/bundlesdf_id_02-cvwo2/nerf_runs/bundlesdf_id_02-cvwo2'
+ # nerf_path = 'results/bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-cvwo/nerf_runs/bundlesdf_id_00-cvwo2x2'
+ # nerf_path_mesh = 'results/bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-cvwo/nerf_runs/bundlesdf_id_00-cvwo2x2'
+ nerf_path = f'results/{args.vision_asset}/bundlesdf_iteration_{args.cycle_iteration}/' + \
+ f'bundlesdf_id_{args.bundlesdf_id}/nerf_runs/bundlesdf_id_{args.nerf_bundlesdf_id}'
+ nerf_path_mesh = nerf_path
+ # nerf_path = 'results/duck_1-2/bundlesdf_iteration_2/bundlesdf_id_00-nhc/nerf_runs/bundlesdf_id_00-nhc'
+ obj_file = nerf_path_mesh + '/mesh_cleaned.obj'
+
+ mesh_cleaned = trimesh.load(obj_file, force='mesh')
+ mesh_cleaned_pts = mesh_cleaned.sample(3000)
+ mesh_cleaned_pts = torch.tensor(mesh_cleaned_pts)
+ # visualize(mesh_cleaned_pts)
+ # visualize('data/cube/cube_2/support_pts.pt')
+ # visualize('data/cube/cube_2/sampled_pts.pt')
+
+ fig_title = f'{args.vision_asset}_{args.bundlesdf_id}_{args.nerf_bundlesdf_id}_iter{args.cycle_iteration}'
+ # from cnets-data-generation.vis_utils import visualize_three_pts_sdfs
+ visualize_three_pts_sdfs(
+ mesh_cleaned_pts, None,
+ nerf_path+'/sdf_inspection/cps_near_pcd.pt',
+ nerf_path+'/sdf_inspection/cps_near_sdf_gt.pt',
+ # nerf_path+'/sdf_inspection/hps_near_pcd.pt',
+ # nerf_path+'/sdf_inspection/hps_near_sdf_gt.pt',
+ # nerf_path+'/sdf_inspection/cps_slices.pt',
+ # nerf_path+'/sdf_inspection/cps_slices_predsdf.pt',
+ # nerf_path+'/sdf_inspection/near_pts.pt',
+ # nerf_path+'/sdf_inspection/near_pts_predsdf.pt',
+ nerf_path+'/sdf_inspection/interp_pts.pt',
+ nerf_path+'/sdf_inspection/interp_pts_predsdf.pt',
+
+ # nerf_path+'/sdf_inspection/cps_empty_pcd.pt',
+ # nerf_path+'/sdf_inspection/cps_empty_sdf_pred.pt',
+ # nerf_path+'/sdf_inspection/hps_empty_pcd.pt',
+ # nerf_path+'/sdf_inspection/hps_empty_sdf_gt.pt',
+ # video_output_file=nerf_path+'/sdf_inspection/vis.mp4',
+ fig_title=fig_title,
+ slice_x_coord=args.slice_x_coord,
+ # cval_geo1=False
+ )
+
+ # interpolate_end_pts = nerf_path + '/sdf_inspection/interp_end_pts.pt'
+ # interpolate_end_pts_sdf = nerf_path + '/sdf_inspection/interp_end_pts_sdf.pt'
+ # pts = torch.load(interpolate_end_pts)
+ # sdf = torch.load(interpolate_end_pts_sdf)
+ # print(f"{pts.shape=}, {sdf.shape=}, {pts.shape[0] % 6=}")
+ # if pts.shape[0] % 6 == 0:
+ # pts_support_hypothesized = pts[:pts.shape[0]//6]
+ # sdf_support_hypothesized = sdf[:sdf.shape[0]//6]
+ # pts_vis_hypothesized = pts[pts.shape[0]//6:]
+ # sdf_vis_hypothesized = sdf[sdf.shape[0]//6:]
+ # visualize_three_pts_sdfs(
+ # mesh_cleaned_pts, None,
+ # pts_support_hypothesized, sdf_support_hypothesized,
+ # pts_vis_hypothesized, sdf_vis_hypothesized,
+ # )
+
+ # obj_file = 'pretrained.obj'
+ # mesh_pretrain = trimesh.load(obj_file, force='mesh')
+ # mesh_pretrain_pts = mesh_pretrain.sample(10000)
+ # mesh_pretrain_pts = torch.tensor(mesh_pretrain_pts)
+ # visualize_pts_sdfs(mesh_pretrain_pts, torch.zeros_like(mesh_pretrain_pts[:,0]))
+ # pts = torch.load('data/cube/cube_2/support_pts.pt')
+ # sdfs = torch.load('data/cube/cube_2/sdfs_from_cnets.pt')
+ # pts = torch.load('data/cube/cube_2/sampled_pts.pt')
+ # sdfs = torch.load('data/cube/cube_2/sdf_bounds_from_cnets.pt')
+ # visualize_pts_sdfs(pts, sdfs)
+
+ # visualize_two_pts_sdfs(mesh_pretrain_pts, None, mesh_cleaned_pts, None, 5000, False,False)
+
+ # visualize_two_pts_sdfs('sdf_ptsall_1.pt', 'sdf_ptsall_1_predsdf.pt', mesh_cleaned_pts, None, 5000)
+
+ # visualize_two_pts_sdfs('data/cube/cube_2/support_pts.pt', 'data/cube/cube_2/sdfs_from_cnets.pt', 'data/cube/cube_2/sampled_pts.pt', 'data/cube/cube_2/sdf_bounds_from_cnets.pt', 5000, False,False)
+ # visualize_two('contact_pts_gt.pt', 'sdf_pts1.pt')
+ # visualize_two('empty_pts1.pt', 'sdf_pts1.pt')
+
+ # visualize_pts_sdfs('data/cube/cube_2/10traj/support_pts.pt', 'data/cube/cube_2/10traj/sdfs_from_cnets.pt')
+ # visualize_pts_sdfs('assets/support_pts.pt', 'assets/sdfs_from_cnets.pt')
+ # visualize_two_pts_sdfs(mesh_cleaned_pts, None,
+ # 'results/cube_1/tagslam/bundlesdf_id_mt01/nerf_runs/bundlesdf_id_mt01/sdf_inspection/cps_near_pcd.pt', \
+ # 'results/cube_1/tagslam/bundlesdf_id_mt01/nerf_runs/bundlesdf_id_mt01/sdf_inspection/cps_near_sdf_gt.pt')
+ # visualize_two_pts_sdfs(mesh_cleaned_pts, None,
+ # 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02/nerf_runs/bundlesdf_id_02_bsdforig_lastckpt_slices50_cvxvo20_oct1/sdf_inspection/cps_near_pcd.pt', \
+ # 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02/nerf_runs/bundlesdf_id_02_bsdforig_lastckpt_slices50_cvxvo20_oct1/sdf_inspection/cps_near_sdf_pred.pt')
+
+ # visualize_slice_as_image(torch.load('results/cube_1/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00_slices200/sdf_inspection/cps_slices_predsdf.pt'), 'slices')
+ # visualize_slice_as_image(torch.load('results/2022-11-18-15-10-24_milk/bundlesdf_iteration_1/bundlesdf_id_00_slice200/nerf_runs/bundlesdf_id_00_slice200/sdf_inspection/cps_slices_predsdf.pt'), 'slices')
+ # visualize_slice_as_image(torch.load('results/milk_3/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_slices_predsdf.pt'), 'slices')
+ # visualize_slice_as_image(torch.load('results/cube_1-3/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_slices_predsdf.pt'), 'slices')
+
+ # visualize_slice_as_image(torch.load('results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02_bsdforig_lastckpt_slices50/nerf_runs/bundlesdf_id_02_bsdforig_lastckpt_slices200_cvx_oct2/sdf_inspection/cps_slices_predsdf.pt'), 'slices')
+ # visualize_two_pts_sdfs(mesh_cleaned_pts, None,
+ # 'results/cube_1-3/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_slices.pt', \
+ # 'results/cube_1-3/bundlesdf_iteration_1/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_slices_predsdf.pt')
+
+ # visualize_three_pts_sdfs(mesh_cleaned_pts, None,
+ # 'results/cube_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_near_pcd.pt',
+ # 'results/cube_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_near_sdf_pred.pt',
+ # 'results/cube_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_slices.pt', \
+ # 'results/cube_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00/sdf_inspection/cps_slices_predsdf.pt')
+
+ # visualize_three_pts_sdfs(
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r4/sdf_inspection/interp_end_pts.pt', \
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r4/sdf_inspection/interp_end_pts_sdf.pt',
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r4/sdf_inspection/cps_near_pcd.pt',
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r4/sdf_inspection/cps_near_sdf_pred.pt',
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r4/sdf_inspection/cps_slices.pt', \
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r4/sdf_inspection/cps_slices_predsdf.pt',)
+
+ # visualize_three_pts_sdfs(
+ # mesh_cleaned_pts, None,
+ # # mesh_cleaned_pts, None,
+ # 'results/bottle_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-nhc/sdf_inspection/cps_near_pcd.pt',
+ # 'results/bottle_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-nhc/sdf_inspection/cps_near_sdf_pred.pt',
+ # 'results/bottle_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-nhc/sdf_inspection/hps_near_pcd.pt',
+ # 'results/bottle_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-nhc/sdf_inspection/hps_near_sdf_pred.pt',
+ # )
+ # 'results/bottle_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-mhc/sdf_inspection/interp_pts.pt', \
+ # 'results/bottle_1-3/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-mhc/sdf_inspection/interp_pts_predsdf.pt',)
+
+ # 'results/bakingbox_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00_cvxw1000_s2_gtneg2sp1000nhc/sdf_inspection/hps_empty_pcd.pt', \
+ # 'results/bakingbox_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00_cvxw1000_s2_gtneg2sp1000nhc/sdf_inspection/hps_empty_sdf_gt.pt',)
+
+ # visualize_three_pts_sdfs(mesh_cleaned_pts, None,
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r2/sdf_inspection/cps_near_pcd.pt',
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r2/sdf_inspection/cps_near_sdf_pred.pt',
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r2/sdf_inspection/interp_end_pts.pt', \
+ # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r2/sdf_inspection/interp_end_pts_sdf.pt')
+ # # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r2/sdf_inspection/interp_pts.pt', \
+ # # 'results/bottle_1-2/bundlesdf_iteration_2/bundlesdf_id_00/nerf_runs/bundlesdf_id_00-r2/sdf_inspection/interp_pts_sdfub.pt')
+
+ # near_pts
+ # near_pts_predsdf
+ # visualize_three_pts_sdfs(mesh_cleaned_pts, None,
+ # 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02_bsdforig_lastckpt_slices50/nerf_runs/bundlesdf_id_02_bsdforig_lastckpt_slices50_neg/sdf_inspection/free_pts.pt',
+ # 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02_bsdforig_lastckpt_slices50/nerf_runs/bundlesdf_id_02_bsdforig_lastckpt_slices50_neg/sdf_inspection/free_pts_predsdf.pt',
+ # 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02_bsdforig_lastckpt_slices50/nerf_runs/bundlesdf_id_02_bsdforig_lastckpt_slices50_neg/sdf_inspection/uncertain_pts.pt', \
+ # 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02_bsdforig_lastckpt_slices50/nerf_runs/bundlesdf_id_02_bsdforig_lastckpt_slices50_neg/sdf_inspection/uncertain_pts_predsdf.pt')
+
+ # visualize_two_pts_sdfs(mesh_cleaned_pts, None,
+ # 'results/cube_2/bundlesdf_iteration_2/bundlesdf_id_02/nerf_runs/bundlesdf_id_02/sdf_inspection/cps_near_pcd.pt', \
+ # None)
+ # 'results/cube_1/bundlesdf_iteration_2/bundlesdf_id_02_corners/nerf_runs/bundlesdf_id_02_corners/sdf_inspection/cps_near_sdf_pred.pt')
+
+ # visualize_pts_sdfs('results/find_good_case/cube_9_1/hps_near_pcd.pt', 'results/find_good_case/cube_9_1/hps_near_sdf_pred.pt')
+
+ # visualize_pts_sdfs('sdf_ptsall_1.pt', 'sdf_ptsall_1_gtsdf.pt')
+ # visualize_pts_sdfs('empty_ptsall_1.pt', 'empty_ptsall_1_gtsdf.pt')
+ # visualize_pts_sdfs('sdf_ptsall_2.pt', 'sdf_ptsall_2_gtsdflb.pt')
+ # visualize_pts_sdfs('empty_ptsall_2.pt', 'empty_ptsall_2_gtsdflb.pt')
+ # visualize_pts_sdfs('empty_ptsall_2.pt', 'empty_ptsall_2_predsdf.pt')
+
+ # visualize_pts_sdfs('sdf_ptsall_1_s.pt', 'sdf_ptsall_1_gtsdf_s.pt')
+ # visualize_pts_sdfs('empty_ptsall_1_s.pt', 'empty_ptsall_1_gtsdf_s.pt')
+ # visualize_pts_sdfs('sdf_ptsall_2_s.pt', 'sdf_ptsall_2_gtsdflb_s.pt')
+ # visualize_pts_sdfs('empty_ptsall_2_s_mt.pt', 'empty_ptsall_2_predsdf_s_mt.pt')
+ # visualize_pts_sdfs('empty_ptsall_2_mt.pt', 'empty_ptsall_2_predsdf_mt.pt')
+
+ # visualize_pts_sdfs('sdf_ptsall_1_r.pt', 'sdf_ptsall_1_gtsdf_r.pt')
+ # visualize_pts_sdfs('empty_ptsall_1_r.pt', 'empty_ptsall_1_gtsdf_r.pt')
+ # visualize_pts_sdfs('sdf_ptsall_2_r.pt', 'sdf_ptsall_2_gtsdflb_r.pt')
+ # visualize_pts_sdfs('empty_ptsall_2_r.pt', 'empty_ptsall_2_gtsdflb_r.pt')
+ # visualize_pts_sdfs('empty_ptsall_2_r.pt', 'empty_ptsall_2_predsdf_r.pt')
+
+ # visualize_pts_sdfs('sdf_ptsall_1_d.pt', 'sdf_ptsall_1_gtsdf_d.pt')
+ # visualize_pts_sdfs('empty_ptsall_1_d.pt', 'empty_ptsall_1_gtsdf_d.pt')
+ # visualize_pts_sdfs('sdf_ptsall_2_d.pt', 'sdf_ptsall_2_gtsdflb_d.pt')
+ # visualize_pts_sdfs('empty_ptsall_2_d.pt', 'empty_ptsall_2_gtsdflb_d.pt')
\ No newline at end of file
diff --git a/bundlesdf.py b/bundlesdf.py
deleted file mode 100644
index 66a2bfb..0000000
--- a/bundlesdf.py
+++ /dev/null
@@ -1,818 +0,0 @@
-# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-
-from Utils import *
-from nerf_runner import *
-from tool import *
-CODE_DIR = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(f'{CODE_DIR}/BundleTrack/build')
-import my_cpp
-from gui import *
-from BundleTrack.scripts.data_reader import *
-from Utils import *
-from loftr_wrapper import LoftrRunner
-import multiprocessing,threading
-try:
- multiprocessing.set_start_method('spawn')
-except:
- pass
-
-
-def run_gui(gui_dict, gui_lock):
- print("GUI started")
- with gui_lock:
- gui = BundleSdfGui(img_height=200)
- gui_dict['started'] = True
-
- local_dict = {}
-
- while dpg.is_dearpygui_running():
- with gui_lock:
- if gui_dict['join']:
- break
-
- for k in ['mesh','color','mask','ob_in_cam','id_str','K','n_keyframe','nerf_num_frames']:
- if k in gui_dict:
- local_dict[k] = gui_dict[k]
- del gui_dict[k]
-
- if 'nerf_num_frames' in local_dict:
- gui.set_nerf_num_frames(local_dict['nerf_num_frames'])
-
- if 'mesh' in local_dict:
- logging.info(f"mesh V: {local_dict['mesh'].vertices.shape}")
- gui.update_mesh(local_dict['mesh'])
-
- if 'color' in local_dict:
- gui.update_frame(rgb=local_dict['color'], mask=local_dict['mask'], ob_in_cam=local_dict['ob_in_cam'], id_str=local_dict['id_str'], K=local_dict['K'], n_keyframe=local_dict['n_keyframe'])
-
- local_dict = {}
-
- dpg.render_dearpygui_frame()
- time.sleep(0.03)
-
- dpg.destroy_context()
-
-
-
-def run_nerf(p_dict, kf_to_nerf_list, lock, cfg_nerf, translation, sc_factor, start_nerf_keyframes, use_gui, gui_lock, gui_dict, debug_dir, with_cnets):
- vox_res = 0.01
- nerf_num_frames = 0
- cnt_nerf = -1
- rgbs_all = []
- depths_all = []
- normal_maps_all = []
- masks_all = []
- occ_masks_all = []
- prev_pcd_real_scale = None
- tf_normalize = None
- if translation is not None:
- tf_normalize = np.eye(4)
- tf_normalize[:3,3] = translation
- tf1 = np.eye(4)
- tf1[:3,:3] *= sc_factor
- tf_normalize = tf1@tf_normalize
- cfg_nerf['sc_factor'] = float(sc_factor)
- cfg_nerf['translation'] = translation
-
- with lock:
- SPDLOG = p_dict['SPDLOG']
-
- while 1:
- with lock:
- join = p_dict['join']
-
- if join:
- break
-
- skip = False
- with lock:
-      if (cnt_nerf==-1 and len(kf_to_nerf_list)>=start_nerf_keyframes) or (cnt_nerf>=0 and len(kf_to_nerf_list)>0):
- p_dict['running'] = True
- frame_id = p_dict['frame_id']
- cam_in_obs = p_dict['cam_in_obs'].copy()
- rgbs = []
- depths = []
- normal_maps = []
- masks = []
- occ_masks = []
- for f in kf_to_nerf_list:
- rgbs.append(f['rgb'])
- depths.append(f['depth'])
- masks.append(f['mask'])
- if f['normal_map'] is not None:
- normal_maps.append(f['normal_map'])
- if f['occ_mask'] is not None:
- occ_masks.append(f['occ_mask'])
- K = p_dict['K']
- nerf_num_frames += len(rgbs)
- p_dict['nerf_num_frames'] = nerf_num_frames
- kf_to_nerf_list[:] = []
- if use_gui:
- with gui_lock:
- gui_dict['nerf_num_frames'] = nerf_num_frames
- else:
- skip = True
-
- if skip:
- time.sleep(0.01)
- continue
-
- cnt_nerf += 1
- rgbs_all += list(rgbs)
- depths_all += list(depths)
- masks_all += list(masks)
- if normal_maps is not None:
- normal_maps_all += list(normal_maps)
- if occ_masks is not None:
- occ_masks_all += list(occ_masks)
-
- out_dir = f"{debug_dir}/{frame_id}/nerf"
- logging.info(f"out_dir: {out_dir}")
- os.makedirs(out_dir, exist_ok=True)
- os.system(f"rm -rf {cfg_nerf['datadir']} && mkdir -p {cfg_nerf['datadir']}")
-
- glcam_in_obs = cam_in_obs@glcam_in_cvcam
-
- if cfg_nerf['continual']:
- if cnt_nerf==0:
- if translation is None:
- # if with_cnets:
- # offset = np.loadtxt(cfg_nerf['debug_dir']+'offset.txt')
- # sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds_from_cn(None,glcam_in_obs,K,use_mask=True,base_dir=cfg_nerf['save_dir'],rgbs=np.array(rgbs_all),depths=np.array(depths_all),masks=np.array(masks_all), eps=cfg_nerf['dbscan_eps'], min_samples=cfg_nerf['dbscan_eps_min_samples'], annotated_poses_dir=None,offset=offset)
- # else:
- # sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds(None, glcam_in_obs,K,use_mask=True,base_dir=cfg_nerf['save_dir'],rgbs=np.array(rgbs_all),depths=np.array(depths_all),masks=np.array(masks_all), eps=cfg_nerf['dbscan_eps'], min_samples=cfg_nerf['dbscan_eps_min_samples'])
- sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds(None, glcam_in_obs,K,use_mask=True,base_dir=cfg_nerf['save_dir'],rgbs=np.array(rgbs_all),depths=np.array(depths_all),masks=np.array(masks_all), eps=cfg_nerf['dbscan_eps'], min_samples=cfg_nerf['dbscan_eps_min_samples'])
- sc_factor *= 0.7 # Ensure whole object within bound
- cfg_nerf['sc_factor'] = float(sc_factor)
- cfg_nerf['translation'] = translation
- tf_normalize = np.eye(4)
- tf_normalize[:3,3] = translation
- tf1 = np.eye(4)
- tf1[:3,:3] *= sc_factor
- tf_normalize = tf1@tf_normalize
-
- pcd_all = pcd_real_scale
-
- else:
- pcd_all = prev_pcd_real_scale
- for i in range(len(rgbs)):
- pts, colors = compute_scene_bounds_worker(None,K,glcam_in_obs[len(glcam_in_obs)-len(rgbs)+i],use_mask=True,rgb=rgbs[i],depth=depths[i],mask=masks[i])
- pcd_all += toOpen3dCloud(pts, colors)
- pcd_all = pcd_all.voxel_down_sample(vox_res)
- _,keep_mask = find_biggest_cluster(np.asarray(pcd_all.points), eps=cfg_nerf['dbscan_eps'], min_samples=cfg_nerf['dbscan_eps_min_samples'])
- keep_ids = np.arange(len(np.asarray(pcd_all.points)))[keep_mask]
- pcd_all = pcd_all.select_by_index(keep_ids)
-
- ########## Clear memory
- rgbs_all = []
- depths_all = []
- normal_maps_all = []
- masks_all = []
- occ_masks_all = []
-
- pcd_normalized = copy.deepcopy(pcd_all)
- pcd_normalized.transform(tf_normalize)
- if normal_maps is not None and len(normal_maps)>0:
- normal_maps = np.array(normal_maps)
- else:
- normal_maps = None
- rgbs,depths,masks,normal_maps,poses = preprocess_data(np.array(rgbs),np.array(depths),np.array(masks),normal_maps=normal_maps,poses=glcam_in_obs,sc_factor=cfg_nerf['sc_factor'],translation=cfg_nerf['translation'])
-
- else:
- logging.info(f"compute_scene_bounds, latest nerf frame {frame_id}")
- # if with_cnets:
- # offset = np.loadtxt(cfg_nerf['debug_dir']+'offset.txt')
- # sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds_from_cn(None,glcam_in_obs,K,use_mask=True,base_dir=cfg_nerf['save_dir'],rgbs=np.array(rgbs_all),depths=np.array(depths_all),masks=np.array(masks_all), eps=cfg_nerf['dbscan_eps'], min_samples=cfg_nerf['dbscan_eps_min_samples'],annotated_poses_dir=cfg_nerf['data_dir']+'/annotated_poses/',offset=offset)
- # else:
- # sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds(None, glcam_in_obs,K,use_mask=True,base_dir=cfg_nerf['save_dir'],rgbs=np.array(rgbs_all),depths=np.array(depths_all),masks=np.array(masks_all), eps=cfg_nerf['dbscan_eps'], min_samples=cfg_nerf['dbscan_eps_min_samples'])
- sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds(None, glcam_in_obs,K,use_mask=True,base_dir=cfg_nerf['save_dir'],rgbs=np.array(rgbs_all),depths=np.array(depths_all),masks=np.array(masks_all), eps=cfg_nerf['dbscan_eps'], min_samples=cfg_nerf['dbscan_eps_min_samples'])
-
- cfg_nerf['sc_factor'] = float(sc_factor)
- cfg_nerf['translation'] = translation
-
- if normal_maps_all is not None and len(normal_maps_all)>0:
- normal_maps = np.array(normal_maps_all)
- else:
- normal_maps = None
-
- logging.info(f"preprocess_data, latest nerf frame {frame_id}")
- rgbs,depths,masks,normal_maps,poses = preprocess_data(np.array(rgbs_all),np.array(depths_all),np.array(masks_all),normal_maps=normal_maps,poses=glcam_in_obs,sc_factor=cfg_nerf['sc_factor'],translation=cfg_nerf['translation'])
-
- # cfg_nerf['sampled_frame_ids'] = np.arange(len(rgbs_all))
-
-
- if SPDLOG>=2:
- np.savetxt(f"{cfg_nerf['save_dir']}/trainval_poses.txt",glcam_in_obs.reshape(-1,4))
- np.savetxt(f"{debug_dir}/{frame_id}/poses_before_nerf.txt",np.array(cam_in_obs).reshape(-1,4))
-
- if len(occ_masks_all)>0:
- if cfg_nerf['continual']:
- occ_masks = np.array(occ_masks)
- else:
- occ_masks = np.array(occ_masks_all)
- else:
- occ_masks = None
-
- if cnt_nerf==0:
- logging.info(f"First nerf run, create Runner, latest nerf frame {frame_id}")
- nerf = NerfRunner(cfg_nerf,rgbs,depths=depths,masks=masks,normal_maps=normal_maps,occ_masks=occ_masks,poses=poses,K=K,build_octree_pcd=pcd_normalized)
- else:
- if cfg_nerf['continual']:
- logging.info(f"add_new_frames, latest nerf frame {frame_id}")
- nerf.add_new_frames(rgbs,depths,masks,normal_maps,poses,occ_masks=occ_masks, new_pcd=pcd_normalized, reuse_weights=False)
- else:
- nerf = NerfRunner(cfg_nerf,rgbs,depths=depths,masks=masks,normal_maps=normal_maps,occ_masks=occ_masks,poses=poses,K=K,build_octree_pcd=pcd_normalized)
-
- logging.info(f"Start training, latest nerf frame {frame_id}")
- nerf.train()
- logging.info(f"Training done, latest nerf frame {frame_id}")
-
- optimized_cvcam_in_obs,offset = get_optimized_poses_in_real_world(poses,nerf.models['pose_array'],cfg_nerf['sc_factor'],cfg_nerf['translation'])
-
- logging.info("Getting mesh")
- mesh = nerf.extract_mesh(isolevel=0,voxel_size=cfg_nerf['mesh_resolution'])
- mesh = mesh_to_real_world(mesh, pose_offset=offset, translation=nerf.cfg['translation'], sc_factor=nerf.cfg['sc_factor'])
-
- with lock:
- p_dict['optimized_cvcam_in_obs'] = optimized_cvcam_in_obs
- p_dict['running'] = False
- # p_dict['nerf_last'] = nerf #!NOTE not pickable
- p_dict['mesh'] = mesh
-
- logging.info(f"nerf done at frame {frame_id}")
-
- if cfg_nerf['continual']:
- prev_pcd_real_scale = pcd_all.voxel_down_sample(vox_res)
-
- ####### Log
- if SPDLOG>=2:
- os.system(f"cp -r {cfg_nerf['save_dir']}/image_step_*.png {out_dir}/")
- with open(f"{out_dir}/config.yml",'w') as ff:
- tmp = copy.deepcopy(cfg_nerf)
- for k in tmp.keys():
- if isinstance(tmp[k],np.ndarray):
- tmp[k] = tmp[k].tolist()
- yaml.dump(tmp,ff)
- shutil.copy(f"{out_dir}/config.yml",f"{cfg_nerf['save_dir']}/")
- np.savetxt(f"{debug_dir}/{frame_id}/poses_after_nerf.txt",np.array(optimized_cvcam_in_obs).reshape(-1,4))
- logging.info(f'>>>>>>>>>>>>> saving opt poses to {debug_dir}/{frame_id}/poses_after_nerf.txt')
- mesh.export(f"{cfg_nerf['save_dir']}/mesh_real_world.obj")
- os.system(f"rm -rf {cfg_nerf['save_dir']}/step_*_mesh_real_world.obj {cfg_nerf['save_dir']}/*frame*ray*.ply && mv {cfg_nerf['save_dir']}/* {out_dir}/")
-
-
-
-
-class BundleSdf:
- def __init__(self, cfg_track_dir=f"{CODE_DIR}/config_ho3d.yml", cfg_nerf_dir=f'{CODE_DIR}/config.yml', start_nerf_keyframes=10, translation=None, sc_factor=None, use_gui=False, with_cnets=False):
- with open(cfg_track_dir,'r') as ff:
- self.cfg_track = yaml.load(ff)
- self.debug_dir = self.cfg_track["debug_dir"]
- self.SPDLOG = self.cfg_track["SPDLOG"]
- self.start_nerf_keyframes = start_nerf_keyframes
- self.use_gui = use_gui
- self.translation = None
- self.sc_factor = None
- if sc_factor is not None:
- self.translation = translation
- self.sc_factor = sc_factor
- self.with_cnets = with_cnets
-
- code_dir = os.path.dirname(os.path.realpath(__file__))
- with open(cfg_nerf_dir,'r') as ff:
- self.cfg_nerf = yaml.load(ff)
- self.cfg_nerf['notes'] = ''
- self.cfg_nerf['bounding_box'] = np.array(self.cfg_nerf['bounding_box']).reshape(2,3)
-
- self.manager = multiprocessing.Manager()
-
- if self.use_gui:
- self.gui_lock = multiprocessing.Lock()
- self.gui_dict = self.manager.dict()
- self.gui_dict['join'] = False
- self.gui_dict['started'] = False
- self.gui_worker = multiprocessing.Process(target=run_gui, args=(self.gui_dict, self.gui_lock))
- self.gui_worker.start()
- else:
- self.gui_lock = None
- self.gui_dict = None
-
- self.p_dict = self.manager.dict()
- self.kf_to_nerf_list = self.manager.list()
- self.lock = multiprocessing.Lock()
- self.p_dict['running'] = False
- self.p_dict['join'] = False
- self.p_dict['nerf_num_frames'] = 0
-
- self.p_dict['SPDLOG'] = self.SPDLOG
- self.p_nerf = multiprocessing.Process(target=run_nerf, args=(self.p_dict, self.kf_to_nerf_list, self.lock, self.cfg_nerf, self.translation, self.sc_factor, start_nerf_keyframes, self.use_gui, self.gui_lock, self.gui_dict, self.debug_dir, self.with_cnets))
- self.p_nerf.start()
-
- # self.p_dict = {}
- # self.lock = threading.Lock()
- # self.p_dict['running'] = False
- # self.p_dict['join'] = False
- # self.p_nerf = threading.Thread(target=self.run_nerf, args=(self.p_dict, self.lock))
- # self.p_nerf.start()
-
- yml = my_cpp.YamlLoadFile(cfg_track_dir)
- self.bundler = my_cpp.Bundler(yml)
- self.loftr = LoftrRunner()
- self.cnt = -1
- self.K = None
- self.mesh = None
-
-
- def on_finish(self):
- if self.use_gui:
- with self.gui_lock:
- self.gui_dict['join'] = True
- self.gui_worker.join()
-
- with self.lock:
- self.p_dict['join'] = True
- self.p_nerf.join()
- with self.lock:
- if self.p_dict['running']==False and 'optimized_cvcam_in_obs' in self.p_dict:
- for i_f in range(len(self.p_dict['optimized_cvcam_in_obs'])):
- self.bundler._keyframes[i_f]._pose_in_model = self.p_dict['optimized_cvcam_in_obs'][i_f]
- self.bundler._keyframes[i_f]._nerfed = True
- del self.p_dict['optimized_cvcam_in_obs']
-
-
- def make_frame(self, color, depth, K, id_str, mask=None, occ_mask=None, pose_in_model=np.eye(4)):
- H,W = color.shape[:2]
- roi = [0,W-1,0,H-1]
- frame = my_cpp.Frame(color,depth,roi,pose_in_model,self.cnt,id_str,K,self.bundler.yml)
- if mask is not None:
- frame._fg_mask = my_cpp.cvMat(mask)
- if occ_mask is not None:
- frame._occ_mask = my_cpp.cvMat(occ_mask)
- return frame
-
-
- def find_corres(self, frame_pairs):
- logging.info(f"frame_pairs: {len(frame_pairs)}")
- is_match_ref = len(frame_pairs)==1 and frame_pairs[0][0]._ref_frame_id==frame_pairs[0][1]._id and self.bundler._newframe==frame_pairs[0][0]
-
- imgs, tfs, query_pairs = self.bundler._fm.getProcessedImagePairs(frame_pairs)
- imgs = np.array([np.array(img) for img in imgs])
-
- if len(query_pairs)==0:
- return
-
- corres = self.loftr.predict(rgbAs=imgs[::2], rgbBs=imgs[1::2])
- for i_pair in range(len(query_pairs)):
- cur_corres = corres[i_pair][:,:4]
- tfA = np.array(tfs[i_pair*2])
- tfB = np.array(tfs[i_pair*2+1])
- cur_corres[:,:2] = transform_pts(cur_corres[:,:2], np.linalg.inv(tfA))
- cur_corres[:,2:4] = transform_pts(cur_corres[:,2:4], np.linalg.inv(tfB))
- self.bundler._fm._raw_matches[query_pairs[i_pair]] = cur_corres.round().astype(np.uint16)
-
- min_match_with_ref = self.cfg_track["feature_corres"]["min_match_with_ref"]
-
-    if is_match_ref and len(self.bundler._fm._raw_matches[frame_pairs[0]])<min_match_with_ref:
-      return
-
-
-  def process_new_frame(self, frame):
-    self.bundler._newframe = frame
-
-    if len(self.bundler._frames)>0:
- ref_frame = self.bundler._frames[list(self.bundler._frames.keys())[-1]]
- frame._ref_frame_id = ref_frame._id
- frame._pose_in_model = ref_frame._pose_in_model
- logging.info(f"pose_in_model")
- else:
- self.bundler._firstframe = frame
-
- frame.invalidatePixelsByMask(frame._fg_mask)
- logging.info(f"{frame._id}")
- logging.info(f"test{np.array(frame._pose_in_model)}")
- if frame._id==0 and np.abs(np.array(frame._pose_in_model)-np.eye(4)).max()<=1e-4:
- logging.info(f"first frame, set new init coordinate")
- frame.setNewInitCoordinate()
- logging.info(f"new coordinate")
-
-
- n_fg = (np.array(frame._fg_mask)>0).sum()
- logging.info(f"n_fg{n_fg}")
- if n_fg<100:
- logging.info(f"Frame {frame._id_str} cloud is empty, marked FAIL, roi={n_fg}")
- frame._status = my_cpp.Frame.FAIL;
- self.bundler.forgetFrame(frame)
- return
-
- if self.cfg_track["depth_processing"]["denoise_cloud"]:
- frame.pointCloudDenoise()
- logging.info(f"denoise")
- n_valid = frame.countValidPoints()
- n_valid_first = self.bundler._firstframe.countValidPoints()
-      if n_valid<n_valid_first/40.0:
-        logging.info(f"frame {frame._id_str} cloud too empty after denoise, marked FAIL")
-        frame._status = my_cpp.Frame.FAIL
-        self.bundler.forgetFrame(frame)
-        return
-
-    # If the current ref frame shares too few feature matches, fall back to an older keyframe
-    min_match_with_ref = self.cfg_track["feature_corres"]["min_match_with_ref"]
-    ref_frame = self.bundler._frames[frame._ref_frame_id]
-    self.find_corres([(frame, ref_frame)])
-    found = len(self.bundler._fm._raw_matches[(frame, ref_frame)])>=min_match_with_ref
-    if not found:
-      for kf in reversed(self.bundler._keyframes):
-        self.find_corres([(frame, kf)])
-        if len(self.bundler._fm._raw_matches[(frame, kf)])>=min_match_with_ref:
-          ref_frame = kf
-          frame._ref_frame_id = kf._id
-          frame._pose_in_model = kf._pose_in_model
- logging.info(f"re-choose new ref frame to {kf._id_str}")
- found = True
- break
-
- if not found:
- frame._status = my_cpp.Frame.FAIL
- logging.info(f"frame {frame._id_str} has not suitable ref_frame, mark as FAIL")
- self.bundler.forgetFrame(frame)
- return
-
- logging.info(f"frame {frame._id_str} pose update before\n{frame._pose_in_model.round(3)}")
- offset = self.bundler._fm.procrustesByCorrespondence(frame, ref_frame)
- frame._pose_in_model = offset@frame._pose_in_model
- logging.info(f"frame {frame._id_str} pose update after\n{frame._pose_in_model.round(3)}")
-
- window_size = self.cfg_track["bundle"]["window_size"]
- if len(self.bundler._frames)-len(self.bundler._keyframes)>window_size:
- for k in self.bundler._frames:
- f = self.bundler._frames[k]
- isforget = self.bundler.forgetFrame(f)
- if isforget:
- logging.info(f"exceed window size, forget frame {f._id_str}")
- break
-
- self.bundler._frames[frame._id] = frame
-
- self.bundler.selectKeyFramesForBA()
-
- local_frames = self.bundler._local_frames
-
- pairs = self.bundler.getFeatureMatchPairs(self.bundler._local_frames)
- self.find_corres(pairs)
- if frame._status==my_cpp.Frame.FAIL:
- self.bundler.forgetFrame(frame)
- return
-
- find_matches = False
- self.bundler.optimizeGPU(local_frames, find_matches)
-
- if frame._status==my_cpp.Frame.FAIL:
- self.bundler.forgetFrame(frame)
- return
-
- self.bundler.checkAndAddKeyframe(frame)
-
-
-
- def run(self, color, depth, K, id_str, mask=None, occ_mask=None, pose_in_model=np.eye(4)):
- self.cnt += 1
-
- if self.K is None:
- self.K = K
- with self.lock:
- self.p_dict['K'] = self.K
-
- if self.use_gui:
- while 1:
- with self.gui_lock:
- started = self.gui_dict['started']
- if not started:
- time.sleep(1)
- logging.info("Waiting for GUI")
- continue
- break
-
- H,W = color.shape[:2]
- # depth = depth * 0.95
- percentile = self.cfg_track['depth_processing']["percentile"]
- # percentile = 100
- if percentile<100: # Denoise
- logging.info("percentile denoise start")
- valid = (depth>=0.1) & (mask>0)
- thres = np.percentile(depth[valid], percentile)
- depth[depth>=thres] = 0
- logging.info("percentile denoise done")
-
- frame = self.make_frame(color, depth, K, id_str, mask, occ_mask, pose_in_model)
- os.makedirs(f"{self.debug_dir}/{frame._id_str}", exist_ok=True)
-
- logging.info(f"processNewFrame start {frame._id_str}")
- # self.bundler.processNewFrame(frame)
- self.process_new_frame(frame)
- logging.info(f"processNewFrame done {frame._id_str}")
-
- if self.bundler._keyframes[-1]==frame:
- logging.info(f"{frame._id_str} prepare data for nerf")
-
- with self.lock:
- self.p_dict['frame_id'] = frame._id_str
- self.p_dict['running'] = True
- self.kf_to_nerf_list.append({
- 'rgb': np.array(frame._color).reshape(H,W,3)[...,::-1].copy(),
- 'depth': np.array(frame._depth).reshape(H,W).copy(),
- 'mask': np.array(frame._fg_mask).reshape(H,W).copy(),
- # 'occ_mask': occ_mask.reshape(H,W),
- # 'normal_map': np.array(frame._normal_map).copy(),
- 'occ_mask': None,
- 'normal_map': None,
- })
- cam_in_obs = []
- for f in self.bundler._keyframes:
- cam_in_obs.append(np.array(f._pose_in_model).copy())
- self.p_dict['cam_in_obs'] = np.array(cam_in_obs)
-
- if self.SPDLOG>=2:
- with open(f"{self.debug_dir}/{frame._id_str}/nerf_frames.txt",'w') as ff:
- for f in self.bundler._keyframes:
- ff.write(f"{f._id_str}\n")
-
- ############# Wait for sync
- while 1:
- with self.lock:
- running = self.p_dict['running']
- nerf_num_frames = self.p_dict['nerf_num_frames']
- if not running:
- break
- if len(self.bundler._keyframes)-nerf_num_frames>=self.cfg_nerf['sync_max_delay']:
- time.sleep(0.01)
- # logging.info(f"wait for sync len(self.bundler._keyframes):{len(self.bundler._keyframes)}, nerf_num_frames:{nerf_num_frames}")
- continue
- break
-
- rematch_after_nerf = self.cfg_track["feature_corres"]["rematch_after_nerf"]
- logging.info(f"rematch_after_nerf: {rematch_after_nerf}")
- frames_large_update = []
- with self.lock:
- if 'optimized_cvcam_in_obs' in self.p_dict:
- for i_f in range(len(self.p_dict['optimized_cvcam_in_obs'])):
- if rematch_after_nerf:
- trans_update = np.linalg.norm(self.p_dict['optimized_cvcam_in_obs'][i_f][:3,3]-self.bundler._keyframes[i_f]._pose_in_model[:3,3])
- rot_update = geodesic_distance(self.p_dict['optimized_cvcam_in_obs'][i_f][:3,:3], self.bundler._keyframes[i_f]._pose_in_model[:3,:3])
- if trans_update>=0.005 or rot_update>=5/180.0*np.pi:
- frames_large_update.append(self.bundler._keyframes[i_f])
- logging.info(f"{self.bundler._keyframes[i_f]._id_str}, trans_update={trans_update}, rot_update={rot_update}")
- self.bundler._keyframes[i_f]._pose_in_model = self.p_dict['optimized_cvcam_in_obs'][i_f]
- self.bundler._keyframes[i_f]._nerfed = True
- logging.info(f"synced pose from nerf, latest nerf frame {self.bundler._keyframes[len(self.p_dict['optimized_cvcam_in_obs'])-1]._id_str}")
- del self.p_dict['optimized_cvcam_in_obs']
-
- if self.use_gui:
- with self.gui_lock:
- if 'mesh' in self.p_dict:
- self.gui_dict['mesh'] = self.p_dict['mesh']
- del self.p_dict['mesh']
-
- if rematch_after_nerf:
- if len(frames_large_update)>0:
- with self.lock:
- nerf_num_frames = self.p_dict['nerf_num_frames']
- logging.info(f"before matches keys: {len(self.bundler._fm._matches)}")
- ks = list(self.bundler._fm._matches.keys())
- for k in ks:
- if k[0] in frames_large_update or k[1] in frames_large_update:
- del self.bundler._fm._matches[k]
- logging.info(f"Delete match between {k[0]._id_str} and {k[1]._id_str}")
- logging.info(f"after matches keys: {len(self.bundler._fm._matches)}")
-
- self.bundler.saveNewframeResult()
- if self.SPDLOG>=2 and occ_mask is not None:
- os.makedirs(f'{self.debug_dir}/occ_mask/', exist_ok=True)
- cv2.imwrite(f'{self.debug_dir}/occ_mask/{frame._id_str}.png', occ_mask)
-
- if self.use_gui:
- ob_in_cam = np.linalg.inv(frame._pose_in_model)
- with self.gui_lock:
- self.gui_dict['color'] = color[...,::-1]
- self.gui_dict['mask'] = mask
- self.gui_dict['ob_in_cam'] = ob_in_cam
- self.gui_dict['id_str'] = frame._id_str
- self.gui_dict['K'] = self.K
- self.gui_dict['n_keyframe'] = len(self.bundler._keyframes)
-
-
-
- def run_global_nerf(self, reader=None, get_texture=False, tex_res=1024):
- '''
-    @reader: optional data reader; pass it to use the full-resolution raw images instead of the
-             downscaled outputs saved under debug_dir
- '''
- self.K = np.loadtxt(f'{self.debug_dir}/cam_K.txt').reshape(3,3)
-
- tmp = sorted(glob.glob(f"{self.debug_dir}/ob_in_cam/*"))
- last_stamp = os.path.basename(tmp[-1]).replace('.txt','')
- logging.info(f'last_stamp {last_stamp}')
- keyframes = yaml.load(open(f'{self.debug_dir}/{last_stamp}/keyframes.yml','r'))
- logging.info(f"keyframes#: {len(keyframes)}")
- keys = list(keyframes.keys())
- if len(keyframes)>self.cfg_nerf['n_train_image']:
- keys = [keys[0]] + list(np.random.choice(keys, self.cfg_nerf['n_train_image'], replace=False))
- keys = list(set(keys))
- logging.info(f"frame_ids too large, select subset num: {len(keys)}")
-
- frame_ids = []
- for k in keys:
- frame_ids.append(k.replace('keyframe_',''))
-
- cam_in_obs = []
- for k in keys:
- cam_in_ob = np.array(keyframes[k]['cam_in_ob']).reshape(4,4)
- cam_in_obs.append(cam_in_ob)
- cam_in_obs = np.array(cam_in_obs)
-
- out_dir = f"{self.debug_dir}/final/nerf"
- os.system(f"rm -rf {out_dir} && mkdir -p {out_dir}")
- os.system(f'rm -rf {self.debug_dir}/final/used_rgbs/ && mkdir -p {self.debug_dir}/final/used_rgbs/')
-
- rgbs = []
- depths = []
- normal_maps = []
- masks = []
- occ_masks = []
- for frame_id in frame_ids:
- if reader is not None:
- self.K = reader.K.copy()
- id = reader.id_strs.index(frame_id)
- rgbs.append(reader.get_color(id))
- depths.append(reader.get_depth(id))
- masks.append(reader.get_mask(id))
- else:
- self.cfg_nerf['down_scale_ratio'] = 1 # Images have been downscaled in tracking outputs
- rgb_file = f"{self.debug_dir}/color_segmented/{frame_id}.png"
- shutil.copy(rgb_file, f'{self.debug_dir}/final/used_rgbs/')
- rgb = imageio.imread(rgb_file)
- depth = cv2.imread(rgb_file.replace('color_segmented','depth_filtered'),-1)/1e3
- mask = cv2.imread(rgb_file.replace('color_segmented','mask'),-1)
- rgbs.append(rgb)
- depths.append(depth)
- masks.append(mask)
-
- glcam_in_obs = cam_in_obs@glcam_in_cvcam
-
- self.cfg_nerf['sc_factor'] = None
- self.cfg_nerf['translation'] = None
-
- ######### Reuse normalization
- files = sorted(glob.glob(f"{self.debug_dir}/**/nerf/config.yml", recursive=True))
- if len(files)>0:
- tmp = yaml.load(open(files[-1],'r'))
- self.cfg_nerf['sc_factor'] = float(tmp['sc_factor'])
- self.cfg_nerf['translation'] = np.array(tmp['translation'])
- # if self.with_cnets:
- # offset = np.loadtxt(self.debug_dir+'/offset.txt')
- # sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds_from_cn(None,glcam_in_obs,self.K,use_mask=True,base_dir=self.cfg_nerf['save_dir'],rgbs=np.array(rgbs),depths=np.array(depths),masks=np.array(masks), cluster=True, eps=0.01, min_samples=5, sc_factor=self.cfg_nerf['sc_factor'], translation_cvcam=self.cfg_nerf['translation'],annotated_poses_dir=self.cfg_nerf['data_dir']+'/annotated_poses/',offset=offset)
- # else:
- # sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds(None, glcam_in_obs,self.K,use_mask=True,base_dir=self.cfg_nerf['save_dir'],rgbs=np.array(rgbs),depths=np.array(depths),masks=np.array(masks), eps=0.01, min_samples=5,sc_factor=self.cfg_nerf['sc_factor'], translation_cvcam=self.cfg_nerf['translation'])
- sc_factor,translation,pcd_real_scale, pcd_normalized = compute_scene_bounds(None, glcam_in_obs,self.K,use_mask=True,base_dir=self.cfg_nerf['save_dir'],rgbs=np.array(rgbs),depths=np.array(depths),masks=np.array(masks), eps=0.01, min_samples=5,sc_factor=self.cfg_nerf['sc_factor'], translation_cvcam=self.cfg_nerf['translation'])
-
- self.cfg_nerf['sc_factor'] = float(sc_factor)
- self.cfg_nerf['translation'] = translation
-
- if normal_maps is not None and len(normal_maps)>0:
- normal_maps = np.array(normal_maps)
- else:
- normal_maps = None
-
- rgbs_raw = np.array(rgbs).copy()
- rgbs,depths,masks,normal_maps,poses = preprocess_data(np.array(rgbs),depths=np.array(depths),masks=np.array(masks),normal_maps=normal_maps,poses=glcam_in_obs,sc_factor=self.cfg_nerf['sc_factor'],translation=self.cfg_nerf['translation'])
-
- self.cfg_nerf['sampled_frame_ids'] = np.arange(len(rgbs))
-
- np.savetxt(f"{self.cfg_nerf['save_dir']}/trainval_poses.txt",glcam_in_obs.reshape(-1,4))
-
- if len(occ_masks)>0:
- occ_masks = np.array(occ_masks)
- else:
- occ_masks = None
- self.cfg_nerf['debug_dir']=self.debug_dir
- nerf = NerfRunner(self.cfg_nerf,rgbs,depths=depths,masks=masks,normal_maps=normal_maps,occ_masks=occ_masks,poses=poses,K=self.K,build_octree_pcd=pcd_normalized)
- print("Start training")
- nerf.train()
- optimized_cvcam_in_obs,offset = get_optimized_poses_in_real_world(poses,nerf.models['pose_array'],self.cfg_nerf['sc_factor'],self.cfg_nerf['translation'])
-
- ####### Log
- os.system(f"cp -r {self.cfg_nerf['save_dir']}/image_step_*.png {out_dir}/")
- with open(f"{out_dir}/config.yml",'w') as ff:
- tmp = copy.deepcopy(self.cfg_nerf)
- for k in tmp.keys():
- if isinstance(tmp[k],np.ndarray):
- tmp[k] = tmp[k].tolist()
- yaml.dump(tmp,ff)
- shutil.copy(f"{out_dir}/config.yml",f"{self.cfg_nerf['save_dir']}/")
- os.system(f"mv {self.cfg_nerf['save_dir']}/* {out_dir}/ && rm -rf {out_dir}/step_*_mesh_real_world.obj {out_dir}/*frame*ray*.ply")
-
- torch.cuda.empty_cache()
-
- np.savetxt(f"{self.debug_dir}/{frame_id}/poses_after_nerf.txt",np.array(optimized_cvcam_in_obs).reshape(-1,4))
-
- # mesh_files = sorted(glob.glob(f"{self.debug_dir}/final/nerf/step_*_mesh_normalized_space.obj"))
- # mesh = trimesh.load(mesh_files[-1])
-
- mesh,sigma,query_pts = nerf.extract_mesh(voxel_size=self.cfg_nerf['mesh_resolution'],isolevel=0, return_sigma=True)
- mesh.merge_vertices()
- ms = trimesh_split(mesh, min_edge=100)
- largest_size = 0
- largest = None
- for m in ms:
- # mean = m.vertices.mean(axis=0)
- # if np.linalg.norm(mean)>=0.1*nerf.cfg['sc_factor']:
- # continue
- if m.vertices.shape[0]>largest_size:
- largest_size = m.vertices.shape[0]
- largest = m
- mesh = largest
- mesh.export(f'{self.debug_dir}/mesh_cleaned.obj')
-
-  # NOTE: texture extraction below hangs, so it is disabled for now
- # if get_texture:
- # mesh = nerf.mesh_texture_from_train_images(mesh, rgbs_raw=rgbs_raw, train_texture=False, tex_res=tex_res)
- np.savetxt(f"{self.debug_dir}/offset.txt", offset)
- mesh = mesh_to_real_world(mesh, pose_offset=offset, translation=self.cfg_nerf['translation'], sc_factor=self.cfg_nerf['sc_factor'])
- mesh.export(f'{self.debug_dir}/textured_mesh.obj')
-
-
-
-
-
-if __name__=="__main__":
- set_seed(0)
- torch.set_default_tensor_type('torch.cuda.FloatTensor')
-
- cfg_nerf = yaml.load(open(f"{CODE_DIR}/BundleTrack/config_ho3d.yml",'r'))
- cfg_nerf['data_dir'] = '/mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3/evaluation/MPM13'
- cfg_nerf['SPDLOG'] = 1
-
- cfg_track_dir = '/tmp/config.yml'
- yaml.dump(cfg_nerf, open(cfg_track_dir,'w'))
- tracker = BundleSdf(cfg_track_dir=cfg_track_dir)
- reader = Ho3dReader(tracker.bundler.yml["data_dir"].Scalar())
-
- os.system(f"rm -rf {tracker.debug_dir} && mkdir -p {tracker.debug_dir}")
-
- for i,color_file in enumerate(reader.color_files):
- color = cv2.imread(color_file)
- depth = reader.get_depth(i)
- id_str = reader.id_strs[i]
- occ_mask = reader.get_occ_mask(i)
- tracker.run(color, depth, reader.K, id_str, occ_mask=occ_mask)
-
- print("Done")
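A typical follow-up to the per-frame loop is to join the background workers and run the global refinement over the saved keyframes. A minimal sketch, reusing the reader above; get_texture is left False because the texture branch inside run_global_nerf is currently disabled:

    tracker.on_finish()                                        # join the GUI and background NeRF workers
    tracker.run_global_nerf(reader=reader, get_texture=False)  # global NeRF over the saved keyframes
    # outputs land under tracker.debug_dir: final/nerf/, mesh_cleaned.obj, textured_mesh.obj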
diff --git a/cnets-data-generation b/cnets-data-generation
index 70c70ac..ecb57db 160000
--- a/cnets-data-generation
+++ b/cnets-data-generation
@@ -1 +1 @@
-Subproject commit 70c70ac98979b0e10b22364d7d49794d17abf358
+Subproject commit ecb57dba28f493737f2c316cecbeb0dd3a41919d
diff --git a/contact_and_near_surface_sdf.pt b/contact_and_near_surface_sdf.pt
deleted file mode 100644
index 3b3d7ac..0000000
Binary files a/contact_and_near_surface_sdf.pt and /dev/null differ
diff --git a/contact_loss_utils.py b/contact_loss_utils.py
deleted file mode 100644
index 1ed819d..0000000
--- a/contact_loss_utils.py
+++ /dev/null
@@ -1,432 +0,0 @@
-import os
-import shutil
-from PIL import Image
-import numpy as np
-from mpl_toolkits.mplot3d import Axes3D
-import matplotlib.pyplot as plt
-from matplotlib import cm
-from matplotlib.ticker import LinearLocator, FormatStrFormatter
-import trimesh
-import open3d as o3d
-from scipy.spatial.transform import Rotation as R
-from scipy.spatial import ConvexHull, KDTree
-import yaml
-import argparse
-import torch
-
-def best_fit_transform(A, B):
- assert len(A) == len(B)
- # Compute mean of both datasets
- centroid_A = np.mean(A, axis=0)
- centroid_B = np.mean(B, axis=0)
- # Subtract mean
- AA = A - centroid_A
- BB = B - centroid_B
- # Rotation matrix
- H = np.dot(AA.T, BB)
- U, S, Vt = np.linalg.svd(H)
- R = np.dot(Vt.T, U.T)
- # Special reflection case
- if np.linalg.det(R) < 0:
- Vt[-1, :] *= -1
- R = np.dot(Vt.T, U.T)
- # Translation
- t = centroid_B.T - np.dot(R, centroid_A.T)
- # Homogeneous transformation
- T = np.identity(4)
- T[0:3, 0:3] = R
- T[0:3, 3] = t
- return T, R, t
-
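best_fit_transform is the closed-form Kabsch/Procrustes fit between two already-corresponded point sets. A quick sanity check, assuming the helpers in this module are importable, is that it recovers a known rigid transform:

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    rng = np.random.default_rng(0)
    A = rng.normal(size=(100, 3))                                   # source points
    R_true = R.from_euler('xyz', [10, 20, 30], degrees=True).as_matrix()
    t_true = np.array([0.1, -0.2, 0.3])
    B = A @ R_true.T + t_true                                       # corresponded targets

    T, R_est, t_est = best_fit_transform(A, B)
    assert np.allclose(R_est, R_true, atol=1e-6)
    assert np.allclose(t_est, t_true, atol=1e-6)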
-def icp(A, B, max_iterations=20, tolerance=1e-8):
- prev_error = 0
- src = np.ones((4, A.shape[0]))
- dst = np.ones((4, B.shape[0]))
- src[0:3, :] = np.copy(A.T)
- dst[0:3, :] = np.copy(B.T)
- # KDTree for fast nearest neighbor search
- tree = KDTree(B)
- distances = []
- for i in range(max_iterations):
- # Find the nearest neighbors between the current source and destination
- distances, indices = tree.query(src[:3, :].T)
- # Compute the transformation between the current source and nearest destination
- T, _, _ = best_fit_transform(src[:3, :].T, B[indices])
- # Update the current source
- src = np.dot(T, src)
- # Check for convergence
- mean_error = np.mean(distances)
- if np.abs(prev_error - mean_error) < tolerance:
- break
- prev_error = mean_error
- if isinstance(A, torch.Tensor):
- A = A.cpu().numpy()
- print('A is a tensor!!!!!!!!!!')
- else:
- print('A is not a tensor~~~~~~~~~~')
- T, _, _ = best_fit_transform(A, src[:3, :].T)
- return T, distances
-
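icp alternates nearest-neighbour association (via the KDTree on B) with the closed-form fit above, so it can be exercised by registering a cloud against a slightly perturbed copy of itself. A small sketch; the perturbation, iteration count, and tolerance are illustrative:

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    rng = np.random.default_rng(1)
    B = rng.uniform(-0.5, 0.5, size=(500, 3))                       # target cloud
    A = (B - 0.01) @ R.from_rotvec([0.0, 0.0, 0.05]).as_matrix().T  # perturbed copy

    T, distances = icp(A, B, max_iterations=50, tolerance=1e-9)
    A_h = np.hstack([A, np.ones((A.shape[0], 1))])                  # homogeneous source points
    A_aligned = (T @ A_h.T).T[:, :3]                                # apply the recovered transform
    print('mean nearest-neighbour residual:', np.mean(distances))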
-def setup_extrinsic(translation, axis_vec):
- """
- Convert translation and axis-angle representation to extrinsic matrix.
-
- Parameters:
- - translation: 3x1 numpy array, translation vector.
-    - axis_vec: 3x1 numpy array, rotation represented in axis-angle (Rodrigues) form.
-
- Returns:
- - 4x4 numpy array, extrinsic matrix.
- """
- rotation_matrix = R.from_rotvec(axis_vec.ravel()).as_matrix()
- rotation_inverse = rotation_matrix.T
- translation_inverse = -rotation_inverse @ translation.ravel()
- extrinsic = np.eye(4)
- extrinsic[:3, :3] = rotation_inverse
- extrinsic[:3, 3] = translation_inverse
- return extrinsic
-
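setup_extrinsic inverts a camera pose given as (translation, axis-angle) to obtain the world-to-camera matrix. A short check, assuming the function above, is that composing it with the original camera pose gives the identity:

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    translation = np.array([0.2, 0.0, 1.5])               # camera position in world
    axis_vec = np.array([0.0, np.pi / 6, 0.0])            # camera orientation, axis-angle

    extrinsic = setup_extrinsic(translation, axis_vec)    # world -> camera

    cam_pose = np.eye(4)                                  # camera -> world
    cam_pose[:3, :3] = R.from_rotvec(axis_vec).as_matrix()
    cam_pose[:3, 3] = translation
    assert np.allclose(extrinsic @ cam_pose, np.eye(4))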
-def clean_and_denoise_mesh(mesh):
- mesh = mesh.remove_duplicated_vertices()
- mesh = mesh.remove_duplicated_triangles()
- mesh = mesh.remove_degenerate_triangles()
- mesh = mesh.remove_unreferenced_vertices()
- mesh = mesh.filter_smooth_laplacian(10, 0.5)
- return mesh
-
-def transform_points(points: torch.Tensor, transformation_matrix: torch.Tensor):
- ones = torch.ones((points.shape[0], 1))
- points_homogeneous = torch.hstack([points, ones])
- transformed_points = points_homogeneous @ transformation_matrix.T
- return transformed_points[:, :3]
-
-def to_homo(pts):
- '''
-    @pts: (N,3) or (N,2); appends a homogeneous coordinate of 1 along the last dimension
- '''
- assert len(pts.shape)==2, f'pts.shape: {pts.shape}'
- homo = np.concatenate((pts, np.ones((pts.shape[0],1))),axis=-1)
- return homo
-
-def transform_pts_to_normalized_space(contact_pts, ob_init_cam, translation, sc_factor, offset):
- ob_init_cam[:,3] = torch.tensor([0,0,0,1])
- contact_cam = transform_points(contact_pts, ob_init_cam)
-
- contact_cam = transform_points(contact_cam, offset)
- contact_cam += translation.reshape(1,3)
- contact_cam *= sc_factor
- return contact_cam
-
-def transform_dir_to_normalized_space(dir, ob_init_cam, sc_factor, offset):
- ob_init_cam[:, 3] = torch.tensor([0, 0, 0, 1])
- rotated_dir = torch.matmul(dir, ob_init_cam[:, :3].T)
- rotated_dir = torch.matmul(rotated_dir, offset[:, :3])
- scaled_dir = rotated_dir * sc_factor
- return scaled_dir
-
-def transform_mesh_to_normalized_space(gt_mesh, ob_init_cam, T, translation, sc_factor, offset):
- vertices = gt_mesh.vertices
- normals = gt_mesh.vertex_normals
- faces = gt_mesh.faces
- ob_init_cam[:, 3] = torch.tensor([0, 0, 0, 1])
- vertices = torch.from_numpy(vertices)
- transformed_vertices = transform_pts_to_normalized_space(vertices, ob_init_cam, translation, sc_factor, offset)
-
- transformed_vertices = transform_points(transformed_vertices, T)
-
- # Transform normals (apply only rotation)
- rotation_matrices = [ob_init_cam[:3, :3], offset[:3, :3], T[:3, :3]]
- normals = np.asanyarray(normals).copy()
- transformed_normals = torch.from_numpy(normals)
- for rot in rotation_matrices:
- transformed_normals = torch.matmul(transformed_normals, rot.T)
-    transformed_normals = transformed_normals / torch.norm(transformed_normals, dim=-1, keepdim=True)  # normalize per vertex
- mesh = trimesh.Trimesh(vertices=transformed_vertices, faces=faces, vertex_normals=transformed_normals)
- # mesh.export('./transformed_mesh.obj', file_type='obj')
- return mesh
-
-def generate_contact_loss_data(path, output_path, ob_init_cam, translation, sc_factor, offset, num_surface, num_inner, num_outer, dist=0.2, separate_on_and_near=False):
-    '''
-    Generate contact and near-surface SDF supervision in the C_prime frame, where C_prime is
-    BundleSDF's object body frame: pC_prime = TC_C_prime @ TW_C @ TB_W @ p_B.
-
-    @path: ground-truth mesh whose surface is sampled for body-frame contact points, aka p_B
-    @output_path: BundleSDF output mesh in normalized space, aka mesh_cleaned.obj
-    @ob_init_cam: object's initial pose in camera frame, aka TB_C
-    @offset: additional camera-frame pose offset applied before normalization
-    '''
- offset = offset.copy()
- offset = torch.from_numpy(offset)
- # contact_pts = np.load(path)
- gt_mesh = trimesh.load(path, force='mesh')
- contact_pts = gt_mesh.sample(num_surface)
- # contact_pcd = o3d.io.read_point_cloud(path)
- # contact_pts = np.asarray(contact_pcd.points)
- # contact_pts = contact_pts[np.random.choice(contact_pts.shape[0], num_samples, replace=False)]
-
- normalized_mesh = trimesh.load(output_path, force='mesh')
- normalized_pts = normalized_mesh.sample(num_surface)
- from scipy.spatial import cKDTree
- kdtree = cKDTree(normalized_mesh.vertices)
- _, face_indices = kdtree.query(normalized_pts)
- sampled_normals = normalized_mesh.face_normals[face_indices]
- sampled_normals = np.asarray(sampled_normals).copy()
- surface_normals = torch.from_numpy(sampled_normals)
- surface_normals = transform_dir_to_normalized_space(surface_normals, ob_init_cam, sc_factor, offset)
- contact_pts = np.asarray(contact_pts).copy()
- contact_pts = torch.from_numpy(contact_pts)
- contact_cam = transform_pts_to_normalized_space(contact_pts, ob_init_cam, translation, sc_factor, offset)
-
-
- contact_pcd = o3d.geometry.PointCloud()
- contact_pcd.points = o3d.utility.Vector3dVector(contact_cam)
-
- contact_cam = contact_cam.numpy()
- T, _ = icp(contact_cam, normalized_pts) # contact_cam: perfect mesh (dair_pll), normalized_pts: bundlesdf
- contact_pcd.transform(T)
- contact_cam = np.asarray(contact_pcd.points)
- contact_cam = torch.from_numpy(contact_cam)
- # SDF values of contact points are 0
- contact_sdf_values = torch.zeros(contact_cam.shape[0])
- T = T.copy()
-
- T = torch.from_numpy(T)
- transformed_mesh = transform_mesh_to_normalized_space(gt_mesh, ob_init_cam, T, translation, sc_factor, offset)
- flatten = not separate_on_and_near
- near_surface_pts, near_surface_sdf, near_surface_normals = get_near_surface_pts_and_sdf(obj_file=transformed_mesh, num_inner=num_inner, num_outer=num_outer, distance=dist, surface_points=50000, flatten=flatten)
- ### near_surface_pts: N*2M*3, near_surface_sdf: N*2M*1, near_surface_normals: N*2M*3, N and M merged if flatten
- print(f'{near_surface_pts.shape=},{near_surface_sdf.shape=},{near_surface_normals.shape=}')
- # from vis_utils import visualize_pts_sdfs
- # visualize_pts_sdfs(contact_cam.cpu().detach().numpy(), contact_sdf_values.cpu().detach().numpy())
- # visualize_pts_sdfs(near_surface_pts.cpu().detach().numpy(), near_surface_sdf.cpu().detach().numpy())
- # save_pts(contact_cam.cpu().detach().numpy() )#, contact_sdf_values.cpu().detach().numpy())
- # save_pts(near_surface_pts.cpu().detach().numpy() )#, near_surface_sdf.cpu().detach().numpy())
-
- if separate_on_and_near:
- return contact_cam, contact_sdf_values, T, surface_normals, near_surface_pts, near_surface_sdf, near_surface_normals
- else:
- ## Concatenate contact and near-surface points
- all_pts = torch.cat((contact_cam, near_surface_pts), dim=0)
- all_sdf = torch.cat((contact_sdf_values, near_surface_sdf), dim=0)
- surface_normals = torch.cat((surface_normals, near_surface_normals), dim=0)
- return all_pts, all_sdf, T, surface_normals
-
- ## Keep only near-surface pts
- # all_pts = near_surface_pts
- # all_sdf = near_surface_sdf
-
- # all_sdf *= sc_factor
- # return all_pts, all_sdf, T, surface_normals
- ### contact_cam: N*3, contact_sdf_values: N, T: 4*4, surface_normals: N*3
- # return contact_cam, contact_sdf_values, T, surface_normals, near_surface_pts, near_surface_sdf, near_surface_normals
-
-
-def save_pts(pts):
- N = pts.shape[0]
- fig = plt.figure(figsize=(8, 8))
- ax = fig.add_subplot(111, projection='3d')
- idx = np.random.permutation(pts.shape[0])[:5000]
- pts = pts[idx]
- ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2],
- cmap='coolwarm', marker='o', vmin=-1, vmax=1,
- label='pts', s=2)
-
- ax.set_xlabel('X-axis')
- ax.set_ylabel('Y-axis')
- ax.set_zlabel('Z-axis')
- ax.legend()
- plt.show()
- plt.savefig(f'vis_pretrained_{N}.png')
-
-def sample_points_on_surface(mesh, number_of_points):
- """
- Sample points uniformly from the surface of the mesh.
-
- :param mesh: trimesh object
- :param number_of_points: number of points to sample
- :return: array of sampled points and their normals
- """
- points, face_indices = trimesh.sample.sample_surface(mesh, number_of_points)
- normals = mesh.face_normals[face_indices]
- points = np.asarray(points).copy()
- normals = np.asarray(normals).copy()
- return torch.from_numpy(points), torch.from_numpy(normals)
-
-
-def get_near_surface_pts_and_sdf(obj_file, distance, num_inner, num_outer, surface_points=1000, flatten=False):
- '''
- Generate sets of near-surface points and corresponding signed distances to augment contact point data in order to regularize BundleSDF's NeRF
- '''
- mesh = trimesh.load(obj_file)
- surface_pts, surface_normals = sample_points_on_surface(mesh, number_of_points=surface_points)
- sdf_points = []
- sdf_values = []
- target_normals = []
- print('Start sampling!!!!!!!!!')
- # for point, normal in zip(surface_pts, surface_normals):
- # for _ in range(num_outer):
- # d = torch.distributions.Uniform(0, distance).sample() # Random distance within the range
- # outward_point = point + d * normal
- # sdf_points.append(outward_point.unsqueeze(0))
- # sdf_values.append(d.unsqueeze(0)) # Positive SDF
- # target_normals.append(normal.unsqueeze(0))
-
- # # Points along the negative normal
- # for _ in range(num_inner):
- # d = torch.distributions.Uniform(0, distance).sample() # Random distance within the range
- # inward_point = point - d * normal
- # sdf_points.append(inward_point.unsqueeze(0))
- # sdf_values.append(-d.unsqueeze(0)) # Negative SDF
- # target_normals.append(normal.unsqueeze(0))
-
- # ### random interval
- # d_out= torch.rand(surface_pts.shape[0], 1, num_outer) * distance
- # d_in = - torch.rand(surface_pts.shape[0],1, num_inner) * distance
- ### regular interval
- d_out = torch.linspace(distance / num_outer / 2, distance, num_outer).unsqueeze(0).unsqueeze(0).repeat(surface_pts.shape[0], 1, 1)
-    d_in = torch.linspace(-distance / num_inner / 2, -distance, num_inner).unsqueeze(0).unsqueeze(0).repeat(surface_pts.shape[0], 1, 1)
-
- d_out = torch.sort(d_out, dim=2, descending=True)[0]
- d_in = torch.sort(d_in, dim=2, descending=True)[0]
- print(f'{surface_pts.shape=}, {d_out.shape=}')
- outward_pts = surface_pts.unsqueeze(-1) + d_out * surface_normals.unsqueeze(-1) # N*3*M
- inward_pts = surface_pts.unsqueeze(-1) + d_in * surface_normals.unsqueeze(-1)
- sdf_points = torch.cat((outward_pts, inward_pts),dim=2) # N*3*2M
- sdf_values = torch.cat((d_out, d_in),dim=2) # N*1*2M
-
- surface_normals_out = surface_normals.unsqueeze(-1).expand_as(outward_pts)
- surface_normals_in = surface_normals.unsqueeze(-1).expand_as(inward_pts)
- surface_normals = torch.cat((surface_normals_out, surface_normals_in), dim=2) # N*3*2M
-
- surface_normals = surface_normals.permute(0,2,1) #.flatten(0,1)
- sdf_points = sdf_points.permute(0,2,1) #.flatten(0,1)
- sdf_values = sdf_values.permute(0,2,1) #.flatten()
-
- if flatten:
- surface_normals = surface_normals.flatten(0,1)
- sdf_points = sdf_points.flatten(0,1)
- sdf_values = sdf_values.flatten()
- print('Done sampling!!!!!!!!!')
- # sdf_points, sdf_values = torch.cat(sdf_points), torch.cat(sdf_values)
- # target_normals = torch.cat(target_normals)
- return sdf_points, sdf_values, surface_normals
-
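get_near_surface_pts_and_sdf offsets every sampled surface point along its normal at regular intervals on both sides, so with flatten=False the outputs keep the per-point grouping: points N x 2M x 3, signed distances N x 2M x 1, normals N x 2M x 3. A small shape check on a box mesh; the sample counts and the /tmp path are illustrative:

    import trimesh

    box = trimesh.creation.box(extents=(0.1, 0.1, 0.1))   # stand-in for the transformed mesh
    box.export('/tmp/box.obj')

    pts, sdf, normals = get_near_surface_pts_and_sdf(
        obj_file='/tmp/box.obj', distance=0.02,
        num_inner=4, num_outer=4, surface_points=200, flatten=False)
    print(pts.shape, sdf.shape, normals.shape)            # (200, 8, 3) (200, 8, 1) (200, 8, 3)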
-def get_transformed_obj_for_nerf_init(gt_path, output_path, ob_init_cam, translation, sc_factor, num_samples=1000):
- """
- Given a ground-truth .obj, transform to BundleSDF's normalized space for Octree initialization.
- @gt_path: path of ground-truth .obj
- @output_path: path of mesh_cleaned.obj
- @ob_init_cam: object's initial pose in camera frame, aka TB_C
- @num_samples: number of sampled points
- """
-    gt_mesh = trimesh.load(gt_path, force='mesh')
-    contact_pts = torch.from_numpy(np.asarray(gt_mesh.sample(num_samples)).copy())
-    ob_init_cam = torch.as_tensor(np.asarray(ob_init_cam), dtype=torch.float64)
-    offset = torch.eye(4, dtype=torch.float64)  # this helper assumes no extra camera-frame offset
-    contact_cam = transform_pts_to_normalized_space(contact_pts, ob_init_cam, translation, sc_factor, offset)
-
- normalized_mesh = trimesh.load(output_path, force='mesh')
- normalized_pts = normalized_mesh.sample(num_samples)
-    T, _ = icp(contact_cam, normalized_pts)
-    T = torch.from_numpy(T.copy())
-    transformed_mesh = transform_mesh_to_normalized_space(gt_mesh, ob_init_cam, T, translation, sc_factor, offset)
- transformed_pts = transformed_mesh.sample(num_samples)
-
- fig = plt.figure(figsize=(8, 8))
- ax = fig.add_subplot(111, projection='3d')
- contact_pts_cpu = transformed_pts
- output_pts = normalized_pts
- ax.scatter(contact_pts_cpu[:, 0], contact_pts_cpu[:, 1], contact_pts_cpu[:, 2], color='blue', label='transformed pts')
- ax.scatter(output_pts[:,0], output_pts[:,1], output_pts[:,2], color='red', label='output pts')
- ax.set_xlabel('X axis')
- ax.set_ylabel('Y axis')
- ax.set_zlabel('Z axis')
- ax.legend()
- plt.show()
-
-def augment_contact_points(contact_pts, normals, num_samples, min_distance, max_distance):
- """Augment the contact points from contactnets due to its sparsity.
- @contact_pts: contact points from contactnets
- @normals: surface normals from contactnets
- """
- augmented_points = []
- signed_distances = []
-
- for point, normal in zip(contact_pts, normals):
- normal /= np.linalg.norm(normal)
- augmented_points.append(point)
- signed_distances.append(0)
-
- for _ in range(num_samples):
- distance = np.random.uniform(min_distance, max_distance)
-
- # Step outwards
- outward_point = point + normal * distance
- augmented_points.append(outward_point)
- signed_distances.append(distance) # Positive distance
-
- # Step inwards
- inward_point = point - normal * distance
- augmented_points.append(inward_point)
- signed_distances.append(-distance) # Negative distance
-
- return np.array(augmented_points), np.array(signed_distances)
-
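augment_contact_points expands each sparse contact point into a short column of samples along its (normalized) surface normal, pairing every sample with a signed distance. A tiny illustration with a single contact point:

    import numpy as np

    contact_pts = np.array([[0.0, 0.0, 0.05]])            # one contact point
    normals = np.array([[0.0, 0.0, 2.0]])                 # un-normalized normal is fine
    pts, sdf = augment_contact_points(contact_pts, normals, num_samples=3,
                                      min_distance=0.005, max_distance=0.02)
    print(pts.shape, sdf.shape)                           # (7, 3) (7,): 1 contact + 3 outward/inward pairs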
-
-def visualize_pts(pts):
- fig = plt.figure(figsize=(8, 8))
- ax = fig.add_subplot(111, projection='3d')
-
- # ax.scatter(contact_pts_cpu[:, 0], contact_pts_cpu[:, 1], contact_pts_cpu[:, 2], color='blue', label='contact_pts')
- ax.scatter(pts[:,0], pts[:,1], pts[:,2], color='red', label='output pts')
- ax.set_xlabel('X axis')
- ax.set_ylabel('Y axis')
- ax.set_zlabel('Z axis')
- ax.legend()
- plt.show()
-
-def generate_contact_pts(offset, translation, sc_factor, annotated_poses_dir, num_samples=3000):
- """Interface to allow BundleSDF to call.
- """
- gt_mesh = './assets/gt_cube_simple.obj'
- output_path = './assets/mesh_cleaned_cube2.obj'
- ob_init_pose = np.loadtxt(
- annotated_poses_dir + "%04i.txt" % 0
- ) # initial cube pose represented in camera frame, matching tagslam
- all_pts, all_sdf, T, surface_normals = generate_contact_loss_data(gt_mesh, output_path, ob_init_pose, translation, sc_factor, offset, num_samples)
- return all_pts, all_sdf, T, surface_normals
-
-def generate_mesh_pts(offset, translation, sc_factor, annotated_poses_dir, cnets_mesh_dir, num_surface, num_inner, num_outer, dist, separate_on_and_near=False):
- """Generate mesh nearby points and sdfs to pretrain NeRF from dair_pll mesh.
- """
- output_path = './assets/mesh_cleaned_cube2.obj' # bundlesdf reference output mesh in normalized space
- ob_init_pose = np.loadtxt(
- annotated_poses_dir + "%04i.txt" % 0
- ) # initial cube pose represented in camera frame, matching tagslam
- ob_init_pose = torch.from_numpy(ob_init_pose)
- if separate_on_and_near:
- contact_pts, contact_sdf, T, contact_normals, near_surface_pts, near_surface_sdf, near_surface_normals = \
- generate_contact_loss_data(cnets_mesh_dir, output_path, ob_init_pose, translation, sc_factor, offset, num_surface, num_inner, num_outer, dist, separate_on_and_near)
- return contact_pts, contact_sdf, T, contact_normals, near_surface_pts, near_surface_sdf, near_surface_normals
- else:
- all_pts, all_sdf, T, surface_normals = \
- generate_contact_loss_data(cnets_mesh_dir, output_path, ob_init_pose, translation, sc_factor, offset, num_surface, num_inner, num_outer, dist, separate_on_and_near)
- return all_pts, all_sdf, T, surface_normals
-
-def post_process_generated_points(points: torch.Tensor, offset, translation, sc_factor, annotated_poses_dir, normalized_mesh_path, num_samples, T):
- """Interface to convert sampled points from contactnets to normalized space.
- """
- ob_init_pose = np.loadtxt(
- annotated_poses_dir + "%04i.txt" % 0
- ) # initial cube pose represented in camera frame, matching tagslam
- ob_init_pose = torch.from_numpy(ob_init_pose)
- pts_normalized = transform_pts_to_normalized_space(points, ob_init_pose, translation, sc_factor, offset)
- contact_pcd = o3d.geometry.PointCloud()
- contact_pcd.points = o3d.utility.Vector3dVector(pts_normalized)
-    if T is None:
-        mesh_cleaned = trimesh.load(normalized_mesh_path, force='mesh')
-        mesh_cleaned_pts = mesh_cleaned.sample(num_samples)
-        T, _ = icp(pts_normalized, mesh_cleaned_pts)  # pts_normalized: input (dair_pll), mesh_cleaned_pts: bundlesdf
- contact_pcd.transform(T)
- pts_normalized_tensor = torch.from_numpy(np.asarray(contact_pcd.points)).float().cuda()
- return pts_normalized_tensor, T
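All of the helpers above funnel through the same mapping into BundleSDF's normalized space: rotate the body-frame point by the translation-stripped initial object-in-camera pose, apply the camera-frame offset, add the normalization translation, scale by sc_factor, and optionally apply the ICP correction T afterwards. A worked sketch of that chain on one point, with made-up pose values and an identity offset:

    import numpy as np
    import torch

    p_body = torch.tensor([[0.05, 0.00, 0.05]], dtype=torch.float32)   # one body-frame point
    ob_init_cam = torch.eye(4, dtype=torch.float32)                    # made-up initial object pose in camera frame
    ob_init_cam[:3, :3] = torch.tensor([[0., -1., 0.],
                                        [1.,  0., 0.],
                                        [0.,  0., 1.]])
    offset = torch.eye(4, dtype=torch.float32)                         # no extra camera-frame correction
    translation = np.array([0.1, 0.2, 0.3], dtype=np.float32)          # normalization shift
    sc_factor = 2.0                                                    # normalization scale

    p_norm = transform_pts_to_normalized_space(p_body, ob_init_cam, translation, sc_factor, offset)

    # with an identity offset the chain reduces to: sc_factor * (R_init @ p_body + translation)
    p_manual = sc_factor * (ob_init_cam[:3, :3] @ p_body[0] + torch.from_numpy(translation))
    assert torch.allclose(p_norm[0], p_manual)                         # tensor([0.2000, 0.5000, 0.7000])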
diff --git a/contact_pts.pt b/contact_pts.pt
deleted file mode 100644
index fa007a4..0000000
Binary files a/contact_pts.pt and /dev/null differ
diff --git a/dair_pll b/dair_pll
index a4f596d..4a2eb58 160000
--- a/dair_pll
+++ b/dair_pll
@@ -1 +1 @@
-Subproject commit a4f596d941dd127025b1ed00edbcc488b33ef80f
+Subproject commit 4a2eb58543d7e363ebb7b417adb2af5dd23b2cd4
diff --git a/dair_pll_old/LICENSE b/dair_pll_old/LICENSE
deleted file mode 100644
index 7f1f0dc..0000000
--- a/dair_pll_old/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-BSD 3-Clause License
-
-Copyright (c) 2022, Dynamic Autonomy and Intelligent Robotics Lab
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dair_pll_old/README.md b/dair_pll_old/README.md
deleted file mode 100644
index 02ca253..0000000
--- a/dair_pll_old/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# DAIRLab Physics-based Learning Library
-
-## Documentation
-https://dairlab.github.io/dair_pll
-
-
-## Attribution notes
-* The GitHub Action documentation build scripts are based on [Anne Gentle](https://github.com/annegentle)'s great example here: https://github.com/annegentle/create-demo
-* Some functions (such as [`rotation_matrix_from_one_vector`](https://dairlab.github.io/dair_pll/dair_pll.tensor_utils.html#dair_pll.tensor_utils.rotation_matrix_from_one_vector)) are Pytorch reimplementations of [drake](https://github.com/RobotLocomotion/drake) functionality, and are attributed accordingly in their documentation.
-* This code contains a repackaged version of the Manifold Unscented Kalman Filter developed by [Martin Brossard et al.](https://github.com/CAOR-MINES-ParisTech/ukfm)
diff --git a/dair_pll_old/assets/bundlesdf_bottle/0.pt b/dair_pll_old/assets/bundlesdf_bottle/0.pt
deleted file mode 100644
index bb7dd26..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/0.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/1.pt b/dair_pll_old/assets/bundlesdf_bottle/1.pt
deleted file mode 100644
index 046d17b..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/1.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/2.pt b/dair_pll_old/assets/bundlesdf_bottle/2.pt
deleted file mode 100644
index 5372e85..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/2.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/3.pt b/dair_pll_old/assets/bundlesdf_bottle/3.pt
deleted file mode 100644
index 4ca1dca..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/3.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/4.pt b/dair_pll_old/assets/bundlesdf_bottle/4.pt
deleted file mode 100644
index 55efc2b..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/4.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/5.pt b/dair_pll_old/assets/bundlesdf_bottle/5.pt
deleted file mode 100644
index ea14407..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/5.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/6.pt b/dair_pll_old/assets/bundlesdf_bottle/6.pt
deleted file mode 100644
index 8e633d3..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/6.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/7.pt b/dair_pll_old/assets/bundlesdf_bottle/7.pt
deleted file mode 100644
index c53dc96..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/7.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/8.pt b/dair_pll_old/assets/bundlesdf_bottle/8.pt
deleted file mode 100644
index 841f85c..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/8.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle/9.pt b/dair_pll_old/assets/bundlesdf_bottle/9.pt
deleted file mode 100644
index d5c57e9..0000000
Binary files a/dair_pll_old/assets/bundlesdf_bottle/9.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_bottle_mesh.urdf b/dair_pll_old/assets/bundlesdf_bottle_mesh.urdf
deleted file mode 100644
index cb0f2bb..0000000
--- a/dair_pll_old/assets/bundlesdf_bottle_mesh.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/assets/bundlesdf_cube/0.pt b/dair_pll_old/assets/bundlesdf_cube/0.pt
deleted file mode 100644
index 6b51b6c..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/0.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/1.pt b/dair_pll_old/assets/bundlesdf_cube/1.pt
deleted file mode 100644
index b81f18b..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/1.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/2.pt b/dair_pll_old/assets/bundlesdf_cube/2.pt
deleted file mode 100644
index 3835c01..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/2.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/3.pt b/dair_pll_old/assets/bundlesdf_cube/3.pt
deleted file mode 100644
index f704c98..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/3.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/4.pt b/dair_pll_old/assets/bundlesdf_cube/4.pt
deleted file mode 100644
index 9306be6..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/4.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/5.pt b/dair_pll_old/assets/bundlesdf_cube/5.pt
deleted file mode 100644
index a598a9e..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/5.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/6.pt b/dair_pll_old/assets/bundlesdf_cube/6.pt
deleted file mode 100644
index 94689e6..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/6.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/7.pt b/dair_pll_old/assets/bundlesdf_cube/7.pt
deleted file mode 100644
index 6d4a1f8..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/7.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube/8.pt b/dair_pll_old/assets/bundlesdf_cube/8.pt
deleted file mode 100644
index 5117d6a..0000000
Binary files a/dair_pll_old/assets/bundlesdf_cube/8.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_cube_mesh.urdf b/dair_pll_old/assets/bundlesdf_cube_mesh.urdf
deleted file mode 100644
index 9139abb..0000000
--- a/dair_pll_old/assets/bundlesdf_cube_mesh.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/assets/bundlesdf_napkin/0.pt b/dair_pll_old/assets/bundlesdf_napkin/0.pt
deleted file mode 100644
index 0556e28..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/0.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/1.pt b/dair_pll_old/assets/bundlesdf_napkin/1.pt
deleted file mode 100644
index b82d674..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/1.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/2.pt b/dair_pll_old/assets/bundlesdf_napkin/2.pt
deleted file mode 100644
index 9e60713..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/2.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/3.pt b/dair_pll_old/assets/bundlesdf_napkin/3.pt
deleted file mode 100644
index 7cec7f5..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/3.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/4.pt b/dair_pll_old/assets/bundlesdf_napkin/4.pt
deleted file mode 100644
index c48c89b..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/4.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/5.pt b/dair_pll_old/assets/bundlesdf_napkin/5.pt
deleted file mode 100644
index 3b3dae5..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/5.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/6.pt b/dair_pll_old/assets/bundlesdf_napkin/6.pt
deleted file mode 100644
index 4bfd7f2..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/6.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/7.pt b/dair_pll_old/assets/bundlesdf_napkin/7.pt
deleted file mode 100644
index 8f3b239..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/7.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/8.pt b/dair_pll_old/assets/bundlesdf_napkin/8.pt
deleted file mode 100644
index 460216d..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/8.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin/9.pt b/dair_pll_old/assets/bundlesdf_napkin/9.pt
deleted file mode 100644
index c009389..0000000
Binary files a/dair_pll_old/assets/bundlesdf_napkin/9.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_napkin_mesh.urdf b/dair_pll_old/assets/bundlesdf_napkin_mesh.urdf
deleted file mode 100644
index ab5cb96..0000000
--- a/dair_pll_old/assets/bundlesdf_napkin_mesh.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/0.pt b/dair_pll_old/assets/bundlesdf_toblerone/0.pt
deleted file mode 100644
index 4b513fa..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/0.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/1.pt b/dair_pll_old/assets/bundlesdf_toblerone/1.pt
deleted file mode 100644
index cfbbdfb..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/1.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/2.pt b/dair_pll_old/assets/bundlesdf_toblerone/2.pt
deleted file mode 100644
index 739c275..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/2.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/3.pt b/dair_pll_old/assets/bundlesdf_toblerone/3.pt
deleted file mode 100644
index 3e086fb..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/3.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/4.pt b/dair_pll_old/assets/bundlesdf_toblerone/4.pt
deleted file mode 100644
index 6ef5589..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/4.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/5.pt b/dair_pll_old/assets/bundlesdf_toblerone/5.pt
deleted file mode 100644
index 37c8bbd..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/5.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/6.pt b/dair_pll_old/assets/bundlesdf_toblerone/6.pt
deleted file mode 100644
index 3ccf988..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/6.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/7.pt b/dair_pll_old/assets/bundlesdf_toblerone/7.pt
deleted file mode 100644
index b6bfb26..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/7.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/8.pt b/dair_pll_old/assets/bundlesdf_toblerone/8.pt
deleted file mode 100644
index e8814db..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/8.pt and /dev/null differ
diff --git a/dair_pll_old/assets/bundlesdf_toblerone/9.pt b/dair_pll_old/assets/bundlesdf_toblerone/9.pt
deleted file mode 100644
index 9471cf1..0000000
Binary files a/dair_pll_old/assets/bundlesdf_toblerone/9.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_asymmetric.urdf b/dair_pll_old/assets/contactnets_asymmetric.urdf
deleted file mode 100644
index 1e2b2b0..0000000
--- a/dair_pll_old/assets/contactnets_asymmetric.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/assets/contactnets_cube.urdf b/dair_pll_old/assets/contactnets_cube.urdf
deleted file mode 100644
index ecd26d6..0000000
--- a/dair_pll_old/assets/contactnets_cube.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/assets/contactnets_cube/0.pt b/dair_pll_old/assets/contactnets_cube/0.pt
deleted file mode 100644
index 93efe17..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/0.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/1.pt b/dair_pll_old/assets/contactnets_cube/1.pt
deleted file mode 100644
index cb4e009..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/1.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/10.pt b/dair_pll_old/assets/contactnets_cube/10.pt
deleted file mode 100644
index 6deea1f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/10.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/100.pt b/dair_pll_old/assets/contactnets_cube/100.pt
deleted file mode 100644
index 00173f6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/100.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/101.pt b/dair_pll_old/assets/contactnets_cube/101.pt
deleted file mode 100644
index e2a7570..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/101.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/102.pt b/dair_pll_old/assets/contactnets_cube/102.pt
deleted file mode 100644
index c0205c0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/102.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/103.pt b/dair_pll_old/assets/contactnets_cube/103.pt
deleted file mode 100644
index 63e9cf6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/103.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/104.pt b/dair_pll_old/assets/contactnets_cube/104.pt
deleted file mode 100644
index 2dbe384..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/104.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/105.pt b/dair_pll_old/assets/contactnets_cube/105.pt
deleted file mode 100644
index 0c43c8c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/105.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/106.pt b/dair_pll_old/assets/contactnets_cube/106.pt
deleted file mode 100644
index 8cbd0c2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/106.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/107.pt b/dair_pll_old/assets/contactnets_cube/107.pt
deleted file mode 100644
index 10fcbe0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/107.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/108.pt b/dair_pll_old/assets/contactnets_cube/108.pt
deleted file mode 100644
index 54d613b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/108.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/109.pt b/dair_pll_old/assets/contactnets_cube/109.pt
deleted file mode 100644
index ce008f8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/109.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/11.pt b/dair_pll_old/assets/contactnets_cube/11.pt
deleted file mode 100644
index 3f6a534..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/11.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/110.pt b/dair_pll_old/assets/contactnets_cube/110.pt
deleted file mode 100644
index 22f99fb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/110.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/111.pt b/dair_pll_old/assets/contactnets_cube/111.pt
deleted file mode 100644
index dbe1437..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/111.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/112.pt b/dair_pll_old/assets/contactnets_cube/112.pt
deleted file mode 100644
index 2d769d7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/112.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/113.pt b/dair_pll_old/assets/contactnets_cube/113.pt
deleted file mode 100644
index 564e22f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/113.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/114.pt b/dair_pll_old/assets/contactnets_cube/114.pt
deleted file mode 100644
index 62b133a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/114.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/115.pt b/dair_pll_old/assets/contactnets_cube/115.pt
deleted file mode 100644
index d1dc510..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/115.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/116.pt b/dair_pll_old/assets/contactnets_cube/116.pt
deleted file mode 100644
index 75e1296..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/116.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/117.pt b/dair_pll_old/assets/contactnets_cube/117.pt
deleted file mode 100644
index 92343f7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/117.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/118.pt b/dair_pll_old/assets/contactnets_cube/118.pt
deleted file mode 100644
index 31a4cbf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/118.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/119.pt b/dair_pll_old/assets/contactnets_cube/119.pt
deleted file mode 100644
index c2279bf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/119.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/12.pt b/dair_pll_old/assets/contactnets_cube/12.pt
deleted file mode 100644
index 2517c63..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/12.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/120.pt b/dair_pll_old/assets/contactnets_cube/120.pt
deleted file mode 100644
index 9eb065c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/120.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/121.pt b/dair_pll_old/assets/contactnets_cube/121.pt
deleted file mode 100644
index b9c5d37..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/121.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/122.pt b/dair_pll_old/assets/contactnets_cube/122.pt
deleted file mode 100644
index d3906aa..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/122.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/123.pt b/dair_pll_old/assets/contactnets_cube/123.pt
deleted file mode 100644
index 2f23620..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/123.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/124.pt b/dair_pll_old/assets/contactnets_cube/124.pt
deleted file mode 100644
index 3bb6ccf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/124.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/125.pt b/dair_pll_old/assets/contactnets_cube/125.pt
deleted file mode 100644
index 33fc1e2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/125.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/126.pt b/dair_pll_old/assets/contactnets_cube/126.pt
deleted file mode 100644
index d23aedf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/126.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/127.pt b/dair_pll_old/assets/contactnets_cube/127.pt
deleted file mode 100644
index 09920f2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/127.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/128.pt b/dair_pll_old/assets/contactnets_cube/128.pt
deleted file mode 100644
index c208921..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/128.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/129.pt b/dair_pll_old/assets/contactnets_cube/129.pt
deleted file mode 100644
index 99c696f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/129.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/13.pt b/dair_pll_old/assets/contactnets_cube/13.pt
deleted file mode 100644
index 2d1a7a2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/13.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/130.pt b/dair_pll_old/assets/contactnets_cube/130.pt
deleted file mode 100644
index 1366013..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/130.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/131.pt b/dair_pll_old/assets/contactnets_cube/131.pt
deleted file mode 100644
index 8ab5793..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/131.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/132.pt b/dair_pll_old/assets/contactnets_cube/132.pt
deleted file mode 100644
index 308a262..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/132.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/133.pt b/dair_pll_old/assets/contactnets_cube/133.pt
deleted file mode 100644
index 68de1b4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/133.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/134.pt b/dair_pll_old/assets/contactnets_cube/134.pt
deleted file mode 100644
index b1bb44f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/134.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/135.pt b/dair_pll_old/assets/contactnets_cube/135.pt
deleted file mode 100644
index a8c77ab..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/135.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/136.pt b/dair_pll_old/assets/contactnets_cube/136.pt
deleted file mode 100644
index eef9ae6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/136.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/137.pt b/dair_pll_old/assets/contactnets_cube/137.pt
deleted file mode 100644
index fdeda54..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/137.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/138.pt b/dair_pll_old/assets/contactnets_cube/138.pt
deleted file mode 100644
index eeb0272..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/138.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/139.pt b/dair_pll_old/assets/contactnets_cube/139.pt
deleted file mode 100644
index 72f09df..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/139.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/14.pt b/dair_pll_old/assets/contactnets_cube/14.pt
deleted file mode 100644
index 956f899..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/14.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/140.pt b/dair_pll_old/assets/contactnets_cube/140.pt
deleted file mode 100644
index fd11d25..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/140.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/141.pt b/dair_pll_old/assets/contactnets_cube/141.pt
deleted file mode 100644
index 41842a4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/141.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/142.pt b/dair_pll_old/assets/contactnets_cube/142.pt
deleted file mode 100644
index 96f3993..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/142.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/143.pt b/dair_pll_old/assets/contactnets_cube/143.pt
deleted file mode 100644
index 730e3ba..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/143.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/144.pt b/dair_pll_old/assets/contactnets_cube/144.pt
deleted file mode 100644
index c4141bf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/144.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/145.pt b/dair_pll_old/assets/contactnets_cube/145.pt
deleted file mode 100644
index 2371ac7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/145.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/146.pt b/dair_pll_old/assets/contactnets_cube/146.pt
deleted file mode 100644
index 0c8cbf8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/146.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/147.pt b/dair_pll_old/assets/contactnets_cube/147.pt
deleted file mode 100644
index 7029f74..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/147.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/148.pt b/dair_pll_old/assets/contactnets_cube/148.pt
deleted file mode 100644
index e9af0b7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/148.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/149.pt b/dair_pll_old/assets/contactnets_cube/149.pt
deleted file mode 100644
index 025d75a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/149.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/15.pt b/dair_pll_old/assets/contactnets_cube/15.pt
deleted file mode 100644
index e5f0779..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/15.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/150.pt b/dair_pll_old/assets/contactnets_cube/150.pt
deleted file mode 100644
index fe23f14..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/150.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/151.pt b/dair_pll_old/assets/contactnets_cube/151.pt
deleted file mode 100644
index d2f7139..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/151.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/152.pt b/dair_pll_old/assets/contactnets_cube/152.pt
deleted file mode 100644
index 55f8aaf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/152.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/153.pt b/dair_pll_old/assets/contactnets_cube/153.pt
deleted file mode 100644
index efd3315..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/153.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/154.pt b/dair_pll_old/assets/contactnets_cube/154.pt
deleted file mode 100644
index 3b05272..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/154.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/155.pt b/dair_pll_old/assets/contactnets_cube/155.pt
deleted file mode 100644
index 6145019..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/155.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/156.pt b/dair_pll_old/assets/contactnets_cube/156.pt
deleted file mode 100644
index c70363f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/156.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/157.pt b/dair_pll_old/assets/contactnets_cube/157.pt
deleted file mode 100644
index 0bbf006..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/157.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/158.pt b/dair_pll_old/assets/contactnets_cube/158.pt
deleted file mode 100644
index e379ca5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/158.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/159.pt b/dair_pll_old/assets/contactnets_cube/159.pt
deleted file mode 100644
index 03455c7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/159.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/16.pt b/dair_pll_old/assets/contactnets_cube/16.pt
deleted file mode 100644
index 40eb9b1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/16.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/160.pt b/dair_pll_old/assets/contactnets_cube/160.pt
deleted file mode 100644
index 41beaac..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/160.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/161.pt b/dair_pll_old/assets/contactnets_cube/161.pt
deleted file mode 100644
index 55baf6e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/161.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/162.pt b/dair_pll_old/assets/contactnets_cube/162.pt
deleted file mode 100644
index 8529c0c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/162.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/163.pt b/dair_pll_old/assets/contactnets_cube/163.pt
deleted file mode 100644
index 5e8189d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/163.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/164.pt b/dair_pll_old/assets/contactnets_cube/164.pt
deleted file mode 100644
index f958542..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/164.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/165.pt b/dair_pll_old/assets/contactnets_cube/165.pt
deleted file mode 100644
index d4340cc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/165.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/166.pt b/dair_pll_old/assets/contactnets_cube/166.pt
deleted file mode 100644
index f6b2329..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/166.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/167.pt b/dair_pll_old/assets/contactnets_cube/167.pt
deleted file mode 100644
index 42d25f8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/167.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/168.pt b/dair_pll_old/assets/contactnets_cube/168.pt
deleted file mode 100644
index 4488dc4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/168.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/169.pt b/dair_pll_old/assets/contactnets_cube/169.pt
deleted file mode 100644
index 2a56f6b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/169.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/17.pt b/dair_pll_old/assets/contactnets_cube/17.pt
deleted file mode 100644
index e9b3bce..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/17.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/170.pt b/dair_pll_old/assets/contactnets_cube/170.pt
deleted file mode 100644
index 99c16c0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/170.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/171.pt b/dair_pll_old/assets/contactnets_cube/171.pt
deleted file mode 100644
index fd7b85a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/171.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/172.pt b/dair_pll_old/assets/contactnets_cube/172.pt
deleted file mode 100644
index 6f0b42d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/172.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/173.pt b/dair_pll_old/assets/contactnets_cube/173.pt
deleted file mode 100644
index 8fcc6d3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/173.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/174.pt b/dair_pll_old/assets/contactnets_cube/174.pt
deleted file mode 100644
index 6938a9c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/174.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/175.pt b/dair_pll_old/assets/contactnets_cube/175.pt
deleted file mode 100644
index 58f19c7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/175.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/176.pt b/dair_pll_old/assets/contactnets_cube/176.pt
deleted file mode 100644
index a8b7872..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/176.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/177.pt b/dair_pll_old/assets/contactnets_cube/177.pt
deleted file mode 100644
index 494e81d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/177.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/178.pt b/dair_pll_old/assets/contactnets_cube/178.pt
deleted file mode 100644
index cf93a6c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/178.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/179.pt b/dair_pll_old/assets/contactnets_cube/179.pt
deleted file mode 100644
index 559dc7b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/179.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/18.pt b/dair_pll_old/assets/contactnets_cube/18.pt
deleted file mode 100644
index fb296f9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/18.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/180.pt b/dair_pll_old/assets/contactnets_cube/180.pt
deleted file mode 100644
index 2adf51d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/180.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/181.pt b/dair_pll_old/assets/contactnets_cube/181.pt
deleted file mode 100644
index 28631a1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/181.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/182.pt b/dair_pll_old/assets/contactnets_cube/182.pt
deleted file mode 100644
index 0b57633..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/182.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/183.pt b/dair_pll_old/assets/contactnets_cube/183.pt
deleted file mode 100644
index 75e53ab..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/183.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/184.pt b/dair_pll_old/assets/contactnets_cube/184.pt
deleted file mode 100644
index 6ca2646..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/184.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/185.pt b/dair_pll_old/assets/contactnets_cube/185.pt
deleted file mode 100644
index 2fbf69d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/185.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/186.pt b/dair_pll_old/assets/contactnets_cube/186.pt
deleted file mode 100644
index cf16f46..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/186.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/187.pt b/dair_pll_old/assets/contactnets_cube/187.pt
deleted file mode 100644
index 9ff82e3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/187.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/188.pt b/dair_pll_old/assets/contactnets_cube/188.pt
deleted file mode 100644
index 2674727..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/188.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/189.pt b/dair_pll_old/assets/contactnets_cube/189.pt
deleted file mode 100644
index 7560ec3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/189.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/19.pt b/dair_pll_old/assets/contactnets_cube/19.pt
deleted file mode 100644
index e865070..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/19.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/190.pt b/dair_pll_old/assets/contactnets_cube/190.pt
deleted file mode 100644
index c5d9092..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/190.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/191.pt b/dair_pll_old/assets/contactnets_cube/191.pt
deleted file mode 100644
index df41876..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/191.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/192.pt b/dair_pll_old/assets/contactnets_cube/192.pt
deleted file mode 100644
index 51bd21d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/192.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/193.pt b/dair_pll_old/assets/contactnets_cube/193.pt
deleted file mode 100644
index 5531dbb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/193.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/194.pt b/dair_pll_old/assets/contactnets_cube/194.pt
deleted file mode 100644
index 4d7d181..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/194.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/195.pt b/dair_pll_old/assets/contactnets_cube/195.pt
deleted file mode 100644
index f707786..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/195.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/196.pt b/dair_pll_old/assets/contactnets_cube/196.pt
deleted file mode 100644
index 9828080..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/196.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/197.pt b/dair_pll_old/assets/contactnets_cube/197.pt
deleted file mode 100644
index 5a67ba5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/197.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/198.pt b/dair_pll_old/assets/contactnets_cube/198.pt
deleted file mode 100644
index 89a3b06..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/198.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/199.pt b/dair_pll_old/assets/contactnets_cube/199.pt
deleted file mode 100644
index 62a5b6f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/199.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/2.pt b/dair_pll_old/assets/contactnets_cube/2.pt
deleted file mode 100644
index de2f40a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/2.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/20.pt b/dair_pll_old/assets/contactnets_cube/20.pt
deleted file mode 100644
index 240b131..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/20.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/200.pt b/dair_pll_old/assets/contactnets_cube/200.pt
deleted file mode 100644
index 0eba51f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/200.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/201.pt b/dair_pll_old/assets/contactnets_cube/201.pt
deleted file mode 100644
index 708fe3b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/201.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/202.pt b/dair_pll_old/assets/contactnets_cube/202.pt
deleted file mode 100644
index 158fd1f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/202.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/203.pt b/dair_pll_old/assets/contactnets_cube/203.pt
deleted file mode 100644
index 407c40d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/203.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/204.pt b/dair_pll_old/assets/contactnets_cube/204.pt
deleted file mode 100644
index 8cadf61..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/204.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/205.pt b/dair_pll_old/assets/contactnets_cube/205.pt
deleted file mode 100644
index c0c2ed9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/205.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/206.pt b/dair_pll_old/assets/contactnets_cube/206.pt
deleted file mode 100644
index e379efd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/206.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/207.pt b/dair_pll_old/assets/contactnets_cube/207.pt
deleted file mode 100644
index f6e3d0a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/207.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/208.pt b/dair_pll_old/assets/contactnets_cube/208.pt
deleted file mode 100644
index 9b1c4ce..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/208.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/209.pt b/dair_pll_old/assets/contactnets_cube/209.pt
deleted file mode 100644
index be7fc22..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/209.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/21.pt b/dair_pll_old/assets/contactnets_cube/21.pt
deleted file mode 100644
index 33449cd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/21.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/210.pt b/dair_pll_old/assets/contactnets_cube/210.pt
deleted file mode 100644
index a2ac592..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/210.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/211.pt b/dair_pll_old/assets/contactnets_cube/211.pt
deleted file mode 100644
index 2c04782..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/211.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/212.pt b/dair_pll_old/assets/contactnets_cube/212.pt
deleted file mode 100644
index 8463747..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/212.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/213.pt b/dair_pll_old/assets/contactnets_cube/213.pt
deleted file mode 100644
index 7ce93f3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/213.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/214.pt b/dair_pll_old/assets/contactnets_cube/214.pt
deleted file mode 100644
index c71650f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/214.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/215.pt b/dair_pll_old/assets/contactnets_cube/215.pt
deleted file mode 100644
index 106a1a9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/215.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/216.pt b/dair_pll_old/assets/contactnets_cube/216.pt
deleted file mode 100644
index 8157bdf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/216.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/217.pt b/dair_pll_old/assets/contactnets_cube/217.pt
deleted file mode 100644
index d816acf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/217.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/218.pt b/dair_pll_old/assets/contactnets_cube/218.pt
deleted file mode 100644
index 927b246..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/218.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/219.pt b/dair_pll_old/assets/contactnets_cube/219.pt
deleted file mode 100644
index fee86c2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/219.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/22.pt b/dair_pll_old/assets/contactnets_cube/22.pt
deleted file mode 100644
index de8d281..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/22.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/220.pt b/dair_pll_old/assets/contactnets_cube/220.pt
deleted file mode 100644
index a514a79..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/220.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/221.pt b/dair_pll_old/assets/contactnets_cube/221.pt
deleted file mode 100644
index e265de2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/221.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/222.pt b/dair_pll_old/assets/contactnets_cube/222.pt
deleted file mode 100644
index be69578..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/222.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/223.pt b/dair_pll_old/assets/contactnets_cube/223.pt
deleted file mode 100644
index 0302498..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/223.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/224.pt b/dair_pll_old/assets/contactnets_cube/224.pt
deleted file mode 100644
index 88d7689..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/224.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/225.pt b/dair_pll_old/assets/contactnets_cube/225.pt
deleted file mode 100644
index 3fd0267..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/225.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/226.pt b/dair_pll_old/assets/contactnets_cube/226.pt
deleted file mode 100644
index e9689d0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/226.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/227.pt b/dair_pll_old/assets/contactnets_cube/227.pt
deleted file mode 100644
index c71eeac..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/227.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/228.pt b/dair_pll_old/assets/contactnets_cube/228.pt
deleted file mode 100644
index d0d1514..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/228.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/229.pt b/dair_pll_old/assets/contactnets_cube/229.pt
deleted file mode 100644
index ece25c4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/229.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/23.pt b/dair_pll_old/assets/contactnets_cube/23.pt
deleted file mode 100644
index 8b92657..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/23.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/230.pt b/dair_pll_old/assets/contactnets_cube/230.pt
deleted file mode 100644
index bcce81c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/230.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/231.pt b/dair_pll_old/assets/contactnets_cube/231.pt
deleted file mode 100644
index 3cf8db0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/231.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/232.pt b/dair_pll_old/assets/contactnets_cube/232.pt
deleted file mode 100644
index 9ef0175..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/232.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/233.pt b/dair_pll_old/assets/contactnets_cube/233.pt
deleted file mode 100644
index a3a4b4b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/233.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/234.pt b/dair_pll_old/assets/contactnets_cube/234.pt
deleted file mode 100644
index f150efd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/234.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/235.pt b/dair_pll_old/assets/contactnets_cube/235.pt
deleted file mode 100644
index 0c16552..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/235.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/236.pt b/dair_pll_old/assets/contactnets_cube/236.pt
deleted file mode 100644
index 1f97053..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/236.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/237.pt b/dair_pll_old/assets/contactnets_cube/237.pt
deleted file mode 100644
index c7ff803..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/237.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/238.pt b/dair_pll_old/assets/contactnets_cube/238.pt
deleted file mode 100644
index f732719..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/238.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/239.pt b/dair_pll_old/assets/contactnets_cube/239.pt
deleted file mode 100644
index f2888bc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/239.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/24.pt b/dair_pll_old/assets/contactnets_cube/24.pt
deleted file mode 100644
index affd230..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/24.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/240.pt b/dair_pll_old/assets/contactnets_cube/240.pt
deleted file mode 100644
index 3e1fb1f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/240.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/241.pt b/dair_pll_old/assets/contactnets_cube/241.pt
deleted file mode 100644
index b0f98ca..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/241.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/242.pt b/dair_pll_old/assets/contactnets_cube/242.pt
deleted file mode 100644
index 12a70a8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/242.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/243.pt b/dair_pll_old/assets/contactnets_cube/243.pt
deleted file mode 100644
index 28d5d0e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/243.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/244.pt b/dair_pll_old/assets/contactnets_cube/244.pt
deleted file mode 100644
index 90639f7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/244.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/245.pt b/dair_pll_old/assets/contactnets_cube/245.pt
deleted file mode 100644
index 2043e0f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/245.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/246.pt b/dair_pll_old/assets/contactnets_cube/246.pt
deleted file mode 100644
index 79b6662..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/246.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/247.pt b/dair_pll_old/assets/contactnets_cube/247.pt
deleted file mode 100644
index 7cdb3ee..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/247.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/248.pt b/dair_pll_old/assets/contactnets_cube/248.pt
deleted file mode 100644
index 82d32d8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/248.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/249.pt b/dair_pll_old/assets/contactnets_cube/249.pt
deleted file mode 100644
index 607ca20..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/249.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/25.pt b/dair_pll_old/assets/contactnets_cube/25.pt
deleted file mode 100644
index f5e66d3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/25.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/250.pt b/dair_pll_old/assets/contactnets_cube/250.pt
deleted file mode 100644
index f0b6d56..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/250.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/251.pt b/dair_pll_old/assets/contactnets_cube/251.pt
deleted file mode 100644
index fa6d9c8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/251.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/252.pt b/dair_pll_old/assets/contactnets_cube/252.pt
deleted file mode 100644
index edee6b5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/252.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/253.pt b/dair_pll_old/assets/contactnets_cube/253.pt
deleted file mode 100644
index a45e540..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/253.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/254.pt b/dair_pll_old/assets/contactnets_cube/254.pt
deleted file mode 100644
index f17280b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/254.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/255.pt b/dair_pll_old/assets/contactnets_cube/255.pt
deleted file mode 100644
index 073cae5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/255.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/256.pt b/dair_pll_old/assets/contactnets_cube/256.pt
deleted file mode 100644
index 3082f81..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/256.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/257.pt b/dair_pll_old/assets/contactnets_cube/257.pt
deleted file mode 100644
index cf5238e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/257.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/258.pt b/dair_pll_old/assets/contactnets_cube/258.pt
deleted file mode 100644
index 1422ac9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/258.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/259.pt b/dair_pll_old/assets/contactnets_cube/259.pt
deleted file mode 100644
index 8d98e10..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/259.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/26.pt b/dair_pll_old/assets/contactnets_cube/26.pt
deleted file mode 100644
index 3767b21..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/26.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/260.pt b/dair_pll_old/assets/contactnets_cube/260.pt
deleted file mode 100644
index 66c77c4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/260.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/261.pt b/dair_pll_old/assets/contactnets_cube/261.pt
deleted file mode 100644
index 27b7c5d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/261.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/262.pt b/dair_pll_old/assets/contactnets_cube/262.pt
deleted file mode 100644
index a1050d4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/262.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/263.pt b/dair_pll_old/assets/contactnets_cube/263.pt
deleted file mode 100644
index 3862cea..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/263.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/264.pt b/dair_pll_old/assets/contactnets_cube/264.pt
deleted file mode 100644
index 3798703..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/264.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/265.pt b/dair_pll_old/assets/contactnets_cube/265.pt
deleted file mode 100644
index a51fc3d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/265.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/266.pt b/dair_pll_old/assets/contactnets_cube/266.pt
deleted file mode 100644
index ef16bf0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/266.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/267.pt b/dair_pll_old/assets/contactnets_cube/267.pt
deleted file mode 100644
index c4a2221..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/267.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/268.pt b/dair_pll_old/assets/contactnets_cube/268.pt
deleted file mode 100644
index a77e039..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/268.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/269.pt b/dair_pll_old/assets/contactnets_cube/269.pt
deleted file mode 100644
index 18feae2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/269.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/27.pt b/dair_pll_old/assets/contactnets_cube/27.pt
deleted file mode 100644
index ce052f0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/27.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/270.pt b/dair_pll_old/assets/contactnets_cube/270.pt
deleted file mode 100644
index f159419..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/270.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/271.pt b/dair_pll_old/assets/contactnets_cube/271.pt
deleted file mode 100644
index df917b6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/271.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/272.pt b/dair_pll_old/assets/contactnets_cube/272.pt
deleted file mode 100644
index 832bc49..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/272.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/273.pt b/dair_pll_old/assets/contactnets_cube/273.pt
deleted file mode 100644
index 9ca45c9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/273.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/274.pt b/dair_pll_old/assets/contactnets_cube/274.pt
deleted file mode 100644
index 2dfd854..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/274.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/275.pt b/dair_pll_old/assets/contactnets_cube/275.pt
deleted file mode 100644
index 39d6586..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/275.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/276.pt b/dair_pll_old/assets/contactnets_cube/276.pt
deleted file mode 100644
index 58768c4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/276.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/277.pt b/dair_pll_old/assets/contactnets_cube/277.pt
deleted file mode 100644
index bcc7226..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/277.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/278.pt b/dair_pll_old/assets/contactnets_cube/278.pt
deleted file mode 100644
index b2dcbb5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/278.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/279.pt b/dair_pll_old/assets/contactnets_cube/279.pt
deleted file mode 100644
index 48920e0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/279.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/28.pt b/dair_pll_old/assets/contactnets_cube/28.pt
deleted file mode 100644
index c40516a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/28.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/280.pt b/dair_pll_old/assets/contactnets_cube/280.pt
deleted file mode 100644
index da11823..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/280.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/281.pt b/dair_pll_old/assets/contactnets_cube/281.pt
deleted file mode 100644
index eec60d4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/281.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/282.pt b/dair_pll_old/assets/contactnets_cube/282.pt
deleted file mode 100644
index 68c98b9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/282.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/283.pt b/dair_pll_old/assets/contactnets_cube/283.pt
deleted file mode 100644
index 4882c8f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/283.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/284.pt b/dair_pll_old/assets/contactnets_cube/284.pt
deleted file mode 100644
index 0419827..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/284.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/285.pt b/dair_pll_old/assets/contactnets_cube/285.pt
deleted file mode 100644
index 68c73d1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/285.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/286.pt b/dair_pll_old/assets/contactnets_cube/286.pt
deleted file mode 100644
index 91474eb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/286.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/287.pt b/dair_pll_old/assets/contactnets_cube/287.pt
deleted file mode 100644
index dec9e0a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/287.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/288.pt b/dair_pll_old/assets/contactnets_cube/288.pt
deleted file mode 100644
index ad49446..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/288.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/289.pt b/dair_pll_old/assets/contactnets_cube/289.pt
deleted file mode 100644
index f55d506..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/289.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/29.pt b/dair_pll_old/assets/contactnets_cube/29.pt
deleted file mode 100644
index 0a0ba9c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/29.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/290.pt b/dair_pll_old/assets/contactnets_cube/290.pt
deleted file mode 100644
index 07bb7b7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/290.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/291.pt b/dair_pll_old/assets/contactnets_cube/291.pt
deleted file mode 100644
index cf3d558..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/291.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/292.pt b/dair_pll_old/assets/contactnets_cube/292.pt
deleted file mode 100644
index 422766f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/292.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/293.pt b/dair_pll_old/assets/contactnets_cube/293.pt
deleted file mode 100644
index 3b9de53..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/293.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/294.pt b/dair_pll_old/assets/contactnets_cube/294.pt
deleted file mode 100644
index 4d2e52c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/294.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/295.pt b/dair_pll_old/assets/contactnets_cube/295.pt
deleted file mode 100644
index f5a6a2e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/295.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/296.pt b/dair_pll_old/assets/contactnets_cube/296.pt
deleted file mode 100644
index 9f3019c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/296.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/297.pt b/dair_pll_old/assets/contactnets_cube/297.pt
deleted file mode 100644
index 3d9e6ab..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/297.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/298.pt b/dair_pll_old/assets/contactnets_cube/298.pt
deleted file mode 100644
index 3751c20..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/298.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/299.pt b/dair_pll_old/assets/contactnets_cube/299.pt
deleted file mode 100644
index 1b91466..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/299.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/3.pt b/dair_pll_old/assets/contactnets_cube/3.pt
deleted file mode 100644
index 86fd32c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/3.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/30.pt b/dair_pll_old/assets/contactnets_cube/30.pt
deleted file mode 100644
index 85428de..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/30.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/300.pt b/dair_pll_old/assets/contactnets_cube/300.pt
deleted file mode 100644
index 4f129f2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/300.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/301.pt b/dair_pll_old/assets/contactnets_cube/301.pt
deleted file mode 100644
index 19da067..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/301.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/302.pt b/dair_pll_old/assets/contactnets_cube/302.pt
deleted file mode 100644
index 476239d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/302.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/303.pt b/dair_pll_old/assets/contactnets_cube/303.pt
deleted file mode 100644
index 52c3acd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/303.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/304.pt b/dair_pll_old/assets/contactnets_cube/304.pt
deleted file mode 100644
index 7906936..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/304.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/305.pt b/dair_pll_old/assets/contactnets_cube/305.pt
deleted file mode 100644
index cd68809..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/305.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/306.pt b/dair_pll_old/assets/contactnets_cube/306.pt
deleted file mode 100644
index 44d22ee..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/306.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/307.pt b/dair_pll_old/assets/contactnets_cube/307.pt
deleted file mode 100644
index b5d79da..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/307.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/308.pt b/dair_pll_old/assets/contactnets_cube/308.pt
deleted file mode 100644
index c18619b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/308.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/309.pt b/dair_pll_old/assets/contactnets_cube/309.pt
deleted file mode 100644
index a796b7c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/309.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/31.pt b/dair_pll_old/assets/contactnets_cube/31.pt
deleted file mode 100644
index bba3de0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/31.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/310.pt b/dair_pll_old/assets/contactnets_cube/310.pt
deleted file mode 100644
index 8f3c015..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/310.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/311.pt b/dair_pll_old/assets/contactnets_cube/311.pt
deleted file mode 100644
index 51b6cc7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/311.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/312.pt b/dair_pll_old/assets/contactnets_cube/312.pt
deleted file mode 100644
index 4efa364..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/312.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/313.pt b/dair_pll_old/assets/contactnets_cube/313.pt
deleted file mode 100644
index 6000988..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/313.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/314.pt b/dair_pll_old/assets/contactnets_cube/314.pt
deleted file mode 100644
index eb58183..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/314.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/315.pt b/dair_pll_old/assets/contactnets_cube/315.pt
deleted file mode 100644
index 4499a4b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/315.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/316.pt b/dair_pll_old/assets/contactnets_cube/316.pt
deleted file mode 100644
index 915aef1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/316.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/317.pt b/dair_pll_old/assets/contactnets_cube/317.pt
deleted file mode 100644
index ed99553..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/317.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/318.pt b/dair_pll_old/assets/contactnets_cube/318.pt
deleted file mode 100644
index 019c3d2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/318.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/319.pt b/dair_pll_old/assets/contactnets_cube/319.pt
deleted file mode 100644
index 9849fe8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/319.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/32.pt b/dair_pll_old/assets/contactnets_cube/32.pt
deleted file mode 100644
index 406bf5a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/32.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/320.pt b/dair_pll_old/assets/contactnets_cube/320.pt
deleted file mode 100644
index 905a412..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/320.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/321.pt b/dair_pll_old/assets/contactnets_cube/321.pt
deleted file mode 100644
index bd8b670..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/321.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/322.pt b/dair_pll_old/assets/contactnets_cube/322.pt
deleted file mode 100644
index c81b14b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/322.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/323.pt b/dair_pll_old/assets/contactnets_cube/323.pt
deleted file mode 100644
index 553014c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/323.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/324.pt b/dair_pll_old/assets/contactnets_cube/324.pt
deleted file mode 100644
index 8ef915b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/324.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/325.pt b/dair_pll_old/assets/contactnets_cube/325.pt
deleted file mode 100644
index be97fd0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/325.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/326.pt b/dair_pll_old/assets/contactnets_cube/326.pt
deleted file mode 100644
index 63ba8f6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/326.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/327.pt b/dair_pll_old/assets/contactnets_cube/327.pt
deleted file mode 100644
index 14b1429..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/327.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/328.pt b/dair_pll_old/assets/contactnets_cube/328.pt
deleted file mode 100644
index 8f267cf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/328.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/329.pt b/dair_pll_old/assets/contactnets_cube/329.pt
deleted file mode 100644
index b547b76..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/329.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/33.pt b/dair_pll_old/assets/contactnets_cube/33.pt
deleted file mode 100644
index b64ad37..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/33.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/330.pt b/dair_pll_old/assets/contactnets_cube/330.pt
deleted file mode 100644
index 1ef9908..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/330.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/331.pt b/dair_pll_old/assets/contactnets_cube/331.pt
deleted file mode 100644
index 285efbe..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/331.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/332.pt b/dair_pll_old/assets/contactnets_cube/332.pt
deleted file mode 100644
index 02814a4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/332.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/333.pt b/dair_pll_old/assets/contactnets_cube/333.pt
deleted file mode 100644
index 3b1765a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/333.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/334.pt b/dair_pll_old/assets/contactnets_cube/334.pt
deleted file mode 100644
index 9adbcf8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/334.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/335.pt b/dair_pll_old/assets/contactnets_cube/335.pt
deleted file mode 100644
index e04eec8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/335.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/336.pt b/dair_pll_old/assets/contactnets_cube/336.pt
deleted file mode 100644
index abbf510..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/336.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/337.pt b/dair_pll_old/assets/contactnets_cube/337.pt
deleted file mode 100644
index 1332edc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/337.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/338.pt b/dair_pll_old/assets/contactnets_cube/338.pt
deleted file mode 100644
index 4c28321..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/338.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/339.pt b/dair_pll_old/assets/contactnets_cube/339.pt
deleted file mode 100644
index 36560ed..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/339.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/34.pt b/dair_pll_old/assets/contactnets_cube/34.pt
deleted file mode 100644
index 7824fdd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/34.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/340.pt b/dair_pll_old/assets/contactnets_cube/340.pt
deleted file mode 100644
index 574604e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/340.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/341.pt b/dair_pll_old/assets/contactnets_cube/341.pt
deleted file mode 100644
index 0e730da..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/341.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/342.pt b/dair_pll_old/assets/contactnets_cube/342.pt
deleted file mode 100644
index 2bde70c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/342.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/343.pt b/dair_pll_old/assets/contactnets_cube/343.pt
deleted file mode 100644
index 78e66ca..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/343.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/344.pt b/dair_pll_old/assets/contactnets_cube/344.pt
deleted file mode 100644
index 0feb151..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/344.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/345.pt b/dair_pll_old/assets/contactnets_cube/345.pt
deleted file mode 100644
index 4b3cff1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/345.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/346.pt b/dair_pll_old/assets/contactnets_cube/346.pt
deleted file mode 100644
index 195fb54..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/346.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/347.pt b/dair_pll_old/assets/contactnets_cube/347.pt
deleted file mode 100644
index bdfd7d8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/347.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/348.pt b/dair_pll_old/assets/contactnets_cube/348.pt
deleted file mode 100644
index 8083bf1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/348.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/349.pt b/dair_pll_old/assets/contactnets_cube/349.pt
deleted file mode 100644
index 5e48e8c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/349.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/35.pt b/dair_pll_old/assets/contactnets_cube/35.pt
deleted file mode 100644
index 4c98be9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/35.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/350.pt b/dair_pll_old/assets/contactnets_cube/350.pt
deleted file mode 100644
index f7d9191..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/350.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/351.pt b/dair_pll_old/assets/contactnets_cube/351.pt
deleted file mode 100644
index 89e60a4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/351.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/352.pt b/dair_pll_old/assets/contactnets_cube/352.pt
deleted file mode 100644
index 0d8d33b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/352.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/353.pt b/dair_pll_old/assets/contactnets_cube/353.pt
deleted file mode 100644
index 30192a3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/353.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/354.pt b/dair_pll_old/assets/contactnets_cube/354.pt
deleted file mode 100644
index 707d043..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/354.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/355.pt b/dair_pll_old/assets/contactnets_cube/355.pt
deleted file mode 100644
index de1f83e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/355.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/356.pt b/dair_pll_old/assets/contactnets_cube/356.pt
deleted file mode 100644
index 753b111..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/356.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/357.pt b/dair_pll_old/assets/contactnets_cube/357.pt
deleted file mode 100644
index 13c4eba..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/357.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/358.pt b/dair_pll_old/assets/contactnets_cube/358.pt
deleted file mode 100644
index e0217d2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/358.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/359.pt b/dair_pll_old/assets/contactnets_cube/359.pt
deleted file mode 100644
index 8ca33da..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/359.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/36.pt b/dair_pll_old/assets/contactnets_cube/36.pt
deleted file mode 100644
index ccc5cbe..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/36.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/360.pt b/dair_pll_old/assets/contactnets_cube/360.pt
deleted file mode 100644
index 35a4ca3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/360.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/361.pt b/dair_pll_old/assets/contactnets_cube/361.pt
deleted file mode 100644
index 690e3da..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/361.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/362.pt b/dair_pll_old/assets/contactnets_cube/362.pt
deleted file mode 100644
index 6a08062..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/362.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/363.pt b/dair_pll_old/assets/contactnets_cube/363.pt
deleted file mode 100644
index 0ade5a8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/363.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/364.pt b/dair_pll_old/assets/contactnets_cube/364.pt
deleted file mode 100644
index acaa0d3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/364.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/365.pt b/dair_pll_old/assets/contactnets_cube/365.pt
deleted file mode 100644
index 9c9910f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/365.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/366.pt b/dair_pll_old/assets/contactnets_cube/366.pt
deleted file mode 100644
index 89c9535..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/366.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/367.pt b/dair_pll_old/assets/contactnets_cube/367.pt
deleted file mode 100644
index 0c30bfb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/367.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/368.pt b/dair_pll_old/assets/contactnets_cube/368.pt
deleted file mode 100644
index d85f186..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/368.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/369.pt b/dair_pll_old/assets/contactnets_cube/369.pt
deleted file mode 100644
index 3f1e32f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/369.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/37.pt b/dair_pll_old/assets/contactnets_cube/37.pt
deleted file mode 100644
index fa037cd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/37.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/370.pt b/dair_pll_old/assets/contactnets_cube/370.pt
deleted file mode 100644
index 5b85b9c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/370.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/371.pt b/dair_pll_old/assets/contactnets_cube/371.pt
deleted file mode 100644
index a6dbd41..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/371.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/372.pt b/dair_pll_old/assets/contactnets_cube/372.pt
deleted file mode 100644
index a43d035..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/372.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/373.pt b/dair_pll_old/assets/contactnets_cube/373.pt
deleted file mode 100644
index c32bfa9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/373.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/374.pt b/dair_pll_old/assets/contactnets_cube/374.pt
deleted file mode 100644
index c815c36..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/374.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/375.pt b/dair_pll_old/assets/contactnets_cube/375.pt
deleted file mode 100644
index 4788061..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/375.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/376.pt b/dair_pll_old/assets/contactnets_cube/376.pt
deleted file mode 100644
index d4728c2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/376.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/377.pt b/dair_pll_old/assets/contactnets_cube/377.pt
deleted file mode 100644
index 5c9bf27..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/377.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/378.pt b/dair_pll_old/assets/contactnets_cube/378.pt
deleted file mode 100644
index 3750129..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/378.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/379.pt b/dair_pll_old/assets/contactnets_cube/379.pt
deleted file mode 100644
index 8c23dc3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/379.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/38.pt b/dair_pll_old/assets/contactnets_cube/38.pt
deleted file mode 100644
index 5f6437c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/38.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/380.pt b/dair_pll_old/assets/contactnets_cube/380.pt
deleted file mode 100644
index a8874dc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/380.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/381.pt b/dair_pll_old/assets/contactnets_cube/381.pt
deleted file mode 100644
index 6164182..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/381.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/382.pt b/dair_pll_old/assets/contactnets_cube/382.pt
deleted file mode 100644
index d79d3d1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/382.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/383.pt b/dair_pll_old/assets/contactnets_cube/383.pt
deleted file mode 100644
index 1710d0c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/383.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/384.pt b/dair_pll_old/assets/contactnets_cube/384.pt
deleted file mode 100644
index d32fb74..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/384.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/385.pt b/dair_pll_old/assets/contactnets_cube/385.pt
deleted file mode 100644
index 6c061bd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/385.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/386.pt b/dair_pll_old/assets/contactnets_cube/386.pt
deleted file mode 100644
index b9ffde7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/386.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/387.pt b/dair_pll_old/assets/contactnets_cube/387.pt
deleted file mode 100644
index 814e0d1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/387.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/388.pt b/dair_pll_old/assets/contactnets_cube/388.pt
deleted file mode 100644
index a5eb71a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/388.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/389.pt b/dair_pll_old/assets/contactnets_cube/389.pt
deleted file mode 100644
index f24d7f4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/389.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/39.pt b/dair_pll_old/assets/contactnets_cube/39.pt
deleted file mode 100644
index 841af9a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/39.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/390.pt b/dair_pll_old/assets/contactnets_cube/390.pt
deleted file mode 100644
index 11b7cf8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/390.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/391.pt b/dair_pll_old/assets/contactnets_cube/391.pt
deleted file mode 100644
index d4298ff..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/391.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/392.pt b/dair_pll_old/assets/contactnets_cube/392.pt
deleted file mode 100644
index 4b23c14..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/392.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/393.pt b/dair_pll_old/assets/contactnets_cube/393.pt
deleted file mode 100644
index b0ddd16..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/393.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/394.pt b/dair_pll_old/assets/contactnets_cube/394.pt
deleted file mode 100644
index fd56a47..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/394.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/395.pt b/dair_pll_old/assets/contactnets_cube/395.pt
deleted file mode 100644
index 35cf217..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/395.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/396.pt b/dair_pll_old/assets/contactnets_cube/396.pt
deleted file mode 100644
index 18c7751..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/396.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/397.pt b/dair_pll_old/assets/contactnets_cube/397.pt
deleted file mode 100644
index 5396ede..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/397.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/398.pt b/dair_pll_old/assets/contactnets_cube/398.pt
deleted file mode 100644
index 9113b31..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/398.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/399.pt b/dair_pll_old/assets/contactnets_cube/399.pt
deleted file mode 100644
index e3a96fb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/399.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/4.pt b/dair_pll_old/assets/contactnets_cube/4.pt
deleted file mode 100644
index 3e1f824..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/4.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/40.pt b/dair_pll_old/assets/contactnets_cube/40.pt
deleted file mode 100644
index 61f985a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/40.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/400.pt b/dair_pll_old/assets/contactnets_cube/400.pt
deleted file mode 100644
index ef4afe3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/400.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/401.pt b/dair_pll_old/assets/contactnets_cube/401.pt
deleted file mode 100644
index 67d26cb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/401.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/402.pt b/dair_pll_old/assets/contactnets_cube/402.pt
deleted file mode 100644
index a9b9b47..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/402.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/403.pt b/dair_pll_old/assets/contactnets_cube/403.pt
deleted file mode 100644
index 7fb7bac..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/403.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/404.pt b/dair_pll_old/assets/contactnets_cube/404.pt
deleted file mode 100644
index 7d16b5e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/404.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/405.pt b/dair_pll_old/assets/contactnets_cube/405.pt
deleted file mode 100644
index e620b29..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/405.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/406.pt b/dair_pll_old/assets/contactnets_cube/406.pt
deleted file mode 100644
index e0f8267..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/406.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/407.pt b/dair_pll_old/assets/contactnets_cube/407.pt
deleted file mode 100644
index 2d731ce..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/407.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/408.pt b/dair_pll_old/assets/contactnets_cube/408.pt
deleted file mode 100644
index abdb36e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/408.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/409.pt b/dair_pll_old/assets/contactnets_cube/409.pt
deleted file mode 100644
index e86ff24..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/409.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/41.pt b/dair_pll_old/assets/contactnets_cube/41.pt
deleted file mode 100644
index e8b8cb9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/41.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/410.pt b/dair_pll_old/assets/contactnets_cube/410.pt
deleted file mode 100644
index 3ae0061..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/410.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/411.pt b/dair_pll_old/assets/contactnets_cube/411.pt
deleted file mode 100644
index 02a9dd0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/411.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/412.pt b/dair_pll_old/assets/contactnets_cube/412.pt
deleted file mode 100644
index c65833f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/412.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/413.pt b/dair_pll_old/assets/contactnets_cube/413.pt
deleted file mode 100644
index 0017742..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/413.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/414.pt b/dair_pll_old/assets/contactnets_cube/414.pt
deleted file mode 100644
index 555b1fe..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/414.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/415.pt b/dair_pll_old/assets/contactnets_cube/415.pt
deleted file mode 100644
index a9ef38d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/415.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/416.pt b/dair_pll_old/assets/contactnets_cube/416.pt
deleted file mode 100644
index 089546d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/416.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/417.pt b/dair_pll_old/assets/contactnets_cube/417.pt
deleted file mode 100644
index 386f151..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/417.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/418.pt b/dair_pll_old/assets/contactnets_cube/418.pt
deleted file mode 100644
index 492ba3a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/418.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/419.pt b/dair_pll_old/assets/contactnets_cube/419.pt
deleted file mode 100644
index 965c35f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/419.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/42.pt b/dair_pll_old/assets/contactnets_cube/42.pt
deleted file mode 100644
index 460526c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/42.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/420.pt b/dair_pll_old/assets/contactnets_cube/420.pt
deleted file mode 100644
index 603eb98..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/420.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/421.pt b/dair_pll_old/assets/contactnets_cube/421.pt
deleted file mode 100644
index af62b78..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/421.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/422.pt b/dair_pll_old/assets/contactnets_cube/422.pt
deleted file mode 100644
index cca83f9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/422.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/423.pt b/dair_pll_old/assets/contactnets_cube/423.pt
deleted file mode 100644
index ee08854..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/423.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/424.pt b/dair_pll_old/assets/contactnets_cube/424.pt
deleted file mode 100644
index ff133da..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/424.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/425.pt b/dair_pll_old/assets/contactnets_cube/425.pt
deleted file mode 100644
index 0375ee7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/425.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/426.pt b/dair_pll_old/assets/contactnets_cube/426.pt
deleted file mode 100644
index 2222ce7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/426.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/427.pt b/dair_pll_old/assets/contactnets_cube/427.pt
deleted file mode 100644
index 0a4cdfa..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/427.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/428.pt b/dair_pll_old/assets/contactnets_cube/428.pt
deleted file mode 100644
index 276e8db..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/428.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/429.pt b/dair_pll_old/assets/contactnets_cube/429.pt
deleted file mode 100644
index a7ae9ea..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/429.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/43.pt b/dair_pll_old/assets/contactnets_cube/43.pt
deleted file mode 100644
index ed534ae..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/43.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/430.pt b/dair_pll_old/assets/contactnets_cube/430.pt
deleted file mode 100644
index 44c54f6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/430.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/431.pt b/dair_pll_old/assets/contactnets_cube/431.pt
deleted file mode 100644
index 550ad67..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/431.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/432.pt b/dair_pll_old/assets/contactnets_cube/432.pt
deleted file mode 100644
index 19e3ec4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/432.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/433.pt b/dair_pll_old/assets/contactnets_cube/433.pt
deleted file mode 100644
index caa31e7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/433.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/434.pt b/dair_pll_old/assets/contactnets_cube/434.pt
deleted file mode 100644
index ff95f3f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/434.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/435.pt b/dair_pll_old/assets/contactnets_cube/435.pt
deleted file mode 100644
index 941aefc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/435.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/436.pt b/dair_pll_old/assets/contactnets_cube/436.pt
deleted file mode 100644
index 69d7e7b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/436.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/437.pt b/dair_pll_old/assets/contactnets_cube/437.pt
deleted file mode 100644
index 0b9dd44..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/437.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/438.pt b/dair_pll_old/assets/contactnets_cube/438.pt
deleted file mode 100644
index 21f18fc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/438.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/439.pt b/dair_pll_old/assets/contactnets_cube/439.pt
deleted file mode 100644
index 71fecb6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/439.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/44.pt b/dair_pll_old/assets/contactnets_cube/44.pt
deleted file mode 100644
index dc22ae8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/44.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/440.pt b/dair_pll_old/assets/contactnets_cube/440.pt
deleted file mode 100644
index 4b850b7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/440.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/441.pt b/dair_pll_old/assets/contactnets_cube/441.pt
deleted file mode 100644
index 7bb7e5e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/441.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/442.pt b/dair_pll_old/assets/contactnets_cube/442.pt
deleted file mode 100644
index f5875c0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/442.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/443.pt b/dair_pll_old/assets/contactnets_cube/443.pt
deleted file mode 100644
index a254ac7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/443.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/444.pt b/dair_pll_old/assets/contactnets_cube/444.pt
deleted file mode 100644
index 8c31d32..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/444.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/445.pt b/dair_pll_old/assets/contactnets_cube/445.pt
deleted file mode 100644
index ed46ac5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/445.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/446.pt b/dair_pll_old/assets/contactnets_cube/446.pt
deleted file mode 100644
index 4fe65a6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/446.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/447.pt b/dair_pll_old/assets/contactnets_cube/447.pt
deleted file mode 100644
index ab4728e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/447.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/448.pt b/dair_pll_old/assets/contactnets_cube/448.pt
deleted file mode 100644
index 251f651..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/448.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/449.pt b/dair_pll_old/assets/contactnets_cube/449.pt
deleted file mode 100644
index 7904ed3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/449.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/45.pt b/dair_pll_old/assets/contactnets_cube/45.pt
deleted file mode 100644
index 386360d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/45.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/450.pt b/dair_pll_old/assets/contactnets_cube/450.pt
deleted file mode 100644
index ddaf702..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/450.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/451.pt b/dair_pll_old/assets/contactnets_cube/451.pt
deleted file mode 100644
index 7ef106d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/451.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/452.pt b/dair_pll_old/assets/contactnets_cube/452.pt
deleted file mode 100644
index 10c358a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/452.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/453.pt b/dair_pll_old/assets/contactnets_cube/453.pt
deleted file mode 100644
index 89f571c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/453.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/454.pt b/dair_pll_old/assets/contactnets_cube/454.pt
deleted file mode 100644
index 5fa973a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/454.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/455.pt b/dair_pll_old/assets/contactnets_cube/455.pt
deleted file mode 100644
index cdd96f5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/455.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/456.pt b/dair_pll_old/assets/contactnets_cube/456.pt
deleted file mode 100644
index 05b0c70..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/456.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/457.pt b/dair_pll_old/assets/contactnets_cube/457.pt
deleted file mode 100644
index 641efe1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/457.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/458.pt b/dair_pll_old/assets/contactnets_cube/458.pt
deleted file mode 100644
index 3ee1fce..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/458.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/459.pt b/dair_pll_old/assets/contactnets_cube/459.pt
deleted file mode 100644
index 037107d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/459.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/46.pt b/dair_pll_old/assets/contactnets_cube/46.pt
deleted file mode 100644
index 5739c6d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/46.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/460.pt b/dair_pll_old/assets/contactnets_cube/460.pt
deleted file mode 100644
index cc18582..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/460.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/461.pt b/dair_pll_old/assets/contactnets_cube/461.pt
deleted file mode 100644
index 32176af..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/461.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/462.pt b/dair_pll_old/assets/contactnets_cube/462.pt
deleted file mode 100644
index 0f1af93..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/462.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/463.pt b/dair_pll_old/assets/contactnets_cube/463.pt
deleted file mode 100644
index 878717f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/463.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/464.pt b/dair_pll_old/assets/contactnets_cube/464.pt
deleted file mode 100644
index 19b4824..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/464.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/465.pt b/dair_pll_old/assets/contactnets_cube/465.pt
deleted file mode 100644
index 1c4fa2e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/465.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/466.pt b/dair_pll_old/assets/contactnets_cube/466.pt
deleted file mode 100644
index b15e39a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/466.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/467.pt b/dair_pll_old/assets/contactnets_cube/467.pt
deleted file mode 100644
index 865c808..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/467.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/468.pt b/dair_pll_old/assets/contactnets_cube/468.pt
deleted file mode 100644
index c3e5ac0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/468.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/469.pt b/dair_pll_old/assets/contactnets_cube/469.pt
deleted file mode 100644
index 6c2dd4a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/469.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/47.pt b/dair_pll_old/assets/contactnets_cube/47.pt
deleted file mode 100644
index 64b4ccc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/47.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/470.pt b/dair_pll_old/assets/contactnets_cube/470.pt
deleted file mode 100644
index 6e81967..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/470.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/471.pt b/dair_pll_old/assets/contactnets_cube/471.pt
deleted file mode 100644
index 76c150c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/471.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/472.pt b/dair_pll_old/assets/contactnets_cube/472.pt
deleted file mode 100644
index c62d9aa..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/472.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/473.pt b/dair_pll_old/assets/contactnets_cube/473.pt
deleted file mode 100644
index 6c27f33..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/473.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/474.pt b/dair_pll_old/assets/contactnets_cube/474.pt
deleted file mode 100644
index 5ba3b99..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/474.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/475.pt b/dair_pll_old/assets/contactnets_cube/475.pt
deleted file mode 100644
index a86c1c2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/475.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/476.pt b/dair_pll_old/assets/contactnets_cube/476.pt
deleted file mode 100644
index 7b9c11b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/476.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/477.pt b/dair_pll_old/assets/contactnets_cube/477.pt
deleted file mode 100644
index b3feeec..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/477.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/478.pt b/dair_pll_old/assets/contactnets_cube/478.pt
deleted file mode 100644
index 639d6be..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/478.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/479.pt b/dair_pll_old/assets/contactnets_cube/479.pt
deleted file mode 100644
index a0e062e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/479.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/48.pt b/dair_pll_old/assets/contactnets_cube/48.pt
deleted file mode 100644
index 5ecee70..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/48.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/480.pt b/dair_pll_old/assets/contactnets_cube/480.pt
deleted file mode 100644
index 49f5ff9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/480.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/481.pt b/dair_pll_old/assets/contactnets_cube/481.pt
deleted file mode 100644
index b6524d0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/481.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/482.pt b/dair_pll_old/assets/contactnets_cube/482.pt
deleted file mode 100644
index ebd8db5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/482.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/483.pt b/dair_pll_old/assets/contactnets_cube/483.pt
deleted file mode 100644
index e764835..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/483.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/484.pt b/dair_pll_old/assets/contactnets_cube/484.pt
deleted file mode 100644
index dd55a93..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/484.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/485.pt b/dair_pll_old/assets/contactnets_cube/485.pt
deleted file mode 100644
index 96aa70a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/485.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/486.pt b/dair_pll_old/assets/contactnets_cube/486.pt
deleted file mode 100644
index 70bc711..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/486.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/487.pt b/dair_pll_old/assets/contactnets_cube/487.pt
deleted file mode 100644
index 206b874..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/487.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/488.pt b/dair_pll_old/assets/contactnets_cube/488.pt
deleted file mode 100644
index a4af521..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/488.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/489.pt b/dair_pll_old/assets/contactnets_cube/489.pt
deleted file mode 100644
index 8a6c134..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/489.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/49.pt b/dair_pll_old/assets/contactnets_cube/49.pt
deleted file mode 100644
index 126692a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/49.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/490.pt b/dair_pll_old/assets/contactnets_cube/490.pt
deleted file mode 100644
index 7042ec8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/490.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/491.pt b/dair_pll_old/assets/contactnets_cube/491.pt
deleted file mode 100644
index a65e14b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/491.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/492.pt b/dair_pll_old/assets/contactnets_cube/492.pt
deleted file mode 100644
index 9af25a8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/492.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/493.pt b/dair_pll_old/assets/contactnets_cube/493.pt
deleted file mode 100644
index f706520..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/493.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/494.pt b/dair_pll_old/assets/contactnets_cube/494.pt
deleted file mode 100644
index 18e59fb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/494.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/495.pt b/dair_pll_old/assets/contactnets_cube/495.pt
deleted file mode 100644
index 0266588..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/495.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/496.pt b/dair_pll_old/assets/contactnets_cube/496.pt
deleted file mode 100644
index d57b8fa..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/496.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/497.pt b/dair_pll_old/assets/contactnets_cube/497.pt
deleted file mode 100644
index 13ff7b2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/497.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/498.pt b/dair_pll_old/assets/contactnets_cube/498.pt
deleted file mode 100644
index 3442d4e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/498.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/499.pt b/dair_pll_old/assets/contactnets_cube/499.pt
deleted file mode 100644
index ff7304b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/499.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/5.pt b/dair_pll_old/assets/contactnets_cube/5.pt
deleted file mode 100644
index 098d3c2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/5.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/50.pt b/dair_pll_old/assets/contactnets_cube/50.pt
deleted file mode 100644
index 6113f24..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/50.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/500.pt b/dair_pll_old/assets/contactnets_cube/500.pt
deleted file mode 100644
index d67e282..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/500.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/501.pt b/dair_pll_old/assets/contactnets_cube/501.pt
deleted file mode 100644
index 1e62f83..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/501.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/502.pt b/dair_pll_old/assets/contactnets_cube/502.pt
deleted file mode 100644
index 994f37f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/502.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/503.pt b/dair_pll_old/assets/contactnets_cube/503.pt
deleted file mode 100644
index 96ca4f6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/503.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/504.pt b/dair_pll_old/assets/contactnets_cube/504.pt
deleted file mode 100644
index 0814c9d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/504.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/505.pt b/dair_pll_old/assets/contactnets_cube/505.pt
deleted file mode 100644
index 3107737..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/505.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/506.pt b/dair_pll_old/assets/contactnets_cube/506.pt
deleted file mode 100644
index b8d2c55..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/506.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/507.pt b/dair_pll_old/assets/contactnets_cube/507.pt
deleted file mode 100644
index 0fa5b4c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/507.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/508.pt b/dair_pll_old/assets/contactnets_cube/508.pt
deleted file mode 100644
index 4d7c421..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/508.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/509.pt b/dair_pll_old/assets/contactnets_cube/509.pt
deleted file mode 100644
index 1f45dde..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/509.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/51.pt b/dair_pll_old/assets/contactnets_cube/51.pt
deleted file mode 100644
index df83426..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/51.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/510.pt b/dair_pll_old/assets/contactnets_cube/510.pt
deleted file mode 100644
index 005ddc2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/510.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/511.pt b/dair_pll_old/assets/contactnets_cube/511.pt
deleted file mode 100644
index e870fb5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/511.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/512.pt b/dair_pll_old/assets/contactnets_cube/512.pt
deleted file mode 100644
index 8ebe8a3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/512.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/513.pt b/dair_pll_old/assets/contactnets_cube/513.pt
deleted file mode 100644
index 521a29c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/513.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/514.pt b/dair_pll_old/assets/contactnets_cube/514.pt
deleted file mode 100644
index 2fd84c5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/514.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/515.pt b/dair_pll_old/assets/contactnets_cube/515.pt
deleted file mode 100644
index 0ae0629..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/515.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/516.pt b/dair_pll_old/assets/contactnets_cube/516.pt
deleted file mode 100644
index 754efc2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/516.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/517.pt b/dair_pll_old/assets/contactnets_cube/517.pt
deleted file mode 100644
index 687c7cc..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/517.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/518.pt b/dair_pll_old/assets/contactnets_cube/518.pt
deleted file mode 100644
index 0a0526f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/518.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/519.pt b/dair_pll_old/assets/contactnets_cube/519.pt
deleted file mode 100644
index 9603f71..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/519.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/52.pt b/dair_pll_old/assets/contactnets_cube/52.pt
deleted file mode 100644
index 31a8df9..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/52.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/520.pt b/dair_pll_old/assets/contactnets_cube/520.pt
deleted file mode 100644
index 5d01eee..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/520.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/521.pt b/dair_pll_old/assets/contactnets_cube/521.pt
deleted file mode 100644
index 78937b8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/521.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/522.pt b/dair_pll_old/assets/contactnets_cube/522.pt
deleted file mode 100644
index b07009e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/522.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/523.pt b/dair_pll_old/assets/contactnets_cube/523.pt
deleted file mode 100644
index 1490086..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/523.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/524.pt b/dair_pll_old/assets/contactnets_cube/524.pt
deleted file mode 100644
index a875dda..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/524.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/525.pt b/dair_pll_old/assets/contactnets_cube/525.pt
deleted file mode 100644
index e744147..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/525.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/526.pt b/dair_pll_old/assets/contactnets_cube/526.pt
deleted file mode 100644
index 848f3d0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/526.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/527.pt b/dair_pll_old/assets/contactnets_cube/527.pt
deleted file mode 100644
index 2f311af..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/527.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/528.pt b/dair_pll_old/assets/contactnets_cube/528.pt
deleted file mode 100644
index f854b10..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/528.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/529.pt b/dair_pll_old/assets/contactnets_cube/529.pt
deleted file mode 100644
index 9ac2efd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/529.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/53.pt b/dair_pll_old/assets/contactnets_cube/53.pt
deleted file mode 100644
index e39224e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/53.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/530.pt b/dair_pll_old/assets/contactnets_cube/530.pt
deleted file mode 100644
index d6fc73d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/530.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/531.pt b/dair_pll_old/assets/contactnets_cube/531.pt
deleted file mode 100644
index fa2eb33..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/531.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/532.pt b/dair_pll_old/assets/contactnets_cube/532.pt
deleted file mode 100644
index 66467e0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/532.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/533.pt b/dair_pll_old/assets/contactnets_cube/533.pt
deleted file mode 100644
index 2dd0175..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/533.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/534.pt b/dair_pll_old/assets/contactnets_cube/534.pt
deleted file mode 100644
index bad798b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/534.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/535.pt b/dair_pll_old/assets/contactnets_cube/535.pt
deleted file mode 100644
index 8a57fce..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/535.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/536.pt b/dair_pll_old/assets/contactnets_cube/536.pt
deleted file mode 100644
index 9b3dbaa..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/536.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/537.pt b/dair_pll_old/assets/contactnets_cube/537.pt
deleted file mode 100644
index 390eb80..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/537.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/538.pt b/dair_pll_old/assets/contactnets_cube/538.pt
deleted file mode 100644
index ff4c1ca..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/538.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/539.pt b/dair_pll_old/assets/contactnets_cube/539.pt
deleted file mode 100644
index 5a3bdcd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/539.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/54.pt b/dair_pll_old/assets/contactnets_cube/54.pt
deleted file mode 100644
index f3d9237..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/54.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/540.pt b/dair_pll_old/assets/contactnets_cube/540.pt
deleted file mode 100644
index 906b20a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/540.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/541.pt b/dair_pll_old/assets/contactnets_cube/541.pt
deleted file mode 100644
index 95921b2..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/541.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/542.pt b/dair_pll_old/assets/contactnets_cube/542.pt
deleted file mode 100644
index a6e1e00..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/542.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/543.pt b/dair_pll_old/assets/contactnets_cube/543.pt
deleted file mode 100644
index 3d31eaf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/543.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/544.pt b/dair_pll_old/assets/contactnets_cube/544.pt
deleted file mode 100644
index 9e0de36..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/544.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/545.pt b/dair_pll_old/assets/contactnets_cube/545.pt
deleted file mode 100644
index 7b1d797..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/545.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/546.pt b/dair_pll_old/assets/contactnets_cube/546.pt
deleted file mode 100644
index bf5a395..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/546.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/547.pt b/dair_pll_old/assets/contactnets_cube/547.pt
deleted file mode 100644
index eed2bc7..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/547.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/548.pt b/dair_pll_old/assets/contactnets_cube/548.pt
deleted file mode 100644
index 9dc40fb..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/548.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/549.pt b/dair_pll_old/assets/contactnets_cube/549.pt
deleted file mode 100644
index 35dda19..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/549.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/55.pt b/dair_pll_old/assets/contactnets_cube/55.pt
deleted file mode 100644
index 4447907..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/55.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/56.pt b/dair_pll_old/assets/contactnets_cube/56.pt
deleted file mode 100644
index aec73f8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/56.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/57.pt b/dair_pll_old/assets/contactnets_cube/57.pt
deleted file mode 100644
index 02b3d25..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/57.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/58.pt b/dair_pll_old/assets/contactnets_cube/58.pt
deleted file mode 100644
index a92f314..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/58.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/59.pt b/dair_pll_old/assets/contactnets_cube/59.pt
deleted file mode 100644
index 56293a6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/59.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/6.pt b/dair_pll_old/assets/contactnets_cube/6.pt
deleted file mode 100644
index 769cc1b..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/6.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/60.pt b/dair_pll_old/assets/contactnets_cube/60.pt
deleted file mode 100644
index fd9c14c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/60.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/61.pt b/dair_pll_old/assets/contactnets_cube/61.pt
deleted file mode 100644
index 61d3e23..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/61.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/62.pt b/dair_pll_old/assets/contactnets_cube/62.pt
deleted file mode 100644
index 47f4edf..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/62.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/63.pt b/dair_pll_old/assets/contactnets_cube/63.pt
deleted file mode 100644
index 46e65e3..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/63.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/64.pt b/dair_pll_old/assets/contactnets_cube/64.pt
deleted file mode 100644
index e71f830..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/64.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/65.pt b/dair_pll_old/assets/contactnets_cube/65.pt
deleted file mode 100644
index a6982b4..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/65.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/66.pt b/dair_pll_old/assets/contactnets_cube/66.pt
deleted file mode 100644
index 9157c2a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/66.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/67.pt b/dair_pll_old/assets/contactnets_cube/67.pt
deleted file mode 100644
index df22791..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/67.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/68.pt b/dair_pll_old/assets/contactnets_cube/68.pt
deleted file mode 100644
index d2c3c82..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/68.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/69.pt b/dair_pll_old/assets/contactnets_cube/69.pt
deleted file mode 100644
index b50c9b6..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/69.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/7.pt b/dair_pll_old/assets/contactnets_cube/7.pt
deleted file mode 100644
index 162aa80..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/7.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/70.pt b/dair_pll_old/assets/contactnets_cube/70.pt
deleted file mode 100644
index e483942..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/70.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/71.pt b/dair_pll_old/assets/contactnets_cube/71.pt
deleted file mode 100644
index 9e2b7bd..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/71.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/72.pt b/dair_pll_old/assets/contactnets_cube/72.pt
deleted file mode 100644
index 6a77b94..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/72.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/73.pt b/dair_pll_old/assets/contactnets_cube/73.pt
deleted file mode 100644
index e65d93e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/73.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/74.pt b/dair_pll_old/assets/contactnets_cube/74.pt
deleted file mode 100644
index bcb556d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/74.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/75.pt b/dair_pll_old/assets/contactnets_cube/75.pt
deleted file mode 100644
index 4de11ac..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/75.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/76.pt b/dair_pll_old/assets/contactnets_cube/76.pt
deleted file mode 100644
index ed8b809..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/76.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/77.pt b/dair_pll_old/assets/contactnets_cube/77.pt
deleted file mode 100644
index 7374b66..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/77.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/78.pt b/dair_pll_old/assets/contactnets_cube/78.pt
deleted file mode 100644
index 878b114..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/78.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/79.pt b/dair_pll_old/assets/contactnets_cube/79.pt
deleted file mode 100644
index 862e107..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/79.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/8.pt b/dair_pll_old/assets/contactnets_cube/8.pt
deleted file mode 100644
index 21f57d0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/8.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/80.pt b/dair_pll_old/assets/contactnets_cube/80.pt
deleted file mode 100644
index 8d2110a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/80.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/81.pt b/dair_pll_old/assets/contactnets_cube/81.pt
deleted file mode 100644
index 33b9871..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/81.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/82.pt b/dair_pll_old/assets/contactnets_cube/82.pt
deleted file mode 100644
index aeb275c..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/82.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/83.pt b/dair_pll_old/assets/contactnets_cube/83.pt
deleted file mode 100644
index 95e2d14..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/83.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/84.pt b/dair_pll_old/assets/contactnets_cube/84.pt
deleted file mode 100644
index 0c7d2b0..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/84.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/85.pt b/dair_pll_old/assets/contactnets_cube/85.pt
deleted file mode 100644
index fcaa8b8..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/85.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/86.pt b/dair_pll_old/assets/contactnets_cube/86.pt
deleted file mode 100644
index 5d27794..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/86.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/87.pt b/dair_pll_old/assets/contactnets_cube/87.pt
deleted file mode 100644
index d543a2f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/87.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/88.pt b/dair_pll_old/assets/contactnets_cube/88.pt
deleted file mode 100644
index 03aa50f..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/88.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/89.pt b/dair_pll_old/assets/contactnets_cube/89.pt
deleted file mode 100644
index e26eae5..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/89.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/9.pt b/dair_pll_old/assets/contactnets_cube/9.pt
deleted file mode 100644
index 2af408a..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/9.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/90.pt b/dair_pll_old/assets/contactnets_cube/90.pt
deleted file mode 100644
index e131d0d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/90.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/91.pt b/dair_pll_old/assets/contactnets_cube/91.pt
deleted file mode 100644
index 41ea894..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/91.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/92.pt b/dair_pll_old/assets/contactnets_cube/92.pt
deleted file mode 100644
index 62e523e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/92.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/93.pt b/dair_pll_old/assets/contactnets_cube/93.pt
deleted file mode 100644
index 1f4b852..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/93.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/94.pt b/dair_pll_old/assets/contactnets_cube/94.pt
deleted file mode 100644
index 5c73989..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/94.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/95.pt b/dair_pll_old/assets/contactnets_cube/95.pt
deleted file mode 100644
index fbc2f3e..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/95.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/96.pt b/dair_pll_old/assets/contactnets_cube/96.pt
deleted file mode 100644
index 2083fa1..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/96.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/97.pt b/dair_pll_old/assets/contactnets_cube/97.pt
deleted file mode 100644
index df57297..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/97.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/98.pt b/dair_pll_old/assets/contactnets_cube/98.pt
deleted file mode 100644
index 1265a06..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/98.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube/99.pt b/dair_pll_old/assets/contactnets_cube/99.pt
deleted file mode 100644
index 07b1d4d..0000000
Binary files a/dair_pll_old/assets/contactnets_cube/99.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_cube_bad_init.urdf b/dair_pll_old/assets/contactnets_cube_bad_init.urdf
deleted file mode 100644
index d84914a..0000000
--- a/dair_pll_old/assets/contactnets_cube_bad_init.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
diff --git a/dair_pll_old/assets/contactnets_cube_mesh.urdf b/dair_pll_old/assets/contactnets_cube_mesh.urdf
deleted file mode 100644
index bc6a363..0000000
--- a/dair_pll_old/assets/contactnets_cube_mesh.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
diff --git a/dair_pll_old/assets/contactnets_cube_mesh_small_init.urdf b/dair_pll_old/assets/contactnets_cube_mesh_small_init.urdf
deleted file mode 100644
index 29afb7c..0000000
--- a/dair_pll_old/assets/contactnets_cube_mesh_small_init.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
diff --git a/dair_pll_old/assets/contactnets_cube_sim.urdf b/dair_pll_old/assets/contactnets_cube_sim.urdf
deleted file mode 100644
index 0152d79..0000000
--- a/dair_pll_old/assets/contactnets_cube_sim.urdf
+++ /dev/null
@@ -1,41 +0,0 @@
diff --git a/dair_pll_old/assets/contactnets_cube_small_init.urdf b/dair_pll_old/assets/contactnets_cube_small_init.urdf
deleted file mode 100644
index 8f7c636..0000000
--- a/dair_pll_old/assets/contactnets_cube_small_init.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
diff --git a/dair_pll_old/assets/contactnets_elbow.urdf b/dair_pll_old/assets/contactnets_elbow.urdf
deleted file mode 100644
index 35302ff..0000000
--- a/dair_pll_old/assets/contactnets_elbow.urdf
+++ /dev/null
@@ -1,81 +0,0 @@
diff --git a/dair_pll_old/assets/contactnets_elbow/0.pt b/dair_pll_old/assets/contactnets_elbow/0.pt
deleted file mode 100644
index 43af9f8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/0.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/1.pt b/dair_pll_old/assets/contactnets_elbow/1.pt
deleted file mode 100644
index a37c0a2..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/1.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/10.pt b/dair_pll_old/assets/contactnets_elbow/10.pt
deleted file mode 100644
index 73ab5f6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/10.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/100.pt b/dair_pll_old/assets/contactnets_elbow/100.pt
deleted file mode 100644
index 00b22f8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/100.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/101.pt b/dair_pll_old/assets/contactnets_elbow/101.pt
deleted file mode 100644
index 82e8986..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/101.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/102.pt b/dair_pll_old/assets/contactnets_elbow/102.pt
deleted file mode 100644
index 2072d63..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/102.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/103.pt b/dair_pll_old/assets/contactnets_elbow/103.pt
deleted file mode 100644
index f8a1da8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/103.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/104.pt b/dair_pll_old/assets/contactnets_elbow/104.pt
deleted file mode 100644
index c8187a0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/104.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/105.pt b/dair_pll_old/assets/contactnets_elbow/105.pt
deleted file mode 100644
index dadc188..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/105.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/106.pt b/dair_pll_old/assets/contactnets_elbow/106.pt
deleted file mode 100644
index 693880a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/106.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/107.pt b/dair_pll_old/assets/contactnets_elbow/107.pt
deleted file mode 100644
index ca20fa6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/107.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/108.pt b/dair_pll_old/assets/contactnets_elbow/108.pt
deleted file mode 100644
index 18ccf8f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/108.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/109.pt b/dair_pll_old/assets/contactnets_elbow/109.pt
deleted file mode 100644
index 202e9fa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/109.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/11.pt b/dair_pll_old/assets/contactnets_elbow/11.pt
deleted file mode 100644
index 03cd825..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/11.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/110.pt b/dair_pll_old/assets/contactnets_elbow/110.pt
deleted file mode 100644
index a72b264..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/110.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/111.pt b/dair_pll_old/assets/contactnets_elbow/111.pt
deleted file mode 100644
index 52a0264..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/111.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/112.pt b/dair_pll_old/assets/contactnets_elbow/112.pt
deleted file mode 100644
index ad2f0c8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/112.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/113.pt b/dair_pll_old/assets/contactnets_elbow/113.pt
deleted file mode 100644
index ee520ec..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/113.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/114.pt b/dair_pll_old/assets/contactnets_elbow/114.pt
deleted file mode 100644
index dca84e0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/114.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/115.pt b/dair_pll_old/assets/contactnets_elbow/115.pt
deleted file mode 100644
index a137d63..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/115.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/116.pt b/dair_pll_old/assets/contactnets_elbow/116.pt
deleted file mode 100644
index d72cc07..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/116.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/117.pt b/dair_pll_old/assets/contactnets_elbow/117.pt
deleted file mode 100644
index 8e804eb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/117.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/118.pt b/dair_pll_old/assets/contactnets_elbow/118.pt
deleted file mode 100644
index fb84ae5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/118.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/119.pt b/dair_pll_old/assets/contactnets_elbow/119.pt
deleted file mode 100644
index 45e781a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/119.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/12.pt b/dair_pll_old/assets/contactnets_elbow/12.pt
deleted file mode 100644
index ccb7c5c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/12.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/120.pt b/dair_pll_old/assets/contactnets_elbow/120.pt
deleted file mode 100644
index f920a35..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/120.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/121.pt b/dair_pll_old/assets/contactnets_elbow/121.pt
deleted file mode 100644
index 9691390..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/121.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/122.pt b/dair_pll_old/assets/contactnets_elbow/122.pt
deleted file mode 100644
index e56a1b7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/122.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/123.pt b/dair_pll_old/assets/contactnets_elbow/123.pt
deleted file mode 100644
index ade5b5f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/123.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/124.pt b/dair_pll_old/assets/contactnets_elbow/124.pt
deleted file mode 100644
index 8f09c5e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/124.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/125.pt b/dair_pll_old/assets/contactnets_elbow/125.pt
deleted file mode 100644
index fa5af73..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/125.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/126.pt b/dair_pll_old/assets/contactnets_elbow/126.pt
deleted file mode 100644
index 434fe8e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/126.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/127.pt b/dair_pll_old/assets/contactnets_elbow/127.pt
deleted file mode 100644
index 6ade915..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/127.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/128.pt b/dair_pll_old/assets/contactnets_elbow/128.pt
deleted file mode 100644
index 785827c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/128.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/129.pt b/dair_pll_old/assets/contactnets_elbow/129.pt
deleted file mode 100644
index 5c795fa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/129.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/13.pt b/dair_pll_old/assets/contactnets_elbow/13.pt
deleted file mode 100644
index 8f5fb2f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/13.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/130.pt b/dair_pll_old/assets/contactnets_elbow/130.pt
deleted file mode 100644
index af14a61..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/130.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/131.pt b/dair_pll_old/assets/contactnets_elbow/131.pt
deleted file mode 100644
index ee662f4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/131.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/132.pt b/dair_pll_old/assets/contactnets_elbow/132.pt
deleted file mode 100644
index d27cf59..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/132.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/133.pt b/dair_pll_old/assets/contactnets_elbow/133.pt
deleted file mode 100644
index b29dd15..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/133.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/134.pt b/dair_pll_old/assets/contactnets_elbow/134.pt
deleted file mode 100644
index 9af2404..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/134.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/135.pt b/dair_pll_old/assets/contactnets_elbow/135.pt
deleted file mode 100644
index f63dd0d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/135.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/136.pt b/dair_pll_old/assets/contactnets_elbow/136.pt
deleted file mode 100644
index b676b78..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/136.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/137.pt b/dair_pll_old/assets/contactnets_elbow/137.pt
deleted file mode 100644
index 89c1664..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/137.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/138.pt b/dair_pll_old/assets/contactnets_elbow/138.pt
deleted file mode 100644
index c3fcc28..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/138.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/139.pt b/dair_pll_old/assets/contactnets_elbow/139.pt
deleted file mode 100644
index dfdf142..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/139.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/14.pt b/dair_pll_old/assets/contactnets_elbow/14.pt
deleted file mode 100644
index 9af0a63..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/14.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/140.pt b/dair_pll_old/assets/contactnets_elbow/140.pt
deleted file mode 100644
index e4f274c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/140.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/141.pt b/dair_pll_old/assets/contactnets_elbow/141.pt
deleted file mode 100644
index a491d34..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/141.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/142.pt b/dair_pll_old/assets/contactnets_elbow/142.pt
deleted file mode 100644
index c11bec6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/142.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/143.pt b/dair_pll_old/assets/contactnets_elbow/143.pt
deleted file mode 100644
index fdf90bd..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/143.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/144.pt b/dair_pll_old/assets/contactnets_elbow/144.pt
deleted file mode 100644
index f9e8b8e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/144.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/145.pt b/dair_pll_old/assets/contactnets_elbow/145.pt
deleted file mode 100644
index 3ace2b1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/145.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/146.pt b/dair_pll_old/assets/contactnets_elbow/146.pt
deleted file mode 100644
index 701a3d4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/146.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/147.pt b/dair_pll_old/assets/contactnets_elbow/147.pt
deleted file mode 100644
index 5de5f28..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/147.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/148.pt b/dair_pll_old/assets/contactnets_elbow/148.pt
deleted file mode 100644
index 6684fea..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/148.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/149.pt b/dair_pll_old/assets/contactnets_elbow/149.pt
deleted file mode 100644
index 6a87ccb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/149.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/15.pt b/dair_pll_old/assets/contactnets_elbow/15.pt
deleted file mode 100644
index 283a20c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/15.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/150.pt b/dair_pll_old/assets/contactnets_elbow/150.pt
deleted file mode 100644
index 4736a3f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/150.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/151.pt b/dair_pll_old/assets/contactnets_elbow/151.pt
deleted file mode 100644
index bec19ee..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/151.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/152.pt b/dair_pll_old/assets/contactnets_elbow/152.pt
deleted file mode 100644
index 57f925d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/152.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/153.pt b/dair_pll_old/assets/contactnets_elbow/153.pt
deleted file mode 100644
index 4314ed8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/153.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/154.pt b/dair_pll_old/assets/contactnets_elbow/154.pt
deleted file mode 100644
index 3f8324c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/154.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/155.pt b/dair_pll_old/assets/contactnets_elbow/155.pt
deleted file mode 100644
index 2b30120..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/155.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/156.pt b/dair_pll_old/assets/contactnets_elbow/156.pt
deleted file mode 100644
index 7928593..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/156.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/157.pt b/dair_pll_old/assets/contactnets_elbow/157.pt
deleted file mode 100644
index 3edf215..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/157.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/158.pt b/dair_pll_old/assets/contactnets_elbow/158.pt
deleted file mode 100644
index 5645568..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/158.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/159.pt b/dair_pll_old/assets/contactnets_elbow/159.pt
deleted file mode 100644
index 82d8022..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/159.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/16.pt b/dair_pll_old/assets/contactnets_elbow/16.pt
deleted file mode 100644
index 033c3cb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/16.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/160.pt b/dair_pll_old/assets/contactnets_elbow/160.pt
deleted file mode 100644
index 2a9fc41..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/160.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/161.pt b/dair_pll_old/assets/contactnets_elbow/161.pt
deleted file mode 100644
index a11cf1d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/161.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/162.pt b/dair_pll_old/assets/contactnets_elbow/162.pt
deleted file mode 100644
index aafb3bc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/162.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/163.pt b/dair_pll_old/assets/contactnets_elbow/163.pt
deleted file mode 100644
index 89bd89d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/163.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/164.pt b/dair_pll_old/assets/contactnets_elbow/164.pt
deleted file mode 100644
index 8adce04..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/164.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/165.pt b/dair_pll_old/assets/contactnets_elbow/165.pt
deleted file mode 100644
index 44380fc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/165.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/166.pt b/dair_pll_old/assets/contactnets_elbow/166.pt
deleted file mode 100644
index ed616e9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/166.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/167.pt b/dair_pll_old/assets/contactnets_elbow/167.pt
deleted file mode 100644
index 5422d02..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/167.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/168.pt b/dair_pll_old/assets/contactnets_elbow/168.pt
deleted file mode 100644
index 080144e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/168.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/169.pt b/dair_pll_old/assets/contactnets_elbow/169.pt
deleted file mode 100644
index 0e10626..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/169.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/17.pt b/dair_pll_old/assets/contactnets_elbow/17.pt
deleted file mode 100644
index cc11a4e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/17.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/170.pt b/dair_pll_old/assets/contactnets_elbow/170.pt
deleted file mode 100644
index e3d4b0e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/170.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/171.pt b/dair_pll_old/assets/contactnets_elbow/171.pt
deleted file mode 100644
index 90cfdfc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/171.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/172.pt b/dair_pll_old/assets/contactnets_elbow/172.pt
deleted file mode 100644
index df62548..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/172.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/173.pt b/dair_pll_old/assets/contactnets_elbow/173.pt
deleted file mode 100644
index 7f71586..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/173.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/174.pt b/dair_pll_old/assets/contactnets_elbow/174.pt
deleted file mode 100644
index be40b8b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/174.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/175.pt b/dair_pll_old/assets/contactnets_elbow/175.pt
deleted file mode 100644
index 3c78088..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/175.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/176.pt b/dair_pll_old/assets/contactnets_elbow/176.pt
deleted file mode 100644
index c65af3c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/176.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/177.pt b/dair_pll_old/assets/contactnets_elbow/177.pt
deleted file mode 100644
index 770461a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/177.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/178.pt b/dair_pll_old/assets/contactnets_elbow/178.pt
deleted file mode 100644
index 655c058..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/178.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/179.pt b/dair_pll_old/assets/contactnets_elbow/179.pt
deleted file mode 100644
index e7915dd..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/179.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/18.pt b/dair_pll_old/assets/contactnets_elbow/18.pt
deleted file mode 100644
index c53d4a0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/18.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/180.pt b/dair_pll_old/assets/contactnets_elbow/180.pt
deleted file mode 100644
index 6cb676c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/180.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/181.pt b/dair_pll_old/assets/contactnets_elbow/181.pt
deleted file mode 100644
index 452d87e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/181.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/182.pt b/dair_pll_old/assets/contactnets_elbow/182.pt
deleted file mode 100644
index 174326a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/182.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/183.pt b/dair_pll_old/assets/contactnets_elbow/183.pt
deleted file mode 100644
index 29b04cc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/183.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/184.pt b/dair_pll_old/assets/contactnets_elbow/184.pt
deleted file mode 100644
index 38f8ad7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/184.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/185.pt b/dair_pll_old/assets/contactnets_elbow/185.pt
deleted file mode 100644
index 0ebab4e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/185.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/186.pt b/dair_pll_old/assets/contactnets_elbow/186.pt
deleted file mode 100644
index f64f376..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/186.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/187.pt b/dair_pll_old/assets/contactnets_elbow/187.pt
deleted file mode 100644
index af65ec2..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/187.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/188.pt b/dair_pll_old/assets/contactnets_elbow/188.pt
deleted file mode 100644
index 35a7ef8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/188.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/189.pt b/dair_pll_old/assets/contactnets_elbow/189.pt
deleted file mode 100644
index 45d156d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/189.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/19.pt b/dair_pll_old/assets/contactnets_elbow/19.pt
deleted file mode 100644
index 89bf1d9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/19.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/190.pt b/dair_pll_old/assets/contactnets_elbow/190.pt
deleted file mode 100644
index effda2b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/190.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/191.pt b/dair_pll_old/assets/contactnets_elbow/191.pt
deleted file mode 100644
index 40e0c63..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/191.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/192.pt b/dair_pll_old/assets/contactnets_elbow/192.pt
deleted file mode 100644
index 0dfa66e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/192.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/193.pt b/dair_pll_old/assets/contactnets_elbow/193.pt
deleted file mode 100644
index 64a21b4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/193.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/194.pt b/dair_pll_old/assets/contactnets_elbow/194.pt
deleted file mode 100644
index 2d5660b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/194.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/195.pt b/dair_pll_old/assets/contactnets_elbow/195.pt
deleted file mode 100644
index 7276d23..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/195.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/196.pt b/dair_pll_old/assets/contactnets_elbow/196.pt
deleted file mode 100644
index e1807d5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/196.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/197.pt b/dair_pll_old/assets/contactnets_elbow/197.pt
deleted file mode 100644
index 53ac163..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/197.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/198.pt b/dair_pll_old/assets/contactnets_elbow/198.pt
deleted file mode 100644
index 8c8264b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/198.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/199.pt b/dair_pll_old/assets/contactnets_elbow/199.pt
deleted file mode 100644
index c36ad73..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/199.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/2.pt b/dair_pll_old/assets/contactnets_elbow/2.pt
deleted file mode 100644
index b2462c6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/2.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/20.pt b/dair_pll_old/assets/contactnets_elbow/20.pt
deleted file mode 100644
index 9c39532..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/20.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/200.pt b/dair_pll_old/assets/contactnets_elbow/200.pt
deleted file mode 100644
index a0dd798..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/200.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/201.pt b/dair_pll_old/assets/contactnets_elbow/201.pt
deleted file mode 100644
index 5f52b02..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/201.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/202.pt b/dair_pll_old/assets/contactnets_elbow/202.pt
deleted file mode 100644
index 7fb1854..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/202.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/203.pt b/dair_pll_old/assets/contactnets_elbow/203.pt
deleted file mode 100644
index da190ca..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/203.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/204.pt b/dair_pll_old/assets/contactnets_elbow/204.pt
deleted file mode 100644
index e14d6b7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/204.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/205.pt b/dair_pll_old/assets/contactnets_elbow/205.pt
deleted file mode 100644
index 1bb3c1d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/205.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/206.pt b/dair_pll_old/assets/contactnets_elbow/206.pt
deleted file mode 100644
index 85af8d1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/206.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/207.pt b/dair_pll_old/assets/contactnets_elbow/207.pt
deleted file mode 100644
index 89929f3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/207.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/208.pt b/dair_pll_old/assets/contactnets_elbow/208.pt
deleted file mode 100644
index 62cd959..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/208.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/209.pt b/dair_pll_old/assets/contactnets_elbow/209.pt
deleted file mode 100644
index 9e80842..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/209.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/21.pt b/dair_pll_old/assets/contactnets_elbow/21.pt
deleted file mode 100644
index 4b6d8c8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/21.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/210.pt b/dair_pll_old/assets/contactnets_elbow/210.pt
deleted file mode 100644
index 8e88cf3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/210.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/211.pt b/dair_pll_old/assets/contactnets_elbow/211.pt
deleted file mode 100644
index 8a59401..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/211.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/212.pt b/dair_pll_old/assets/contactnets_elbow/212.pt
deleted file mode 100644
index 12d559b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/212.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/213.pt b/dair_pll_old/assets/contactnets_elbow/213.pt
deleted file mode 100644
index ea36e85..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/213.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/214.pt b/dair_pll_old/assets/contactnets_elbow/214.pt
deleted file mode 100644
index 678146e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/214.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/215.pt b/dair_pll_old/assets/contactnets_elbow/215.pt
deleted file mode 100644
index 410a4f7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/215.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/216.pt b/dair_pll_old/assets/contactnets_elbow/216.pt
deleted file mode 100644
index 94514fc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/216.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/217.pt b/dair_pll_old/assets/contactnets_elbow/217.pt
deleted file mode 100644
index 82b23da..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/217.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/218.pt b/dair_pll_old/assets/contactnets_elbow/218.pt
deleted file mode 100644
index d9db375..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/218.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/219.pt b/dair_pll_old/assets/contactnets_elbow/219.pt
deleted file mode 100644
index 0b3d85d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/219.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/22.pt b/dair_pll_old/assets/contactnets_elbow/22.pt
deleted file mode 100644
index f481ba0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/22.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/220.pt b/dair_pll_old/assets/contactnets_elbow/220.pt
deleted file mode 100644
index 3c092d8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/220.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/221.pt b/dair_pll_old/assets/contactnets_elbow/221.pt
deleted file mode 100644
index 0201baf..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/221.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/222.pt b/dair_pll_old/assets/contactnets_elbow/222.pt
deleted file mode 100644
index 8eebbe0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/222.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/223.pt b/dair_pll_old/assets/contactnets_elbow/223.pt
deleted file mode 100644
index e38c07f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/223.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/224.pt b/dair_pll_old/assets/contactnets_elbow/224.pt
deleted file mode 100644
index c872aaa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/224.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/225.pt b/dair_pll_old/assets/contactnets_elbow/225.pt
deleted file mode 100644
index 27b8b5c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/225.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/226.pt b/dair_pll_old/assets/contactnets_elbow/226.pt
deleted file mode 100644
index eeabd1e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/226.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/227.pt b/dair_pll_old/assets/contactnets_elbow/227.pt
deleted file mode 100644
index eab21d9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/227.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/228.pt b/dair_pll_old/assets/contactnets_elbow/228.pt
deleted file mode 100644
index b5cddef..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/228.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/229.pt b/dair_pll_old/assets/contactnets_elbow/229.pt
deleted file mode 100644
index aabf158..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/229.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/23.pt b/dair_pll_old/assets/contactnets_elbow/23.pt
deleted file mode 100644
index 84fbecf..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/23.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/230.pt b/dair_pll_old/assets/contactnets_elbow/230.pt
deleted file mode 100644
index 90726fb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/230.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/231.pt b/dair_pll_old/assets/contactnets_elbow/231.pt
deleted file mode 100644
index d94177f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/231.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/232.pt b/dair_pll_old/assets/contactnets_elbow/232.pt
deleted file mode 100644
index 92185bb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/232.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/233.pt b/dair_pll_old/assets/contactnets_elbow/233.pt
deleted file mode 100644
index 7e183fa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/233.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/234.pt b/dair_pll_old/assets/contactnets_elbow/234.pt
deleted file mode 100644
index 4ec9105..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/234.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/235.pt b/dair_pll_old/assets/contactnets_elbow/235.pt
deleted file mode 100644
index 5d93795..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/235.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/236.pt b/dair_pll_old/assets/contactnets_elbow/236.pt
deleted file mode 100644
index abb4866..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/236.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/237.pt b/dair_pll_old/assets/contactnets_elbow/237.pt
deleted file mode 100644
index 3493e20..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/237.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/238.pt b/dair_pll_old/assets/contactnets_elbow/238.pt
deleted file mode 100644
index 6b673fe..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/238.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/239.pt b/dair_pll_old/assets/contactnets_elbow/239.pt
deleted file mode 100644
index c5215ea..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/239.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/24.pt b/dair_pll_old/assets/contactnets_elbow/24.pt
deleted file mode 100644
index d18ee7c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/24.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/240.pt b/dair_pll_old/assets/contactnets_elbow/240.pt
deleted file mode 100644
index df45052..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/240.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/241.pt b/dair_pll_old/assets/contactnets_elbow/241.pt
deleted file mode 100644
index f12fd17..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/241.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/242.pt b/dair_pll_old/assets/contactnets_elbow/242.pt
deleted file mode 100644
index 5ec9352..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/242.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/243.pt b/dair_pll_old/assets/contactnets_elbow/243.pt
deleted file mode 100644
index 9243676..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/243.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/244.pt b/dair_pll_old/assets/contactnets_elbow/244.pt
deleted file mode 100644
index 4a13d0f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/244.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/245.pt b/dair_pll_old/assets/contactnets_elbow/245.pt
deleted file mode 100644
index f01d085..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/245.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/246.pt b/dair_pll_old/assets/contactnets_elbow/246.pt
deleted file mode 100644
index 6c40fe1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/246.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/247.pt b/dair_pll_old/assets/contactnets_elbow/247.pt
deleted file mode 100644
index 5776fe7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/247.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/248.pt b/dair_pll_old/assets/contactnets_elbow/248.pt
deleted file mode 100644
index 353c9ae..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/248.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/249.pt b/dair_pll_old/assets/contactnets_elbow/249.pt
deleted file mode 100644
index e0b6328..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/249.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/25.pt b/dair_pll_old/assets/contactnets_elbow/25.pt
deleted file mode 100644
index ff5c327..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/25.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/250.pt b/dair_pll_old/assets/contactnets_elbow/250.pt
deleted file mode 100644
index b322f1e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/250.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/251.pt b/dair_pll_old/assets/contactnets_elbow/251.pt
deleted file mode 100644
index e1ab30f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/251.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/252.pt b/dair_pll_old/assets/contactnets_elbow/252.pt
deleted file mode 100644
index ba4356c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/252.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/253.pt b/dair_pll_old/assets/contactnets_elbow/253.pt
deleted file mode 100644
index 429264a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/253.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/254.pt b/dair_pll_old/assets/contactnets_elbow/254.pt
deleted file mode 100644
index 896dad8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/254.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/255.pt b/dair_pll_old/assets/contactnets_elbow/255.pt
deleted file mode 100644
index e6056fe..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/255.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/256.pt b/dair_pll_old/assets/contactnets_elbow/256.pt
deleted file mode 100644
index caab766..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/256.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/257.pt b/dair_pll_old/assets/contactnets_elbow/257.pt
deleted file mode 100644
index cdc2e9c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/257.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/258.pt b/dair_pll_old/assets/contactnets_elbow/258.pt
deleted file mode 100644
index c5112c1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/258.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/259.pt b/dair_pll_old/assets/contactnets_elbow/259.pt
deleted file mode 100644
index 13bc84d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/259.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/26.pt b/dair_pll_old/assets/contactnets_elbow/26.pt
deleted file mode 100644
index 1db41c9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/26.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/260.pt b/dair_pll_old/assets/contactnets_elbow/260.pt
deleted file mode 100644
index cc996b1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/260.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/261.pt b/dair_pll_old/assets/contactnets_elbow/261.pt
deleted file mode 100644
index ddef850..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/261.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/262.pt b/dair_pll_old/assets/contactnets_elbow/262.pt
deleted file mode 100644
index 0ad741b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/262.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/263.pt b/dair_pll_old/assets/contactnets_elbow/263.pt
deleted file mode 100644
index 11e5351..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/263.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/264.pt b/dair_pll_old/assets/contactnets_elbow/264.pt
deleted file mode 100644
index 60e89fa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/264.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/265.pt b/dair_pll_old/assets/contactnets_elbow/265.pt
deleted file mode 100644
index 4e3ba64..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/265.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/266.pt b/dair_pll_old/assets/contactnets_elbow/266.pt
deleted file mode 100644
index 8ad75c1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/266.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/267.pt b/dair_pll_old/assets/contactnets_elbow/267.pt
deleted file mode 100644
index 0ce61c7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/267.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/268.pt b/dair_pll_old/assets/contactnets_elbow/268.pt
deleted file mode 100644
index e8c0594..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/268.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/269.pt b/dair_pll_old/assets/contactnets_elbow/269.pt
deleted file mode 100644
index f212ddd..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/269.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/27.pt b/dair_pll_old/assets/contactnets_elbow/27.pt
deleted file mode 100644
index 3cd1744..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/27.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/270.pt b/dair_pll_old/assets/contactnets_elbow/270.pt
deleted file mode 100644
index e666f7e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/270.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/271.pt b/dair_pll_old/assets/contactnets_elbow/271.pt
deleted file mode 100644
index f91b007..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/271.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/272.pt b/dair_pll_old/assets/contactnets_elbow/272.pt
deleted file mode 100644
index 79e31bc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/272.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/273.pt b/dair_pll_old/assets/contactnets_elbow/273.pt
deleted file mode 100644
index e3304bb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/273.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/274.pt b/dair_pll_old/assets/contactnets_elbow/274.pt
deleted file mode 100644
index 7392ff0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/274.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/275.pt b/dair_pll_old/assets/contactnets_elbow/275.pt
deleted file mode 100644
index c30f356..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/275.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/276.pt b/dair_pll_old/assets/contactnets_elbow/276.pt
deleted file mode 100644
index 3e301f8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/276.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/277.pt b/dair_pll_old/assets/contactnets_elbow/277.pt
deleted file mode 100644
index f46e7fa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/277.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/278.pt b/dair_pll_old/assets/contactnets_elbow/278.pt
deleted file mode 100644
index 6965df6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/278.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/279.pt b/dair_pll_old/assets/contactnets_elbow/279.pt
deleted file mode 100644
index f415827..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/279.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/28.pt b/dair_pll_old/assets/contactnets_elbow/28.pt
deleted file mode 100644
index d3adf55..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/28.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/280.pt b/dair_pll_old/assets/contactnets_elbow/280.pt
deleted file mode 100644
index 5ff097d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/280.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/281.pt b/dair_pll_old/assets/contactnets_elbow/281.pt
deleted file mode 100644
index e415d01..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/281.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/282.pt b/dair_pll_old/assets/contactnets_elbow/282.pt
deleted file mode 100644
index 054beb4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/282.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/283.pt b/dair_pll_old/assets/contactnets_elbow/283.pt
deleted file mode 100644
index 22d0af0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/283.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/284.pt b/dair_pll_old/assets/contactnets_elbow/284.pt
deleted file mode 100644
index dbfe63b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/284.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/285.pt b/dair_pll_old/assets/contactnets_elbow/285.pt
deleted file mode 100644
index cda8d52..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/285.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/286.pt b/dair_pll_old/assets/contactnets_elbow/286.pt
deleted file mode 100644
index 060ce0b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/286.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/287.pt b/dair_pll_old/assets/contactnets_elbow/287.pt
deleted file mode 100644
index a201cb7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/287.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/288.pt b/dair_pll_old/assets/contactnets_elbow/288.pt
deleted file mode 100644
index bcb9deb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/288.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/289.pt b/dair_pll_old/assets/contactnets_elbow/289.pt
deleted file mode 100644
index 6da93e9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/289.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/29.pt b/dair_pll_old/assets/contactnets_elbow/29.pt
deleted file mode 100644
index 19a83fc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/29.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/290.pt b/dair_pll_old/assets/contactnets_elbow/290.pt
deleted file mode 100644
index c34aef0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/290.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/291.pt b/dair_pll_old/assets/contactnets_elbow/291.pt
deleted file mode 100644
index 28a16e5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/291.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/292.pt b/dair_pll_old/assets/contactnets_elbow/292.pt
deleted file mode 100644
index dc936e9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/292.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/293.pt b/dair_pll_old/assets/contactnets_elbow/293.pt
deleted file mode 100644
index 7487cad..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/293.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/294.pt b/dair_pll_old/assets/contactnets_elbow/294.pt
deleted file mode 100644
index f8585ae..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/294.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/295.pt b/dair_pll_old/assets/contactnets_elbow/295.pt
deleted file mode 100644
index 30202c1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/295.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/296.pt b/dair_pll_old/assets/contactnets_elbow/296.pt
deleted file mode 100644
index 097b7c1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/296.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/297.pt b/dair_pll_old/assets/contactnets_elbow/297.pt
deleted file mode 100644
index 665dd9f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/297.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/298.pt b/dair_pll_old/assets/contactnets_elbow/298.pt
deleted file mode 100644
index 7182003..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/298.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/299.pt b/dair_pll_old/assets/contactnets_elbow/299.pt
deleted file mode 100644
index 5efed3d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/299.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/3.pt b/dair_pll_old/assets/contactnets_elbow/3.pt
deleted file mode 100644
index e066d16..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/3.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/30.pt b/dair_pll_old/assets/contactnets_elbow/30.pt
deleted file mode 100644
index eeaf356..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/30.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/300.pt b/dair_pll_old/assets/contactnets_elbow/300.pt
deleted file mode 100644
index 178d746..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/300.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/301.pt b/dair_pll_old/assets/contactnets_elbow/301.pt
deleted file mode 100644
index 56aa4f8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/301.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/302.pt b/dair_pll_old/assets/contactnets_elbow/302.pt
deleted file mode 100644
index bc32a01..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/302.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/303.pt b/dair_pll_old/assets/contactnets_elbow/303.pt
deleted file mode 100644
index fef4d20..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/303.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/304.pt b/dair_pll_old/assets/contactnets_elbow/304.pt
deleted file mode 100644
index c62888b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/304.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/305.pt b/dair_pll_old/assets/contactnets_elbow/305.pt
deleted file mode 100644
index 0cee620..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/305.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/306.pt b/dair_pll_old/assets/contactnets_elbow/306.pt
deleted file mode 100644
index bea2c59..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/306.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/307.pt b/dair_pll_old/assets/contactnets_elbow/307.pt
deleted file mode 100644
index 59a6293..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/307.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/308.pt b/dair_pll_old/assets/contactnets_elbow/308.pt
deleted file mode 100644
index 6d636d5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/308.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/309.pt b/dair_pll_old/assets/contactnets_elbow/309.pt
deleted file mode 100644
index 45f4334..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/309.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/31.pt b/dair_pll_old/assets/contactnets_elbow/31.pt
deleted file mode 100644
index d507dff..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/31.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/310.pt b/dair_pll_old/assets/contactnets_elbow/310.pt
deleted file mode 100644
index 9ae9030..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/310.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/311.pt b/dair_pll_old/assets/contactnets_elbow/311.pt
deleted file mode 100644
index 0c386e2..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/311.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/312.pt b/dair_pll_old/assets/contactnets_elbow/312.pt
deleted file mode 100644
index da47d09..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/312.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/313.pt b/dair_pll_old/assets/contactnets_elbow/313.pt
deleted file mode 100644
index b4c5962..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/313.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/314.pt b/dair_pll_old/assets/contactnets_elbow/314.pt
deleted file mode 100644
index d6b6432..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/314.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/315.pt b/dair_pll_old/assets/contactnets_elbow/315.pt
deleted file mode 100644
index 2c6bd22..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/315.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/316.pt b/dair_pll_old/assets/contactnets_elbow/316.pt
deleted file mode 100644
index a1e2963..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/316.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/317.pt b/dair_pll_old/assets/contactnets_elbow/317.pt
deleted file mode 100644
index 25aa916..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/317.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/318.pt b/dair_pll_old/assets/contactnets_elbow/318.pt
deleted file mode 100644
index b251ada..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/318.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/319.pt b/dair_pll_old/assets/contactnets_elbow/319.pt
deleted file mode 100644
index abad698..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/319.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/32.pt b/dair_pll_old/assets/contactnets_elbow/32.pt
deleted file mode 100644
index 76c8a9b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/32.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/320.pt b/dair_pll_old/assets/contactnets_elbow/320.pt
deleted file mode 100644
index b61ff44..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/320.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/321.pt b/dair_pll_old/assets/contactnets_elbow/321.pt
deleted file mode 100644
index 940c8af..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/321.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/322.pt b/dair_pll_old/assets/contactnets_elbow/322.pt
deleted file mode 100644
index eb6ac5b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/322.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/323.pt b/dair_pll_old/assets/contactnets_elbow/323.pt
deleted file mode 100644
index 77ad6d4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/323.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/324.pt b/dair_pll_old/assets/contactnets_elbow/324.pt
deleted file mode 100644
index 00f6872..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/324.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/325.pt b/dair_pll_old/assets/contactnets_elbow/325.pt
deleted file mode 100644
index d2ea071..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/325.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/326.pt b/dair_pll_old/assets/contactnets_elbow/326.pt
deleted file mode 100644
index c53587f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/326.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/327.pt b/dair_pll_old/assets/contactnets_elbow/327.pt
deleted file mode 100644
index a8e8390..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/327.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/328.pt b/dair_pll_old/assets/contactnets_elbow/328.pt
deleted file mode 100644
index 81972e3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/328.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/329.pt b/dair_pll_old/assets/contactnets_elbow/329.pt
deleted file mode 100644
index 0c18ca6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/329.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/33.pt b/dair_pll_old/assets/contactnets_elbow/33.pt
deleted file mode 100644
index 87a1ec6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/33.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/330.pt b/dair_pll_old/assets/contactnets_elbow/330.pt
deleted file mode 100644
index 8c91fa3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/330.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/331.pt b/dair_pll_old/assets/contactnets_elbow/331.pt
deleted file mode 100644
index f958413..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/331.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/332.pt b/dair_pll_old/assets/contactnets_elbow/332.pt
deleted file mode 100644
index 2ec7121..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/332.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/333.pt b/dair_pll_old/assets/contactnets_elbow/333.pt
deleted file mode 100644
index 02573fa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/333.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/334.pt b/dair_pll_old/assets/contactnets_elbow/334.pt
deleted file mode 100644
index 4bcc761..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/334.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/335.pt b/dair_pll_old/assets/contactnets_elbow/335.pt
deleted file mode 100644
index bc24f38..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/335.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/336.pt b/dair_pll_old/assets/contactnets_elbow/336.pt
deleted file mode 100644
index 1012fbe..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/336.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/337.pt b/dair_pll_old/assets/contactnets_elbow/337.pt
deleted file mode 100644
index a04c3ee..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/337.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/338.pt b/dair_pll_old/assets/contactnets_elbow/338.pt
deleted file mode 100644
index d436447..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/338.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/339.pt b/dair_pll_old/assets/contactnets_elbow/339.pt
deleted file mode 100644
index 7e8de8d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/339.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/34.pt b/dair_pll_old/assets/contactnets_elbow/34.pt
deleted file mode 100644
index e93074b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/34.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/340.pt b/dair_pll_old/assets/contactnets_elbow/340.pt
deleted file mode 100644
index 85a3ea3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/340.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/341.pt b/dair_pll_old/assets/contactnets_elbow/341.pt
deleted file mode 100644
index 171e09e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/341.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/342.pt b/dair_pll_old/assets/contactnets_elbow/342.pt
deleted file mode 100644
index 734703e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/342.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/343.pt b/dair_pll_old/assets/contactnets_elbow/343.pt
deleted file mode 100644
index 984b52d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/343.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/344.pt b/dair_pll_old/assets/contactnets_elbow/344.pt
deleted file mode 100644
index d5eaa1b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/344.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/345.pt b/dair_pll_old/assets/contactnets_elbow/345.pt
deleted file mode 100644
index 24a3a3b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/345.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/346.pt b/dair_pll_old/assets/contactnets_elbow/346.pt
deleted file mode 100644
index 9690263..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/346.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/347.pt b/dair_pll_old/assets/contactnets_elbow/347.pt
deleted file mode 100644
index fa3d047..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/347.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/348.pt b/dair_pll_old/assets/contactnets_elbow/348.pt
deleted file mode 100644
index 15d6e5f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/348.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/349.pt b/dair_pll_old/assets/contactnets_elbow/349.pt
deleted file mode 100644
index 310f142..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/349.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/35.pt b/dair_pll_old/assets/contactnets_elbow/35.pt
deleted file mode 100644
index 15de32c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/35.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/350.pt b/dair_pll_old/assets/contactnets_elbow/350.pt
deleted file mode 100644
index 90b8f92..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/350.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/351.pt b/dair_pll_old/assets/contactnets_elbow/351.pt
deleted file mode 100644
index fcdae93..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/351.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/352.pt b/dair_pll_old/assets/contactnets_elbow/352.pt
deleted file mode 100644
index 26c73b7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/352.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/353.pt b/dair_pll_old/assets/contactnets_elbow/353.pt
deleted file mode 100644
index 57bba73..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/353.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/354.pt b/dair_pll_old/assets/contactnets_elbow/354.pt
deleted file mode 100644
index 0ab6930..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/354.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/355.pt b/dair_pll_old/assets/contactnets_elbow/355.pt
deleted file mode 100644
index b55752a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/355.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/356.pt b/dair_pll_old/assets/contactnets_elbow/356.pt
deleted file mode 100644
index d92fd98..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/356.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/357.pt b/dair_pll_old/assets/contactnets_elbow/357.pt
deleted file mode 100644
index 5a9ad85..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/357.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/358.pt b/dair_pll_old/assets/contactnets_elbow/358.pt
deleted file mode 100644
index 519f878..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/358.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/359.pt b/dair_pll_old/assets/contactnets_elbow/359.pt
deleted file mode 100644
index 0d0b619..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/359.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/36.pt b/dair_pll_old/assets/contactnets_elbow/36.pt
deleted file mode 100644
index dac61e1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/36.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/360.pt b/dair_pll_old/assets/contactnets_elbow/360.pt
deleted file mode 100644
index f993a1e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/360.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/361.pt b/dair_pll_old/assets/contactnets_elbow/361.pt
deleted file mode 100644
index 30d37f7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/361.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/362.pt b/dair_pll_old/assets/contactnets_elbow/362.pt
deleted file mode 100644
index 4cdd743..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/362.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/363.pt b/dair_pll_old/assets/contactnets_elbow/363.pt
deleted file mode 100644
index da6e5e1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/363.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/364.pt b/dair_pll_old/assets/contactnets_elbow/364.pt
deleted file mode 100644
index 734424f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/364.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/365.pt b/dair_pll_old/assets/contactnets_elbow/365.pt
deleted file mode 100644
index 410362f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/365.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/366.pt b/dair_pll_old/assets/contactnets_elbow/366.pt
deleted file mode 100644
index f31893d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/366.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/367.pt b/dair_pll_old/assets/contactnets_elbow/367.pt
deleted file mode 100644
index 82167db..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/367.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/368.pt b/dair_pll_old/assets/contactnets_elbow/368.pt
deleted file mode 100644
index 897d45c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/368.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/369.pt b/dair_pll_old/assets/contactnets_elbow/369.pt
deleted file mode 100644
index 167dd02..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/369.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/37.pt b/dair_pll_old/assets/contactnets_elbow/37.pt
deleted file mode 100644
index 4c7f7d8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/37.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/370.pt b/dair_pll_old/assets/contactnets_elbow/370.pt
deleted file mode 100644
index e20f0bd..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/370.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/371.pt b/dair_pll_old/assets/contactnets_elbow/371.pt
deleted file mode 100644
index 2b0bd7f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/371.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/372.pt b/dair_pll_old/assets/contactnets_elbow/372.pt
deleted file mode 100644
index f6d2a8a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/372.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/373.pt b/dair_pll_old/assets/contactnets_elbow/373.pt
deleted file mode 100644
index 812680f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/373.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/374.pt b/dair_pll_old/assets/contactnets_elbow/374.pt
deleted file mode 100644
index 897c881..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/374.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/375.pt b/dair_pll_old/assets/contactnets_elbow/375.pt
deleted file mode 100644
index 48c06cc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/375.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/376.pt b/dair_pll_old/assets/contactnets_elbow/376.pt
deleted file mode 100644
index cd45c2a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/376.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/377.pt b/dair_pll_old/assets/contactnets_elbow/377.pt
deleted file mode 100644
index 8ac2781..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/377.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/378.pt b/dair_pll_old/assets/contactnets_elbow/378.pt
deleted file mode 100644
index a3c0c1a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/378.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/379.pt b/dair_pll_old/assets/contactnets_elbow/379.pt
deleted file mode 100644
index b9e4637..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/379.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/38.pt b/dair_pll_old/assets/contactnets_elbow/38.pt
deleted file mode 100644
index e910ca0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/38.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/380.pt b/dair_pll_old/assets/contactnets_elbow/380.pt
deleted file mode 100644
index 06c8aad..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/380.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/381.pt b/dair_pll_old/assets/contactnets_elbow/381.pt
deleted file mode 100644
index 7e746d7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/381.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/382.pt b/dair_pll_old/assets/contactnets_elbow/382.pt
deleted file mode 100644
index 6a19052..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/382.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/383.pt b/dair_pll_old/assets/contactnets_elbow/383.pt
deleted file mode 100644
index 6f504b4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/383.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/384.pt b/dair_pll_old/assets/contactnets_elbow/384.pt
deleted file mode 100644
index a0e6a36..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/384.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/385.pt b/dair_pll_old/assets/contactnets_elbow/385.pt
deleted file mode 100644
index 9c60e21..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/385.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/386.pt b/dair_pll_old/assets/contactnets_elbow/386.pt
deleted file mode 100644
index d83fb7d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/386.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/387.pt b/dair_pll_old/assets/contactnets_elbow/387.pt
deleted file mode 100644
index ebb0903..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/387.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/388.pt b/dair_pll_old/assets/contactnets_elbow/388.pt
deleted file mode 100644
index a6ef85e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/388.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/389.pt b/dair_pll_old/assets/contactnets_elbow/389.pt
deleted file mode 100644
index 6ee3fe6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/389.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/39.pt b/dair_pll_old/assets/contactnets_elbow/39.pt
deleted file mode 100644
index 44d0ef1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/39.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/390.pt b/dair_pll_old/assets/contactnets_elbow/390.pt
deleted file mode 100644
index 1562fbb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/390.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/391.pt b/dair_pll_old/assets/contactnets_elbow/391.pt
deleted file mode 100644
index 347fc4d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/391.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/392.pt b/dair_pll_old/assets/contactnets_elbow/392.pt
deleted file mode 100644
index d0f88de..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/392.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/393.pt b/dair_pll_old/assets/contactnets_elbow/393.pt
deleted file mode 100644
index 757a5ce..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/393.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/394.pt b/dair_pll_old/assets/contactnets_elbow/394.pt
deleted file mode 100644
index 8d6596a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/394.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/395.pt b/dair_pll_old/assets/contactnets_elbow/395.pt
deleted file mode 100644
index 4c89c9e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/395.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/396.pt b/dair_pll_old/assets/contactnets_elbow/396.pt
deleted file mode 100644
index 01d15eb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/396.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/397.pt b/dair_pll_old/assets/contactnets_elbow/397.pt
deleted file mode 100644
index 08c342f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/397.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/398.pt b/dair_pll_old/assets/contactnets_elbow/398.pt
deleted file mode 100644
index 7cb0cfc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/398.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/399.pt b/dair_pll_old/assets/contactnets_elbow/399.pt
deleted file mode 100644
index 813592a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/399.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/4.pt b/dair_pll_old/assets/contactnets_elbow/4.pt
deleted file mode 100644
index 0cd02b2..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/4.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/40.pt b/dair_pll_old/assets/contactnets_elbow/40.pt
deleted file mode 100644
index 7c2e3f2..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/40.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/400.pt b/dair_pll_old/assets/contactnets_elbow/400.pt
deleted file mode 100644
index 831ea2a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/400.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/401.pt b/dair_pll_old/assets/contactnets_elbow/401.pt
deleted file mode 100644
index 9de0fb4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/401.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/402.pt b/dair_pll_old/assets/contactnets_elbow/402.pt
deleted file mode 100644
index 917f827..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/402.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/403.pt b/dair_pll_old/assets/contactnets_elbow/403.pt
deleted file mode 100644
index 1714f3b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/403.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/404.pt b/dair_pll_old/assets/contactnets_elbow/404.pt
deleted file mode 100644
index a6b8181..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/404.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/405.pt b/dair_pll_old/assets/contactnets_elbow/405.pt
deleted file mode 100644
index 8346f5a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/405.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/406.pt b/dair_pll_old/assets/contactnets_elbow/406.pt
deleted file mode 100644
index c06ca40..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/406.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/407.pt b/dair_pll_old/assets/contactnets_elbow/407.pt
deleted file mode 100644
index 51e5e64..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/407.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/408.pt b/dair_pll_old/assets/contactnets_elbow/408.pt
deleted file mode 100644
index a9dc183..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/408.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/409.pt b/dair_pll_old/assets/contactnets_elbow/409.pt
deleted file mode 100644
index 2fcc1d1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/409.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/41.pt b/dair_pll_old/assets/contactnets_elbow/41.pt
deleted file mode 100644
index f8957ce..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/41.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/410.pt b/dair_pll_old/assets/contactnets_elbow/410.pt
deleted file mode 100644
index 4d45436..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/410.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/411.pt b/dair_pll_old/assets/contactnets_elbow/411.pt
deleted file mode 100644
index 8d25839..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/411.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/412.pt b/dair_pll_old/assets/contactnets_elbow/412.pt
deleted file mode 100644
index 98dbb35..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/412.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/413.pt b/dair_pll_old/assets/contactnets_elbow/413.pt
deleted file mode 100644
index 62648ae..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/413.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/414.pt b/dair_pll_old/assets/contactnets_elbow/414.pt
deleted file mode 100644
index 3ecfd84..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/414.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/415.pt b/dair_pll_old/assets/contactnets_elbow/415.pt
deleted file mode 100644
index cfe2598..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/415.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/416.pt b/dair_pll_old/assets/contactnets_elbow/416.pt
deleted file mode 100644
index 25f279e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/416.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/417.pt b/dair_pll_old/assets/contactnets_elbow/417.pt
deleted file mode 100644
index b49d816..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/417.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/418.pt b/dair_pll_old/assets/contactnets_elbow/418.pt
deleted file mode 100644
index d55e8e9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/418.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/419.pt b/dair_pll_old/assets/contactnets_elbow/419.pt
deleted file mode 100644
index 0699360..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/419.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/42.pt b/dair_pll_old/assets/contactnets_elbow/42.pt
deleted file mode 100644
index f047e98..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/42.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/420.pt b/dair_pll_old/assets/contactnets_elbow/420.pt
deleted file mode 100644
index 0b1a5b5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/420.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/421.pt b/dair_pll_old/assets/contactnets_elbow/421.pt
deleted file mode 100644
index e8d80ab..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/421.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/422.pt b/dair_pll_old/assets/contactnets_elbow/422.pt
deleted file mode 100644
index 5138a99..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/422.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/423.pt b/dair_pll_old/assets/contactnets_elbow/423.pt
deleted file mode 100644
index 31a379f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/423.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/424.pt b/dair_pll_old/assets/contactnets_elbow/424.pt
deleted file mode 100644
index 1346088..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/424.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/425.pt b/dair_pll_old/assets/contactnets_elbow/425.pt
deleted file mode 100644
index c8949d4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/425.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/426.pt b/dair_pll_old/assets/contactnets_elbow/426.pt
deleted file mode 100644
index a206c9d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/426.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/427.pt b/dair_pll_old/assets/contactnets_elbow/427.pt
deleted file mode 100644
index 4424eda..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/427.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/428.pt b/dair_pll_old/assets/contactnets_elbow/428.pt
deleted file mode 100644
index 407f8af..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/428.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/429.pt b/dair_pll_old/assets/contactnets_elbow/429.pt
deleted file mode 100644
index db27f93..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/429.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/43.pt b/dair_pll_old/assets/contactnets_elbow/43.pt
deleted file mode 100644
index ecacffe..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/43.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/430.pt b/dair_pll_old/assets/contactnets_elbow/430.pt
deleted file mode 100644
index edf8968..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/430.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/431.pt b/dair_pll_old/assets/contactnets_elbow/431.pt
deleted file mode 100644
index 2f591e7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/431.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/432.pt b/dair_pll_old/assets/contactnets_elbow/432.pt
deleted file mode 100644
index b7c482e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/432.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/433.pt b/dair_pll_old/assets/contactnets_elbow/433.pt
deleted file mode 100644
index e7fe630..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/433.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/434.pt b/dair_pll_old/assets/contactnets_elbow/434.pt
deleted file mode 100644
index d5c691f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/434.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/435.pt b/dair_pll_old/assets/contactnets_elbow/435.pt
deleted file mode 100644
index 257b959..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/435.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/436.pt b/dair_pll_old/assets/contactnets_elbow/436.pt
deleted file mode 100644
index 02ef173..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/436.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/437.pt b/dair_pll_old/assets/contactnets_elbow/437.pt
deleted file mode 100644
index 44142af..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/437.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/438.pt b/dair_pll_old/assets/contactnets_elbow/438.pt
deleted file mode 100644
index 662b0e3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/438.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/439.pt b/dair_pll_old/assets/contactnets_elbow/439.pt
deleted file mode 100644
index 5535e49..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/439.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/44.pt b/dair_pll_old/assets/contactnets_elbow/44.pt
deleted file mode 100644
index cb2847a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/44.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/440.pt b/dair_pll_old/assets/contactnets_elbow/440.pt
deleted file mode 100644
index c9355ec..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/440.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/441.pt b/dair_pll_old/assets/contactnets_elbow/441.pt
deleted file mode 100644
index 548c5fd..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/441.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/442.pt b/dair_pll_old/assets/contactnets_elbow/442.pt
deleted file mode 100644
index f9772ce..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/442.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/443.pt b/dair_pll_old/assets/contactnets_elbow/443.pt
deleted file mode 100644
index 13f8f85..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/443.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/444.pt b/dair_pll_old/assets/contactnets_elbow/444.pt
deleted file mode 100644
index 9852bf6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/444.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/445.pt b/dair_pll_old/assets/contactnets_elbow/445.pt
deleted file mode 100644
index b69177b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/445.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/446.pt b/dair_pll_old/assets/contactnets_elbow/446.pt
deleted file mode 100644
index 46bec62..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/446.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/447.pt b/dair_pll_old/assets/contactnets_elbow/447.pt
deleted file mode 100644
index d09c7b8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/447.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/448.pt b/dair_pll_old/assets/contactnets_elbow/448.pt
deleted file mode 100644
index b8b9efb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/448.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/449.pt b/dair_pll_old/assets/contactnets_elbow/449.pt
deleted file mode 100644
index 9056b7a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/449.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/45.pt b/dair_pll_old/assets/contactnets_elbow/45.pt
deleted file mode 100644
index c74b1e2..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/45.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/450.pt b/dair_pll_old/assets/contactnets_elbow/450.pt
deleted file mode 100644
index ab77ee0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/450.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/451.pt b/dair_pll_old/assets/contactnets_elbow/451.pt
deleted file mode 100644
index ac740d6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/451.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/452.pt b/dair_pll_old/assets/contactnets_elbow/452.pt
deleted file mode 100644
index 6050c1d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/452.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/453.pt b/dair_pll_old/assets/contactnets_elbow/453.pt
deleted file mode 100644
index 9ef6519..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/453.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/454.pt b/dair_pll_old/assets/contactnets_elbow/454.pt
deleted file mode 100644
index 3df0233..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/454.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/455.pt b/dair_pll_old/assets/contactnets_elbow/455.pt
deleted file mode 100644
index 8e175ff..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/455.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/456.pt b/dair_pll_old/assets/contactnets_elbow/456.pt
deleted file mode 100644
index 02d777e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/456.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/457.pt b/dair_pll_old/assets/contactnets_elbow/457.pt
deleted file mode 100644
index e5cc566..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/457.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/458.pt b/dair_pll_old/assets/contactnets_elbow/458.pt
deleted file mode 100644
index 153a10e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/458.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/459.pt b/dair_pll_old/assets/contactnets_elbow/459.pt
deleted file mode 100644
index 47d2a90..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/459.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/46.pt b/dair_pll_old/assets/contactnets_elbow/46.pt
deleted file mode 100644
index a27c34a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/46.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/460.pt b/dair_pll_old/assets/contactnets_elbow/460.pt
deleted file mode 100644
index 47e7506..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/460.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/461.pt b/dair_pll_old/assets/contactnets_elbow/461.pt
deleted file mode 100644
index 2d85a2e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/461.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/462.pt b/dair_pll_old/assets/contactnets_elbow/462.pt
deleted file mode 100644
index e206632..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/462.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/463.pt b/dair_pll_old/assets/contactnets_elbow/463.pt
deleted file mode 100644
index d08261f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/463.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/464.pt b/dair_pll_old/assets/contactnets_elbow/464.pt
deleted file mode 100644
index 168552c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/464.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/465.pt b/dair_pll_old/assets/contactnets_elbow/465.pt
deleted file mode 100644
index 808245f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/465.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/466.pt b/dair_pll_old/assets/contactnets_elbow/466.pt
deleted file mode 100644
index 8e4f1dd..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/466.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/467.pt b/dair_pll_old/assets/contactnets_elbow/467.pt
deleted file mode 100644
index 1154d65..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/467.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/468.pt b/dair_pll_old/assets/contactnets_elbow/468.pt
deleted file mode 100644
index 87971db..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/468.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/469.pt b/dair_pll_old/assets/contactnets_elbow/469.pt
deleted file mode 100644
index 2328a04..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/469.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/47.pt b/dair_pll_old/assets/contactnets_elbow/47.pt
deleted file mode 100644
index 90b3d78..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/47.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/470.pt b/dair_pll_old/assets/contactnets_elbow/470.pt
deleted file mode 100644
index d4f7b88..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/470.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/471.pt b/dair_pll_old/assets/contactnets_elbow/471.pt
deleted file mode 100644
index 75a0e65..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/471.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/472.pt b/dair_pll_old/assets/contactnets_elbow/472.pt
deleted file mode 100644
index d40858e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/472.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/473.pt b/dair_pll_old/assets/contactnets_elbow/473.pt
deleted file mode 100644
index cd95af2..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/473.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/474.pt b/dair_pll_old/assets/contactnets_elbow/474.pt
deleted file mode 100644
index 4b97195..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/474.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/475.pt b/dair_pll_old/assets/contactnets_elbow/475.pt
deleted file mode 100644
index a74c456..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/475.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/476.pt b/dair_pll_old/assets/contactnets_elbow/476.pt
deleted file mode 100644
index 23766f5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/476.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/477.pt b/dair_pll_old/assets/contactnets_elbow/477.pt
deleted file mode 100644
index b81c521..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/477.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/478.pt b/dair_pll_old/assets/contactnets_elbow/478.pt
deleted file mode 100644
index 11fbd70..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/478.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/479.pt b/dair_pll_old/assets/contactnets_elbow/479.pt
deleted file mode 100644
index 68f6e6f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/479.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/48.pt b/dair_pll_old/assets/contactnets_elbow/48.pt
deleted file mode 100644
index 857eb5f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/48.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/480.pt b/dair_pll_old/assets/contactnets_elbow/480.pt
deleted file mode 100644
index e2b8df8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/480.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/481.pt b/dair_pll_old/assets/contactnets_elbow/481.pt
deleted file mode 100644
index 588bba3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/481.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/482.pt b/dair_pll_old/assets/contactnets_elbow/482.pt
deleted file mode 100644
index 3ad2a1b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/482.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/483.pt b/dair_pll_old/assets/contactnets_elbow/483.pt
deleted file mode 100644
index a5f0278..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/483.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/484.pt b/dair_pll_old/assets/contactnets_elbow/484.pt
deleted file mode 100644
index 64ee3de..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/484.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/485.pt b/dair_pll_old/assets/contactnets_elbow/485.pt
deleted file mode 100644
index fec4fc9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/485.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/486.pt b/dair_pll_old/assets/contactnets_elbow/486.pt
deleted file mode 100644
index fc72da4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/486.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/487.pt b/dair_pll_old/assets/contactnets_elbow/487.pt
deleted file mode 100644
index aece9cc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/487.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/488.pt b/dair_pll_old/assets/contactnets_elbow/488.pt
deleted file mode 100644
index 1b5d0e0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/488.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/489.pt b/dair_pll_old/assets/contactnets_elbow/489.pt
deleted file mode 100644
index 5bafa36..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/489.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/49.pt b/dair_pll_old/assets/contactnets_elbow/49.pt
deleted file mode 100644
index 6104425..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/49.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/490.pt b/dair_pll_old/assets/contactnets_elbow/490.pt
deleted file mode 100644
index 99090b4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/490.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/491.pt b/dair_pll_old/assets/contactnets_elbow/491.pt
deleted file mode 100644
index c8495f1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/491.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/492.pt b/dair_pll_old/assets/contactnets_elbow/492.pt
deleted file mode 100644
index f6232e8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/492.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/493.pt b/dair_pll_old/assets/contactnets_elbow/493.pt
deleted file mode 100644
index 9c28b65..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/493.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/494.pt b/dair_pll_old/assets/contactnets_elbow/494.pt
deleted file mode 100644
index 13c8a25..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/494.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/495.pt b/dair_pll_old/assets/contactnets_elbow/495.pt
deleted file mode 100644
index 66a9f1b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/495.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/496.pt b/dair_pll_old/assets/contactnets_elbow/496.pt
deleted file mode 100644
index 93871f5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/496.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/497.pt b/dair_pll_old/assets/contactnets_elbow/497.pt
deleted file mode 100644
index c4e1b09..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/497.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/498.pt b/dair_pll_old/assets/contactnets_elbow/498.pt
deleted file mode 100644
index 9b8baf9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/498.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/499.pt b/dair_pll_old/assets/contactnets_elbow/499.pt
deleted file mode 100644
index 7976f2e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/499.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/5.pt b/dair_pll_old/assets/contactnets_elbow/5.pt
deleted file mode 100644
index 62e2c47..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/5.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/50.pt b/dair_pll_old/assets/contactnets_elbow/50.pt
deleted file mode 100644
index 79a1217..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/50.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/500.pt b/dair_pll_old/assets/contactnets_elbow/500.pt
deleted file mode 100644
index ecc8773..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/500.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/501.pt b/dair_pll_old/assets/contactnets_elbow/501.pt
deleted file mode 100644
index bee1113..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/501.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/502.pt b/dair_pll_old/assets/contactnets_elbow/502.pt
deleted file mode 100644
index 82d51e4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/502.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/503.pt b/dair_pll_old/assets/contactnets_elbow/503.pt
deleted file mode 100644
index 814d1ab..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/503.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/504.pt b/dair_pll_old/assets/contactnets_elbow/504.pt
deleted file mode 100644
index 6201add..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/504.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/505.pt b/dair_pll_old/assets/contactnets_elbow/505.pt
deleted file mode 100644
index 609b41f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/505.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/506.pt b/dair_pll_old/assets/contactnets_elbow/506.pt
deleted file mode 100644
index e466ca4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/506.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/507.pt b/dair_pll_old/assets/contactnets_elbow/507.pt
deleted file mode 100644
index c20beca..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/507.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/508.pt b/dair_pll_old/assets/contactnets_elbow/508.pt
deleted file mode 100644
index 2a04ebd..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/508.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/509.pt b/dair_pll_old/assets/contactnets_elbow/509.pt
deleted file mode 100644
index 9c2fddc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/509.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/51.pt b/dair_pll_old/assets/contactnets_elbow/51.pt
deleted file mode 100644
index af91b16..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/51.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/510.pt b/dair_pll_old/assets/contactnets_elbow/510.pt
deleted file mode 100644
index fc75869..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/510.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/511.pt b/dair_pll_old/assets/contactnets_elbow/511.pt
deleted file mode 100644
index 4dce8df..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/511.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/512.pt b/dair_pll_old/assets/contactnets_elbow/512.pt
deleted file mode 100644
index 9246d16..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/512.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/513.pt b/dair_pll_old/assets/contactnets_elbow/513.pt
deleted file mode 100644
index ad8823c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/513.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/514.pt b/dair_pll_old/assets/contactnets_elbow/514.pt
deleted file mode 100644
index 2470254..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/514.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/515.pt b/dair_pll_old/assets/contactnets_elbow/515.pt
deleted file mode 100644
index 81c701d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/515.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/516.pt b/dair_pll_old/assets/contactnets_elbow/516.pt
deleted file mode 100644
index 144d121..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/516.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/517.pt b/dair_pll_old/assets/contactnets_elbow/517.pt
deleted file mode 100644
index 536926a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/517.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/518.pt b/dair_pll_old/assets/contactnets_elbow/518.pt
deleted file mode 100644
index 75cee94..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/518.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/519.pt b/dair_pll_old/assets/contactnets_elbow/519.pt
deleted file mode 100644
index 24e85c1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/519.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/52.pt b/dair_pll_old/assets/contactnets_elbow/52.pt
deleted file mode 100644
index e0fae73..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/52.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/520.pt b/dair_pll_old/assets/contactnets_elbow/520.pt
deleted file mode 100644
index 9bd467a..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/520.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/521.pt b/dair_pll_old/assets/contactnets_elbow/521.pt
deleted file mode 100644
index b9e8955..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/521.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/522.pt b/dair_pll_old/assets/contactnets_elbow/522.pt
deleted file mode 100644
index 87eb4e9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/522.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/523.pt b/dair_pll_old/assets/contactnets_elbow/523.pt
deleted file mode 100644
index 00bdf2d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/523.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/524.pt b/dair_pll_old/assets/contactnets_elbow/524.pt
deleted file mode 100644
index 02a6460..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/524.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/525.pt b/dair_pll_old/assets/contactnets_elbow/525.pt
deleted file mode 100644
index 48a4791..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/525.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/526.pt b/dair_pll_old/assets/contactnets_elbow/526.pt
deleted file mode 100644
index f1059fc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/526.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/527.pt b/dair_pll_old/assets/contactnets_elbow/527.pt
deleted file mode 100644
index 2311128..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/527.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/528.pt b/dair_pll_old/assets/contactnets_elbow/528.pt
deleted file mode 100644
index 7688451..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/528.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/529.pt b/dair_pll_old/assets/contactnets_elbow/529.pt
deleted file mode 100644
index b0f81c8..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/529.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/53.pt b/dair_pll_old/assets/contactnets_elbow/53.pt
deleted file mode 100644
index 5dc9f33..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/53.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/530.pt b/dair_pll_old/assets/contactnets_elbow/530.pt
deleted file mode 100644
index 7fc16f1..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/530.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/531.pt b/dair_pll_old/assets/contactnets_elbow/531.pt
deleted file mode 100644
index 315aa4e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/531.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/532.pt b/dair_pll_old/assets/contactnets_elbow/532.pt
deleted file mode 100644
index 539b596..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/532.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/533.pt b/dair_pll_old/assets/contactnets_elbow/533.pt
deleted file mode 100644
index 184ecbe..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/533.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/534.pt b/dair_pll_old/assets/contactnets_elbow/534.pt
deleted file mode 100644
index cedf571..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/534.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/535.pt b/dair_pll_old/assets/contactnets_elbow/535.pt
deleted file mode 100644
index 92422b5..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/535.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/536.pt b/dair_pll_old/assets/contactnets_elbow/536.pt
deleted file mode 100644
index d292579..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/536.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/537.pt b/dair_pll_old/assets/contactnets_elbow/537.pt
deleted file mode 100644
index 09bb7c7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/537.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/538.pt b/dair_pll_old/assets/contactnets_elbow/538.pt
deleted file mode 100644
index 43a7bb3..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/538.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/539.pt b/dair_pll_old/assets/contactnets_elbow/539.pt
deleted file mode 100644
index 53e0227..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/539.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/54.pt b/dair_pll_old/assets/contactnets_elbow/54.pt
deleted file mode 100644
index dbc0739..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/54.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/540.pt b/dair_pll_old/assets/contactnets_elbow/540.pt
deleted file mode 100644
index c50c6fa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/540.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/55.pt b/dair_pll_old/assets/contactnets_elbow/55.pt
deleted file mode 100644
index 82f5c46..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/55.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/56.pt b/dair_pll_old/assets/contactnets_elbow/56.pt
deleted file mode 100644
index 6663797..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/56.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/57.pt b/dair_pll_old/assets/contactnets_elbow/57.pt
deleted file mode 100644
index bd46eaa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/57.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/58.pt b/dair_pll_old/assets/contactnets_elbow/58.pt
deleted file mode 100644
index 244fdfa..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/58.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/59.pt b/dair_pll_old/assets/contactnets_elbow/59.pt
deleted file mode 100644
index 966f7fc..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/59.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/6.pt b/dair_pll_old/assets/contactnets_elbow/6.pt
deleted file mode 100644
index bb0ee16..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/6.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/60.pt b/dair_pll_old/assets/contactnets_elbow/60.pt
deleted file mode 100644
index 485f6c4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/60.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/61.pt b/dair_pll_old/assets/contactnets_elbow/61.pt
deleted file mode 100644
index 2bdf493..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/61.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/62.pt b/dair_pll_old/assets/contactnets_elbow/62.pt
deleted file mode 100644
index 4d7e713..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/62.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/63.pt b/dair_pll_old/assets/contactnets_elbow/63.pt
deleted file mode 100644
index 88620ae..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/63.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/64.pt b/dair_pll_old/assets/contactnets_elbow/64.pt
deleted file mode 100644
index 0279a63..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/64.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/65.pt b/dair_pll_old/assets/contactnets_elbow/65.pt
deleted file mode 100644
index 768f0a4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/65.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/66.pt b/dair_pll_old/assets/contactnets_elbow/66.pt
deleted file mode 100644
index d5c3416..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/66.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/67.pt b/dair_pll_old/assets/contactnets_elbow/67.pt
deleted file mode 100644
index a5f3d41..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/67.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/68.pt b/dair_pll_old/assets/contactnets_elbow/68.pt
deleted file mode 100644
index ba8de7b..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/68.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/69.pt b/dair_pll_old/assets/contactnets_elbow/69.pt
deleted file mode 100644
index c719692..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/69.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/7.pt b/dair_pll_old/assets/contactnets_elbow/7.pt
deleted file mode 100644
index 19c5655..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/7.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/70.pt b/dair_pll_old/assets/contactnets_elbow/70.pt
deleted file mode 100644
index e8ba6c0..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/70.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/71.pt b/dair_pll_old/assets/contactnets_elbow/71.pt
deleted file mode 100644
index 41232bb..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/71.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/72.pt b/dair_pll_old/assets/contactnets_elbow/72.pt
deleted file mode 100644
index 518e313..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/72.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/73.pt b/dair_pll_old/assets/contactnets_elbow/73.pt
deleted file mode 100644
index b6609e9..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/73.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/74.pt b/dair_pll_old/assets/contactnets_elbow/74.pt
deleted file mode 100644
index 0a39d17..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/74.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/75.pt b/dair_pll_old/assets/contactnets_elbow/75.pt
deleted file mode 100644
index 0f30205..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/75.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/76.pt b/dair_pll_old/assets/contactnets_elbow/76.pt
deleted file mode 100644
index 73e7791..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/76.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/77.pt b/dair_pll_old/assets/contactnets_elbow/77.pt
deleted file mode 100644
index 6d51d6e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/77.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/78.pt b/dair_pll_old/assets/contactnets_elbow/78.pt
deleted file mode 100644
index 32afba4..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/78.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/79.pt b/dair_pll_old/assets/contactnets_elbow/79.pt
deleted file mode 100644
index d683cf6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/79.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/8.pt b/dair_pll_old/assets/contactnets_elbow/8.pt
deleted file mode 100644
index 237eb86..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/8.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/80.pt b/dair_pll_old/assets/contactnets_elbow/80.pt
deleted file mode 100644
index c3cf991..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/80.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/81.pt b/dair_pll_old/assets/contactnets_elbow/81.pt
deleted file mode 100644
index b9cef14..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/81.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/82.pt b/dair_pll_old/assets/contactnets_elbow/82.pt
deleted file mode 100644
index 1f72502..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/82.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/83.pt b/dair_pll_old/assets/contactnets_elbow/83.pt
deleted file mode 100644
index e546add..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/83.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/84.pt b/dair_pll_old/assets/contactnets_elbow/84.pt
deleted file mode 100644
index 6ef5603..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/84.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/85.pt b/dair_pll_old/assets/contactnets_elbow/85.pt
deleted file mode 100644
index 09ef437..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/85.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/86.pt b/dair_pll_old/assets/contactnets_elbow/86.pt
deleted file mode 100644
index 4dac29f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/86.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/87.pt b/dair_pll_old/assets/contactnets_elbow/87.pt
deleted file mode 100644
index 3a31b5c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/87.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/88.pt b/dair_pll_old/assets/contactnets_elbow/88.pt
deleted file mode 100644
index a6ff104..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/88.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/89.pt b/dair_pll_old/assets/contactnets_elbow/89.pt
deleted file mode 100644
index da3c07f..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/89.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/9.pt b/dair_pll_old/assets/contactnets_elbow/9.pt
deleted file mode 100644
index 120e44e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/9.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/90.pt b/dair_pll_old/assets/contactnets_elbow/90.pt
deleted file mode 100644
index 66c09bf..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/90.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/91.pt b/dair_pll_old/assets/contactnets_elbow/91.pt
deleted file mode 100644
index c77b43e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/91.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/92.pt b/dair_pll_old/assets/contactnets_elbow/92.pt
deleted file mode 100644
index 7d2c505..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/92.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/93.pt b/dair_pll_old/assets/contactnets_elbow/93.pt
deleted file mode 100644
index d7c7d1d..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/93.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/94.pt b/dair_pll_old/assets/contactnets_elbow/94.pt
deleted file mode 100644
index a19fd0e..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/94.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/95.pt b/dair_pll_old/assets/contactnets_elbow/95.pt
deleted file mode 100644
index b077ba7..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/95.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/96.pt b/dair_pll_old/assets/contactnets_elbow/96.pt
deleted file mode 100644
index 302945c..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/96.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/97.pt b/dair_pll_old/assets/contactnets_elbow/97.pt
deleted file mode 100644
index ef677ea..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/97.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/98.pt b/dair_pll_old/assets/contactnets_elbow/98.pt
deleted file mode 100644
index c4ed1c6..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/98.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow/99.pt b/dair_pll_old/assets/contactnets_elbow/99.pt
deleted file mode 100644
index 6f1be83..0000000
Binary files a/dair_pll_old/assets/contactnets_elbow/99.pt and /dev/null differ
diff --git a/dair_pll_old/assets/contactnets_elbow_bad_init.urdf b/dair_pll_old/assets/contactnets_elbow_bad_init.urdf
deleted file mode 100644
index 35302ff..0000000
--- a/dair_pll_old/assets/contactnets_elbow_bad_init.urdf
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/dair_pll_old/assets/contactnets_elbow_mesh.urdf b/dair_pll_old/assets/contactnets_elbow_mesh.urdf
deleted file mode 100644
index c944c58..0000000
--- a/dair_pll_old/assets/contactnets_elbow_mesh.urdf
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/dair_pll_old/assets/contactnets_elbow_mesh_small_init.urdf b/dair_pll_old/assets/contactnets_elbow_mesh_small_init.urdf
deleted file mode 100644
index 90da286..0000000
--- a/dair_pll_old/assets/contactnets_elbow_mesh_small_init.urdf
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/dair_pll_old/assets/contactnets_elbow_small_init.urdf b/dair_pll_old/assets/contactnets_elbow_small_init.urdf
deleted file mode 100644
index 192f391..0000000
--- a/dair_pll_old/assets/contactnets_elbow_small_init.urdf
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/dair_pll_old/assets/cube_mujoco.xml b/dair_pll_old/assets/cube_mujoco.xml
deleted file mode 100644
index 966e315..0000000
--- a/dair_pll_old/assets/cube_mujoco.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/dair_pll_old/assets/cube_mujoco_200.xml b/dair_pll_old/assets/cube_mujoco_200.xml
deleted file mode 100644
index 8afea77..0000000
--- a/dair_pll_old/assets/cube_mujoco_200.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/assets/mesh_all_tosses.urdf b/dair_pll_old/assets/mesh_all_tosses.urdf
deleted file mode 100644
index 9139abb..0000000
--- a/dair_pll_old/assets/mesh_all_tosses.urdf
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/assets/static.html b/dair_pll_old/assets/static.html
deleted file mode 100644
index a47af64..0000000
--- a/dair_pll_old/assets/static.html
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
\ No newline at end of file
diff --git a/dair_pll_old/dair_pll/__init__.py b/dair_pll_old/dair_pll/__init__.py
deleted file mode 100644
index ea24262..0000000
--- a/dair_pll_old/dair_pll/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import os
-
-__all__ = []
-for module in os.listdir(os.path.dirname(__file__)):
- if module == '__init__.py' or module[-3:] != '.py':
- continue
- __all__.append(module[:-3])
-del module
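
For reference, the deleted __init__.py above builds __all__ by listing the .py files
in its own directory. A roughly equivalent sketch using the standard-library pkgutil
helper is shown below; this is only an illustration of the same pattern (it assumes a
flat package of plain .py modules) and is not code from the repository.

# Drop-in sketch for a package __init__.py: __path__ is the package's directory
# list, and iter_modules yields one entry per importable submodule found there.
import pkgutil

__all__ = [module_info.name for module_info in pkgutil.iter_modules(__path__)]
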
diff --git a/dair_pll_old/dair_pll/dataset_generation.py b/dair_pll_old/dair_pll/dataset_generation.py
deleted file mode 100644
index 3645a13..0000000
--- a/dair_pll_old/dair_pll/dataset_generation.py
+++ /dev/null
@@ -1,153 +0,0 @@
-r""" This module also contains utilities for generating simulation data
-from a :class:`~dair_pll.system.System`\ .
-
-Centers around the :class:`ExperimentDatasetGenerator` type, which takes in a
-configuration describing trajectory parameters, as well as distributions for
-initial conditions and noise to apply to simulated states.
-"""
-from dataclasses import dataclass
-from typing import Type, Union, List
-
-import pdb
-
-import torch
-from torch import Tensor
-
-from dair_pll import file_utils
-from dair_pll.state_space import CenteredSampler, UniformSampler, \
- GaussianWhiteNoiser, UniformWhiteNoiser
-from dair_pll.system import System
-
-DEFAULT_TRAJECTORY_BATCH_SIZE = 30
-"""Number of trajectories to simulate before intermediate save-to-disk."""
-
-@dataclass
-class DataGenerationConfig:
- """:func:`~dataclasses.dataclass` for configuring generation of a
- trajectory dataset."""
- # pylint: disable=too-many-instance-attributes
- dt: float = 1e-3
- r"""Time step, ``> 0``\ ."""
-    n_pop: int = 16384
- r"""Total number of trajectories to select from, ``>= 0``\ ."""
- trajectory_length: int = 80
- r"""Trajectory length, ``>= 1``\ ."""
- x_0: Tensor = Tensor()
- """Nominal initial states."""
- sampler_type: Type[CenteredSampler] = UniformSampler
- r"""Distribution for sampling around :attr:`x_0`\ ."""
- sampler_ranges: Tensor = Tensor()
- r"""``(2 * n_v)`` size of perturbations sampled around :attr:`x_0`\ ."""
- noiser_type: Union[Type[GaussianWhiteNoiser], Type[UniformWhiteNoiser]] = \
- GaussianWhiteNoiser
- """Type of noise to add to data."""
- static_noise: Tensor = Tensor()
- """``(2 * n_v)`` sampler ranges for constant-in-time trajectory noise."""
- dynamic_noise: Tensor = Tensor()
- """``(2 * n_v)`` sampler ranges for i.i.d.-in-time trajectory noise."""
- storage: str = './'
- """Experiment folder for data storage. Defaults to working directory."""
-
- def __post_init__(self):
- """Method to check validity of parameters."""
- assert self.dt > 0.
- assert self.n_pop >= 0
- assert self.trajectory_length >= 1
- assert self.sampler_ranges.nelement() == self.static_noise.nelement()
- assert self.static_noise.nelement() == self.dynamic_noise.nelement()
-
-
-class ExperimentDatasetGenerator:
- r"""Conducts generation of a population of simulated trajectories from
- a provided system.
-
- These trajectories are stored on disk at a location described in the
- generator's configuration. Two sets are generated: one `ground truth`
-    set, which is precisely what the system predicts; and a `learning` set
- which has artificial measurement noise added.
-
- The various parameters describing the qualities of the dataset are given
- in the provided :py:class:`DataGenerationConfig`\ .
- """
- system: System
- config: DataGenerationConfig
-
- def __init__(self, system: System, config: DataGenerationConfig) -> None:
- self.system = system
- self.config = config
-
- def generate(self) -> None:
- """Simulate trajectories and write them to disk."""
- config = self.config
- n_pop = config.n_pop
- ground_truth_dir = file_utils.ground_truth_data_dir(config.storage)
- learning_dir = file_utils.learning_data_dir(config.storage)
- n_on_disk = file_utils.get_trajectory_count(ground_truth_dir)
- n_to_add = DEFAULT_TRAJECTORY_BATCH_SIZE
- while n_on_disk < n_pop:
- n_on_disk = file_utils.get_trajectory_count(ground_truth_dir)
- n_to_add = min(n_to_add, max(n_pop - n_on_disk, 0))
- if n_to_add == 0:
- break
- ground_truth_trajectories = self.simulate_trajectory_set(n_to_add)
- learning_trajectories = self.make_noised_trajectories(
- ground_truth_trajectories)
- for relative_index in range(n_to_add):
- ground_truth_file = file_utils.trajectory_file(
- ground_truth_dir, n_on_disk + relative_index)
- torch.save(ground_truth_trajectories[relative_index],
- ground_truth_file)
-
- learning_file = file_utils.trajectory_file(
- learning_dir, n_on_disk + relative_index)
- torch.save(learning_trajectories[relative_index], learning_file)
-
- def simulate_trajectory_set(self, num_trajectories: int) -> List[Tensor]:
- """Simulate trajectories using :py:attr:`system`
-
- Args:
- num_trajectories: number of trajectories to simulate
-
- Returns:
- List of ``(T, self.system.space.n_x)`` trajectories.
- """
- assert num_trajectories >= 0
- config = self.config
- system = self.system
- starting_state = config.x_0
- system.set_state_sampler(
- config.sampler_type(system.space,
- config.sampler_ranges,
- x_0=starting_state))
-
- trajectories = []
- for _ in range(num_trajectories):
- trajectory, _ = system.sample_trajectory(config.trajectory_length)
- trajectories.append(trajectory)
- return trajectories
-
- def make_noised_trajectories(self, traj_set: List[Tensor]) -> List[Tensor]:
- r"""Given ground-truth trajectories predicted with :py:attr:`system`\ ,
- returns corresponding set of learning trajectories with added
- measurement noise.
-
- Args:
- traj_set: List of ground-truth ``(*, self.system.space.n_x)`` state
- trajectories.
-
- Returns:
- List of ``(*, self.system.space.n_x)`` noisy state trajectories.
- """
- config = self.config
- noiser = config.noiser_type(self.system.space)
- noised_trajectories = []
- for traj in traj_set:
- static_disturbed = noiser.noise(traj,
- config.static_noise,
- independent=False)
- dynamic_disturbed = noiser.noise(static_disturbed,
- config.dynamic_noise)
- dynamic_disturbed = self.system.space.project_derivative(
- dynamic_disturbed, config.dt)
- noised_trajectories.append(dynamic_disturbed)
- return noised_trajectories
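
For context on the dataset_generation.py module removed above: it pairs a
DataGenerationConfig with an ExperimentDatasetGenerator. The sketch below shows one
plausible way the two are wired together, based only on the fields and assertions
visible in the diff; the make_system() factory, the storage path, and the numeric
values are hypothetical placeholders, not settings taken from the project.

import torch

from dair_pll.dataset_generation import (DataGenerationConfig,
                                          ExperimentDatasetGenerator)
from dair_pll.state_space import GaussianWhiteNoiser, UniformSampler

system = make_system()          # hypothetical: any dair_pll System instance
n_x = system.space.n_x          # state dimension
n_v = system.space.n_v          # velocity dimension

config = DataGenerationConfig(
    dt=1e-3,
    n_pop=64,                                 # total trajectories to put on disk
    trajectory_length=80,
    x_0=torch.zeros(n_x),                     # must be a valid nominal state
    sampler_type=UniformSampler,
    sampler_ranges=0.1 * torch.ones(2 * n_v),
    noiser_type=GaussianWhiteNoiser,
    static_noise=1e-3 * torch.ones(2 * n_v),  # same nelement as sampler_ranges
    dynamic_noise=1e-3 * torch.ones(2 * n_v),
    storage='/tmp/pll_experiment',            # hypothetical experiment folder
)

# generate() simulates in batches of DEFAULT_TRAJECTORY_BATCH_SIZE and writes both
# the ground-truth and the noised "learning" trajectories to disk.
ExperimentDatasetGenerator(system, config).generate()
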
diff --git a/dair_pll_old/dair_pll/dataset_management.py b/dair_pll_old/dair_pll/dataset_management.py
deleted file mode 100644
index 74054ab..0000000
--- a/dair_pll_old/dair_pll/dataset_management.py
+++ /dev/null
@@ -1,273 +0,0 @@
-r"""Classes for generating and managing datasets for experiments.
-
-Centers around the :class:`ExperimentDataManager` type, which transforms a
-set of trajectories saved to disk for various tasks encountered during an
-experiment."""
-from dataclasses import dataclass, field
-from typing import List, Tuple, Optional, cast
-
-import torch
-from torch import Tensor
-from torch.utils.data import Dataset
-
-from dair_pll import file_utils
-
-
-@dataclass
-class TrajectorySliceConfig:
- """:func:`~dataclasses.dataclass` for configuring a trajectory slicing
- for training process."""
- t_skip: int = 0
- """Index of first time to predict from ``>=`` :attr:`t_history` ``- 1``."""
- t_history: int = 1
- r"""Number of steps in initial condition for prediction, ``>= 1``\ ."""
- t_prediction: int = 1
- r"""Number of future steps to use during training/evaluation, ``>= 1``\ ."""
-
- def __post_init__(self):
- """Method to check validity of parameters."""
- assert self.t_skip + 1 >= self.t_history
- assert self.t_history >= 1
- assert self.t_prediction >= 1
-
-
-@dataclass
-class DataConfig:
- """:func:`~dataclasses.dataclass` for configuring a trajectory dataset."""
- dt: float = 1e-3
- r"""Time step, ``> 0``\ ."""
- train_fraction: float = 0.5
- r"""Fraction of training trajectories to select, ``<= 1, >= 0``\ ."""
- valid_fraction: float = 0.25
- r"""Fraction of validation trajectories to select, ``<= 1, >= 0``\ ."""
- test_fraction: float = 0.25
- r"""Fraction of testing trajectories to select, ``<= 1, >= 0``\ ."""
- slice_config: TrajectorySliceConfig = field(
- default_factory=TrajectorySliceConfig)
- r"""Config for arranging trajectories into times slices for training."""
- update_dynamically: bool = False
- """Whether to check for new trajectories after each epoch."""
-
- def __post_init__(self):
- """Method to check validity of parameters."""
- fractions = [
- self.train_fraction, self.valid_fraction, self.test_fraction
- ]
- assert all(0. <= fraction <= 1. for fraction in fractions)
- assert sum(fractions) <= 1
-
-
-class TrajectorySliceDataset(Dataset):
- r"""Dataset of trajectory slices for training.
-
- Given a list of trajectories and a :py:class:`TrajectorySliceConfig`\ ,
- generates sets of (previous states, future states) transition pairs to be
- used with the training loss of an experiment.
-
- Extends :py:class:`torch.utils.data.Dataset` type in order to be managed
- in the training process with a :py:class:`torch.utils.data.DataLoader`\ .
- """
- config: TrajectorySliceConfig
- """Slice configuration describing durations and start index."""
- previous_states_slices: List[Tensor]
- r"""Initial conditions of duration ``self.config.t_history`` ."""
- future_states_slices: List[Tensor]
- r"""Future targets of duration ``self.config.t_prediction`` ."""
-
- def __init__(self, config: TrajectorySliceConfig):
- """
- Args:
- config: configuration object for slice dataset.
- """
- self.config = config
- self.previous_states_slices = [] # type: List[Tensor]
- self.future_states_slices = [] # type: List[Tensor]
-
- def add_slices_from_trajectory(self, trajectory: Tensor) -> None:
- """Incorporate trajectory into dataset as a set of slices.
-
- Args:
- trajectory: ``(T, *)`` state trajectory.
- """
- trajectory_length = trajectory.shape[0]
- first_time_index = self.config.t_skip
- last_time_index = trajectory_length - self.config.t_prediction
- previous_states_length = self.config.t_history
- future_states_length = self.config.t_prediction
- assert first_time_index <= last_time_index
- for index in range(first_time_index, last_time_index):
- self.previous_states_slices.append(
- trajectory[(index + 1 - previous_states_length):(index + 1), :])
- self.future_states_slices.append(
- trajectory[(index + 1):(index + 1 + future_states_length), :])
-
- def __len__(self) -> int:
- """Length of dataset as number of total slice pairs."""
- return len(self.previous_states_slices)
-
- def __getitem__(self, idx) -> Tuple[Tensor, Tensor]:
- """Retrieve slice pair at index."""
- return self.previous_states_slices[idx], self.future_states_slices[idx]
-
-
-@dataclass
-class TrajectorySet:
- """Dataclass encapsulating the various transforms of a set of
- trajectories that are used during the training and evaluation process,
- including:
-
- * Slices for training;
- * Entire trajectories for evaluation; and
- * Indices associated with on-disk location for experiment resumption.
- """
- slices: TrajectorySliceDataset
- """Trajectories rendered as a dataset of time slices."""
- trajectories: List[Tensor] = field(default_factory=lambda: [])
- """Trajectories in their raw format."""
- indices: Tensor = field(default_factory=lambda: Tensor([]).long())
- """Indices associated with on-disk filenames."""
-
- def __post_init__(self):
- """Validate correspondence between trajectories and indices."""
- assert self.indices.nelement() == len(self.trajectories)
- # assure all indices are unique
- assert self.indices.unique().nelement() == self.indices.nelement()
-
- def add_trajectories(self, trajectory_list: List[Tensor], indices: Tensor) \
- -> None:
- """Add new subset of trajectories to set.
-
- Args:
- trajectory_list: List of new ``(T, *)`` state trajectories.
- indices: indices associated with on-disk filenames.
- """
- self.trajectories.extend(trajectory_list)
- for trajectory in trajectory_list:
- self.slices.add_slices_from_trajectory(trajectory)
- # pylint: disable=no-member
- self.indices = torch.cat([self.indices, indices])
-
-
-class ExperimentDataManager:
- r"""Management object for maintaining training, validation, and testing
- data for an experiment.
-
- Loads trajectories stored in standard location associated with provided
- storage directory; splits into train/valid/test sets; and instantiates
- transformations for each set of data as a :py:class:`TrajectorySet`\ .
- """
- trajectory_dir: str
- """Directory in which trajectory files are stored."""
- config: DataConfig
- """Configuration for manipulating data."""
- train_set: TrajectorySet
- """Training trajectory set."""
- valid_set: TrajectorySet
- """Validation trajectory set."""
- test_set: TrajectorySet
- """Test trajectory set."""
- n_sorted: int
- """Number of files on disk split into (train, valid, test) sets so far."""
-
- def __init__(self, storage: str, config: DataConfig,
- initial_split: Optional[Tuple[Tensor, Tensor, Tensor]] = None,
- use_ground_truth: bool = False) -> None:
- """
- Args:
- storage: Storage directory to source trajectories from.
- config: Configuration object.
- initial_split: Optionally, lists of trajectory indices that
- should be sorted into (train, valid, test) sets from the
- beginning.
- use_ground_truth: Whether trajectories should be sourced from
- ground-truth or learning data.
- """
- if use_ground_truth:
- self.trajectory_dir = file_utils.ground_truth_data_dir(storage)
- else:
- self.trajectory_dir = file_utils.learning_data_dir(storage)
- self.config = config
- self.train_set = self.make_empty_trajectory_set()
- self.valid_set = self.make_empty_trajectory_set()
- self.test_set = self.make_empty_trajectory_set()
- self.n_sorted = 0
- if initial_split:
- self.extend_trajectory_sets(initial_split)
-
- @property
- def _trajectory_sets(
- self) -> Tuple[TrajectorySet, TrajectorySet, TrajectorySet]:
- """getter for tuple of (train, valid, test) set."""
- return self.train_set, self.valid_set, self.test_set
-
- def trajectory_set_indices(self) -> Tuple[Tensor, Tensor, Tensor]:
- """The sets of indices associated with the (train, valid,
- test) trajectories."""
- index_lists = [
- trajectory_set.indices for trajectory_set in self._trajectory_sets
- ]
- return cast(Tuple[Tensor, Tensor, Tensor], tuple(index_lists))
-
- def make_empty_trajectory_set(self) -> TrajectorySet:
- r"""Instantiates an empty :py:class:`TrajectorySet` associated with
- the time slice configuration contained in :py:attr:`config`\ ."""
- slice_dataset = TrajectorySliceDataset(self.config.slice_config)
- return TrajectorySet(slices=slice_dataset)
-
- def extend_trajectory_sets(
- self, index_lists: Tuple[Tensor, Tensor, Tensor]) -> None:
- """Supplement each of (train, valid, test) trajectory sets with
- provided trajectories, listed by their on-disk indices.
-
- Args:
- index_lists: Lists of trajectory indices for each set.
- """
- for trajectory_set, trajectory_indices in zip(self._trajectory_sets,
- index_lists):
- trajectories = [
- torch.load(
- file_utils.trajectory_file(self.trajectory_dir,
- trajectory_index))
- for trajectory_index in trajectory_indices
- ]
- trajectory_set.add_trajectories(trajectories, trajectory_indices)
- self.n_sorted += trajectory_indices.nelement()
-
- def get_updated_trajectory_sets(
- self) -> Tuple[TrajectorySet, TrajectorySet, TrajectorySet]:
- """Returns an up-to-date partition of trajectories on disk.
-
- Checks if some trajectories on disk have yet to be sorted,
- and supplements the (train, valid, test) sets with these additional
- trajectories before returning the updated sets.
-
- Returns:
- Training set.
- Validation set.
- Test set.
- """
- config = self.config
- n_on_disk = file_utils.get_trajectory_count(self.trajectory_dir)
- if n_on_disk != self.n_sorted:
- n_unsorted = n_on_disk - self.n_sorted
- n_train = round(n_unsorted * config.train_fraction)
- n_valid = round(n_unsorted * config.valid_fraction)
- n_remaining = n_unsorted - n_valid - n_train
- n_test = min(n_remaining, round(n_unsorted * config.test_fraction))
-
- n_requested = n_train + n_valid + n_test
- assert n_requested <= n_unsorted
-
- # pylint: disable=no-member
- trajectory_order = torch.randperm(n_unsorted) + self.n_sorted
- train_indices = trajectory_order[:n_train]
- trajectory_order = trajectory_order[n_train:]
-
- valid_indices = trajectory_order[:n_valid]
- trajectory_order = trajectory_order[n_valid:]
- test_indices = trajectory_order[:n_test]
-
- self.extend_trajectory_sets(
- (train_indices, valid_indices, test_indices))
-
- return self._trajectory_sets
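
The index arithmetic in TrajectorySliceDataset.add_slices_from_trajectory above is
easy to get off by one, so here is a self-contained restatement of it with a small
sanity check. The helper below is illustrative only and is not part of the package.

from typing import List, Tuple

import torch
from torch import Tensor


def slice_trajectory(trajectory: Tensor, t_skip: int = 0, t_history: int = 1,
                     t_prediction: int = 1) -> List[Tuple[Tensor, Tensor]]:
    """Mirror of add_slices_from_trajectory: each valid index i yields the pair
    (trajectory[i+1-t_history : i+1], trajectory[i+1 : i+1+t_prediction])."""
    assert t_skip + 1 >= t_history and t_history >= 1 and t_prediction >= 1
    last = trajectory.shape[0] - t_prediction
    return [(trajectory[i + 1 - t_history:i + 1],
             trajectory[i + 1:i + 1 + t_prediction])
            for i in range(t_skip, last)]


# A length-6 trajectory with t_skip=1, t_history=2, t_prediction=1 yields 4 pairs;
# the first conditions on states 0-1 and predicts state 2.
pairs = slice_trajectory(torch.arange(6.).unsqueeze(-1),
                         t_skip=1, t_history=2, t_prediction=1)
assert len(pairs) == 4 and pairs[0][1].item() == 2.0
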
diff --git a/dair_pll_old/dair_pll/deep_learnable_model.py b/dair_pll_old/dair_pll/deep_learnable_model.py
deleted file mode 100644
index 1bc6663..0000000
--- a/dair_pll_old/dair_pll/deep_learnable_model.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from abc import ABC, abstractmethod
-
-import torch
-from torch import Tensor, nn
-from torch.nn import Module, Parameter
-
-
-class DeepLearnableModel(ABC, Module):
- mean: Parameter
-    std: Parameter
-
- def __init__(self, in_size):
- super().__init__()
- self.mean = Parameter(torch.ones(in_size), requires_grad=False)
- self.std = Parameter(torch.ones(in_size), requires_grad=False)
-
- @abstractmethod
- def sequential_eval(self, x: Tensor, carry: Tensor) -> Tensor:
- pass
-
- def set_normalization(self, x: Tensor) -> None:
- # flatten x into shape num_points * in_size
- x = torch.flatten(x, end_dim=-2)
- self.mean = Parameter(x.mean(dim=0), requires_grad=False)
- self.std = Parameter(x.std(dim=0), requires_grad=False)
-
- def normalize(self, x: Tensor) -> Tensor:
- return (x - self.mean) / self.std
-
-
-class DeepRecurrentModel(DeepLearnableModel):
-
- def __init__(self, in_size: int, hidden_size: int, out_size: int,
- layers: int, nonlinearity: Module) -> None:
- super().__init__(in_size)
- encode = True
- if encode:
- self.encoder = _mlp(in_size, hidden_size, hidden_size, layers // 2,
- nonlinearity)
- else:
- self.encoder = lambda x: x
- self.decoder = _mlp(hidden_size, hidden_size, out_size,
- layers - (layers // 2), nonlinearity)
- rnn_in_size = hidden_size if encode else in_size
- self.recurrent = nn.GRU(input_size=rnn_in_size,
- hidden_size=hidden_size,
- num_layers=1,
- batch_first=True)
-
- def forward(self, x: Tensor, carry: Tensor) -> Tensor:
- (next_recurrent_output, carry) = self.sequential_eval(x, carry)
- return self.decoder(next_recurrent_output), carry
-
- def sequential_eval(self, x: Tensor, carry: Tensor) -> Tensor:
- # pdb.set_trace()
- # x is B x L x N
- carry = carry.transpose(0, 1)
- for i in range(x.shape[1]):
- xi = self.normalize(x[:, i:(i + 1), :])
- recurrent_output, carry = self.recurrent(self.encoder(xi), carry)
- return recurrent_output, carry.transpose(0, 1)
-
-
-def _mlp(in_size: int, hidden_size: int, out_size: int, layers: int,
- nonlinearity: Module) -> Module:
- modules = []
- if layers == 0:
- return nn.Linear(in_size, out_size)
- modules.append(nn.Linear(in_size, hidden_size))
- for i in range(layers - 1):
- modules.append(nonlinearity())
- modules.append(nn.Linear(hidden_size, hidden_size))
- modules.append(nonlinearity())
- modules.append(nn.Linear(hidden_size, out_size))
- return nn.Sequential(*modules)
-
-
-class MLP(DeepLearnableModel):
-
- def __init__(self, in_size: int, hidden_size: int, out_size: int,
- layers: int, nonlinearity: Module) -> None:
- super().__init__(in_size)
- self.net = _mlp(in_size, hidden_size, out_size, layers, nonlinearity)
-
- def forward(self, x: Tensor, carry: Tensor) -> Tensor:
- return self.sequential_eval(x, carry)
-
- def sequential_eval(self, x: Tensor, carry: Tensor) -> Tensor:
- # x is B x L x N
- return self.net(self.normalize(x[:, -1, :])).unsqueeze(1), carry
-
-
-class ZeroModel(DeepLearnableModel):
-
- def __init__(self, in_size: int, hidden_size: int, out_size: int,
- layers: int, nonlinearity: Module) -> None:
- super().__init__(in_size)
- self.out_size = out_size
- self.dummy_param = torch.nn.Parameter(torch.tensor(1.))
-
- def forward(self, x: Tensor, carry: Tensor) -> Tensor:
- return self.sequential_eval(x, carry)
-
- def sequential_eval(self, x: Tensor, carry: Tensor) -> Tensor:
- # x is B x L x N
- return self.dummy_param * torch.zeros(
- (x.shape[0], 1, self.out_size)), carry
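
As a quick reference for the interface of the models removed above, the shape-check
sketch below exercises MLP and DeepRecurrentModel the way the surrounding code
appears to use them: a (batch, length, state) input plus a (batch, 1, hidden) carry.
It assumes the module is still importable (e.g. from a checkout predating this
deletion); the dimensions are arbitrary.

import torch

from dair_pll.deep_learnable_model import MLP, DeepRecurrentModel

batch, length, n_x, hidden, n_out = 4, 3, 13, 128, 6
x = torch.randn(batch, length, n_x)

# MLP only looks at the last step of the history and passes the carry through.
mlp = MLP(n_x, hidden, n_out, layers=2, nonlinearity=torch.nn.ReLU)
mlp.set_normalization(x)            # stores per-dimension mean/std of the data
y, carry = mlp(x, torch.zeros(batch, 1, hidden))
assert y.shape == (batch, 1, n_out)

# DeepRecurrentModel encodes each step, runs a GRU, and decodes the last output.
rnn = DeepRecurrentModel(n_x, hidden, n_out, layers=2, nonlinearity=torch.nn.ReLU)
y, carry = rnn(x, torch.zeros(batch, 1, hidden))
assert y.shape == (batch, 1, n_out) and carry.shape == (batch, 1, hidden)
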
diff --git a/dair_pll_old/dair_pll/deep_learnable_system.py b/dair_pll_old/dair_pll/deep_learnable_system.py
deleted file mode 100644
index 360a143..0000000
--- a/dair_pll_old/dair_pll/deep_learnable_system.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from abc import ABC
-from dataclasses import dataclass
-from typing import Tuple, Type, Optional, cast
-
-import pdb
-
-import torch
-from torch import Tensor
-from torch.nn import Module
-
-from dair_pll.deep_learnable_model import DeepLearnableModel, DeepRecurrentModel
-from dair_pll.experiment import SupervisedLearningExperiment
-from dair_pll.experiment_config import SystemConfig, \
- SupervisedLearningExperimentConfig
-from dair_pll.integrator import Integrator, VelocityIntegrator, \
- PartialStepCallback
-from dair_pll.state_space import StateSpace
-from dair_pll.system import System
-
-
-@dataclass
-class DeepLearnableSystemConfig(SystemConfig):
- integrator_type: Type[Integrator] = VelocityIntegrator
- layers: int = 1
-    nonlinearity: Type[Module] = torch.nn.ReLU
- hidden_size: int = 128
- model_constructor: Type[DeepLearnableModel] = DeepRecurrentModel
- represent_geometry_as: str = 'polygon'
-
-
-class DeepLearnableSystem(System):
- model: Module
-
- def __init__(self,
- base_system: System,
- config: DeepLearnableSystemConfig,
- training_data: Optional[Tensor] = None) -> None:
- space = base_system.space
- output_size = config.integrator_type.calc_out_size(space)
-
- model = config.model_constructor(space.n_x, config.hidden_size,
- output_size, config.layers,
- config.nonlinearity)
-        if training_data is not None:
- model.set_normalization(training_data)
-
- integrator = config.integrator_type(space, model,
- base_system.integrator.dt)
-
- super().__init__(space, integrator)
- self.model = model
- self.set_carry_sampler(lambda: torch.zeros((1, 1, config.hidden_size)))
-
- def preprocess_initial_condition(self, x_0: Tensor,
- carry_0: Tensor) -> Tuple[Tensor, Tensor]:
- """Preload initial condition."""
- if len(x_0.shape) > 1 and x_0.shape[1] > 1:
- # recurrent start, preload trajectory
- x_pre = x_0[..., :(-1), :]
- _, carry_0 = self.model.sequential_eval(x_pre, carry_0)
- return x_0[..., (-1):, :], carry_0
- else:
- return x_0, carry_0
-
-
-class DeepLearnableExperiment(SupervisedLearningExperiment, ABC):
-
- def get_learned_system(self, train_states: Tensor) -> System:
- deep_learnable_config = cast(DeepLearnableSystemConfig,
- self.config.learnable_config)
-
- return DeepLearnableSystem(self.get_base_system(),
- deep_learnable_config, train_states)
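
The preprocess_initial_condition method removed above warms up the recurrent carry
on all but the last state of a multi-step initial condition before prediction
begins. Below is a standalone illustration of that pattern with a plain GRU; the
names and sizes are illustrative and not part of dair_pll.

import torch

gru = torch.nn.GRU(input_size=13, hidden_size=128, batch_first=True)


def preload_carry(x_0: torch.Tensor, carry_0: torch.Tensor):
    """x_0 is a (batch, T0, n_x) initial-condition trajectory."""
    if x_0.shape[1] > 1:
        _, carry_0 = gru(x_0[:, :-1, :], carry_0)  # consume the history
        x_0 = x_0[:, -1:, :]                       # keep only the final state
    return x_0, carry_0


x_last, carry = preload_carry(torch.randn(4, 5, 13), torch.zeros(1, 4, 128))
assert x_last.shape == (4, 1, 13) and carry.shape == (1, 4, 128)
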
diff --git a/dair_pll_old/dair_pll/deep_support_function.py b/dair_pll_old/dair_pll/deep_support_function.py
deleted file mode 100644
index ed33198..0000000
--- a/dair_pll_old/dair_pll/deep_support_function.py
+++ /dev/null
@@ -1,311 +0,0 @@
-"""Modelling and manipulation of convex support functions."""
-from typing import Callable, Tuple, List
-
-import pdb
-
-import torch
-import torch.nn
-from scipy.spatial import ConvexHull # type: ignore
-from torch import Tensor
-from torch.nn import Parameter, ParameterList, Module
-
-from dair_pll.system import MeshSummary
-from dair_pll.tensor_utils import pbmm, rotation_matrix_from_one_vector
-
-# pylint: disable=E1103
-_LINEAR_SPACE = torch.linspace(-1, 1, steps=8)
-_GRID = torch.cartesian_prod(_LINEAR_SPACE, _LINEAR_SPACE, _LINEAR_SPACE)
-_SURFACE = _GRID[_GRID.abs().max(dim=-1).values >= 1.0]
-_SURFACE = _SURFACE / _SURFACE.norm(dim=-1, keepdim=True)
-_SURFACE_ROTATIONS = rotation_matrix_from_one_vector(_SURFACE, 2)
-
-
-def get_mesh_summary_from_polygon(polygon) -> MeshSummary:
- """Assuming a standard ordering of vertices for a ``Polygon``
- representation, produce a ``MeshSummary`` of this sparse mesh.
-
- Note:
- This is a hack since it only works for ``Polygon``\s of a particular
- structure. That structure matches that provided in the example assets
- ``contactnets_cube.obj`` and ``contactnets_elbow_half.obj``.
-
- Args:
- polygon: A ``Polygon`` ``CollisionGeometry``.
-
- Returns:
- A ``MeshSummary`` of the polygon.
- """
- # Use arbitrary direction to query the Polygon's vertices (value does not
- # matter).
- arbitrary_direction = torch.ones((1,3))
- vertices = polygon.get_vertices(
- arbitrary_direction).squeeze(0).clone().detach()
-
- hull = ConvexHull(vertices.numpy())
- faces = Tensor(hull.simplices).to(torch.long) # type: ignore
-
- return MeshSummary(vertices=vertices, faces=faces)
-
-
-def extract_obj_from_support_function(
- support_function: Callable[[Tensor], Tensor]) -> str:
- """Given a support function, extracts a Wavefront obj representation.
-
- Args:
- support_function: Callable support function.
-
- Returns:
- Wavefront .obj string
- """
- mesh_summary = extract_mesh_from_support_function(support_function)
- return extract_obj_from_mesh_summary(mesh_summary)
-
-
-def extract_obj_from_mesh_summary(mesh_summary: MeshSummary) -> str:
- """Given a mesh summary, extracts a Wavefront obj representation.
-
- Args:
- mesh_summary: Object vertices and face indices in the form of a
- ``MeshSummary``.
-
- Returns:
- Wavefront .obj string
- """
- normals = extract_outward_normal_hyperplanes(
- mesh_summary.vertices.unsqueeze(0),
- mesh_summary.faces.unsqueeze(0)
- )[0].squeeze(0)
-
- obj_string = ""
- for vertex in mesh_summary.vertices:
- vertex_string = " ".join([str(v_i.item()) for v_i in vertex])
- obj_string += f'v {vertex_string}\n'
-
- obj_string += '\n\n'
-
- for normal in normals:
- normal_string = " ".join([str(n_i.item()) for n_i in normal])
- obj_string += f'vn {normal_string}\n'
-
- obj_string += '\n\n'
-
- for face_index, face in enumerate(mesh_summary.faces):
- face_string = " ".join([f'{f_i.item() + 1}//{face_index + 1}' for f_i in face])
- obj_string += f'f {face_string}\n'
-
- return obj_string
-
-
-def extract_outward_normal_hyperplanes(vertices: Tensor, faces: Tensor):
- r"""Extract hyperplane representation of convex hull from vertex-plane
- representation.
-
- Constructs a set of (outward) normal vectors and intercept values.
- Additionally, notes a boolean value that is ``True`` iff the face vertices
- are in counter-clockwise order when viewed from the outside.
-
- Mathematically for a face :math:`(v_1, v_2, v_3)`\ , in counter-clockwise
- order, this function returns :math:`\hat n`\ , the unit vector in the
-    :math:`(v_2 - v_1) \times (v_3 - v_1)` direction, and intercept
- :math:`d = \hat n \cdot v_1`\ .
-
- Args:
- vertices: ``(*, N, 3)`` batch of polytope vertices.
- faces: ``(*, M, 3)`` batch of polytope triangle face vertex indices.
-
- Returns:
- ``(*, M, 3)`` face outward normals.
- ``(*, M)`` whether each face is in counter-clockwise order.
- ``(*, M)`` face hyperplane intercepts.
- """
- batch_range = torch.arange(vertices.shape[0]).unsqueeze(1).repeat(
- (1, faces.shape[-2]))
- centroids = vertices.mean(dim=-2, keepdim=True)
- v_a = vertices[batch_range, faces[..., 0]]
- v_b = vertices[batch_range, faces[..., 1]]
- v_c = vertices[batch_range, faces[..., 2]]
- outward_normals = torch.cross(v_b - v_a, v_c - v_a)
- outward_normals /= outward_normals.norm(dim=-1, keepdim=True)
- backwards = (outward_normals * (v_a - centroids)).sum(dim=-1) < 0.
- outward_normals[backwards] *= -1
- extents = (v_a * outward_normals).sum(dim=-1)
- return outward_normals, backwards, extents
-
-
-def extract_mesh_from_support_function(
- support_function: Callable[[Tensor], Tensor]) -> MeshSummary:
- """Given a support function, extracts a vertex/face mesh.
-
- Args:
- support_function: Callable support function.
-
- Returns:
- Object vertices and face indices.
- """
- support_points = support_function(_SURFACE).detach()
- support_point_hashes = set()
- unique_support_points = []
-
- # remove duplicate vertices
- for vertex in support_points:
- vertex_hash = hash(vertex.numpy().tobytes())
- if vertex_hash in support_point_hashes:
- continue
- support_point_hashes.add(vertex_hash)
- unique_support_points.append(vertex)
-
- vertices = torch.stack(unique_support_points)
- hull = ConvexHull(vertices.numpy())
- faces = Tensor(hull.simplices).to(torch.long) # type: ignore
-
- _, backwards, _ = extract_outward_normal_hyperplanes(
- vertices.unsqueeze(0), faces.unsqueeze(0))
- backwards = backwards.squeeze(0)
- faces[backwards] = faces[backwards].flip(-1)
-
- return MeshSummary(vertices=support_points, faces=faces)
-
-
-class HomogeneousICNN(Module):
- r""" Homogeneous Input-convex Neural Networks.
-
-    Implements a positively-homogeneous version of an ICNN :cite:p:`AmosICNN2017`\ .
-
- These networks have the structure :math:`f(d)` where
-
- .. math::
- \begin{align}
- h_0 &= \sigma(W_{d,0} d),\\
- h_i &= \sigma(W_{d,i} d + W_{h,i} h_{i-1}),\\
- f(d) &= W_{h,D} h_D,
- \end{align}
-
- where each :math:`W_{h,i} \geq 0` and :math:`\sigma` is a convex and
- monotonic :py:class:`~torch.nn.LeakyReLU`\ .
- """
- activation: Module
- r"""Activation module (:py:class:`~torch.nn.LeakyReLU`\ )."""
- hidden_weights: ParameterList
- r"""Scale of hidden weight matrices :math:`W_{h,i} \geq 0`\ ."""
- input_weights: ParameterList
- r"""List of input-injection weight matrices :math:`W_{d,i}`\ ."""
- output_weight: Parameter
- r"""Output weight vector :math:`W_{h,D} \geq 0`\ ."""
-
- def __init__(self,
- depth: int,
- width: int,
- negative_slope: float = 0.5,
- scale=1.0) -> None:
- r"""
- Args:
- depth: Network depth :math:`D`\ .
- width: Network width.
- negative_slope: Negative slope of LeakyReLU activation.
- scale: Length scale of object in meters.
- """
- assert 0.0 <= negative_slope < 1.0
- super().__init__()
-
- hidden_weights = []
- scale_hidden = 2 * (2.0 / (1 + negative_slope**2))**0.5 / width
- for _ in range(depth - 1):
- hidden_weight = 2 * (torch.rand((width, width)) - 0.5)
- hidden_weight *= scale_hidden
- hidden_weights.append(Parameter(hidden_weight, requires_grad=True))
-
- input_weights = []
- for layer in range(depth):
- input_weight = torch.empty((3, width))
-            torch.nn.init.kaiming_uniform_(input_weight)
- if layer > 0:
- input_weight *= 2**(-0.5)
- input_weights.append(Parameter(input_weight, requires_grad=True))
-
- scale_out = scale * 2 * (2.0 / (width * (1 + negative_slope**2)))**0.5
- output_weight = 2 * (torch.rand(width) - 0.5) * scale_out
-
- self.hidden_weights = ParameterList(hidden_weights)
- self.input_weights = ParameterList(input_weights)
- self.output_weight = Parameter(output_weight, requires_grad=True)
- self.activation = torch.nn.LeakyReLU(negative_slope=negative_slope)
-
- def abs_weights(self) -> Tuple[List[Tensor], Tensor]:
- r"""Returns non-negative version of hidden weight matrices by taking
- absolute value of :py:attr:`hidden_weights` and
- :py:attr:`output_weight`\ ."""
- abs_hidden_wts = [torch.abs(weight) for weight in self.hidden_weights]
- return abs_hidden_wts, torch.abs(self.output_weight)
-
- def activation_jacobian(self, activations: Tensor) -> Tensor:
- """Returns flattened diagonal Jacobian of LeakyReLU activation.
-
- The jacobian is simply ``1`` at indices where the activations are
- positive and :py:attr:`self.activation.negative_slope` otherwise.
-
- Args:
- activations: `(*, width)` output of activation function for some
- layer.
-
- Returns:
- `(*, width)` activation jacobian.
- """
- jacobian = torch.ones_like(activations)
- jacobian[activations <= 0] *= self.activation.negative_slope
- return jacobian
-
- def network_activations(self,
- directions: Tensor) -> Tuple[List[Tensor], Tensor]:
- """Forward evaluation of the network activations
-
- Args:
- directions: ``(*, 3)`` network inputs.
-
- Returns:
- List of ``(*, width)`` hidden layer activations.
- ``(*,)`` network output
- """
- hiddens = []
- hidden_wts, output_wt = self.abs_weights()
- input_wts = self.input_weights
- # (*, 3) x (*, 3, W)
- directions = directions.float()
- input_wts = input_wts.float()
- hiddens.append(self.activation(pbmm(directions, input_wts[0])))
- # print(hiddens[-1].norm(dim=-1).mean(dim=0))
- for hidden_wt, input_wt in zip(hidden_wts, input_wts[1:]):
- linear_hidden = pbmm(hiddens[-1].float(), hidden_wt.float())
- linear_input = pbmm(directions, input_wt)
- linear_output = linear_hidden + linear_input
- hiddens.append(self.activation(linear_output))
- output = pbmm(hiddens[-1].float(), output_wt.float())
- return hiddens, output.squeeze(-1)
-
- def forward(self, directions: Tensor) -> Tensor:
- """Evaluates support function Jacobian at provided inputs.
-
- Args:
- directions: ``(*, 3)`` network inputs.
-
- Returns:
- ``(*, 3)`` network input Jacobian.
- """
- hidden_wts, output_wt = self.abs_weights()
- hiddens, _ = self.network_activations(directions)
- input_wts = self.input_weights
-
- hidden_jacobian = (output_wt.expand(hiddens[-1].shape) *
- self.activation_jacobian(hiddens[-1])).unsqueeze(-1)
-
- jacobian = torch.zeros_like(directions)
- layer_bundle = zip(reversed(hiddens[:-1]), reversed(hidden_wts),
- reversed(list(input_wts[1:])))
-
- for hidden, hidden_wt, input_wt in layer_bundle:
- jacobian += pbmm(input_wt.float(), hidden_jacobian.float()).squeeze(-1)
-
- hidden_jacobian = pbmm(hidden_wt, hidden_jacobian) * \
- self.activation_jacobian(hidden).unsqueeze(-1)
- jacobian += pbmm(input_wts[0], hidden_jacobian).squeeze(-1)
-
- return jacobian
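
The HomogeneousICNN docstring above gives the layer recursion h_0 = sigma(W_{d,0} d),
h_i = sigma(W_{d,i} d + W_{h,i} h_{i-1}), f(d) = W_{h,D} h_D with non-negative hidden
weights. The toy sketch below (a re-implementation for illustration, not the class
itself) checks the property this structure buys: with bias-free layers and a
LeakyReLU activation the map is positively homogeneous, f(a d) = a f(d) for a > 0,
one of the defining properties of a support function.

import torch

torch.manual_seed(0)
width, depth = 16, 3
act = torch.nn.LeakyReLU(negative_slope=0.5)
input_wts = [torch.randn(3, width) for _ in range(depth)]          # W_{d,i}
hidden_wts = [torch.rand(width, width) for _ in range(depth - 1)]  # W_{h,i} >= 0
output_wt = torch.rand(width)                                      # W_{h,D} >= 0


def support(d: torch.Tensor) -> torch.Tensor:
    h = act(d @ input_wts[0])
    for w_h, w_d in zip(hidden_wts, input_wts[1:]):
        h = act(h @ w_h + d @ w_d)
    return h @ output_wt


d = torch.randn(5, 3)
assert torch.allclose(support(2.5 * d), 2.5 * support(d), rtol=1e-4, atol=1e-6)
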
diff --git a/dair_pll_old/dair_pll/drake_experiment.py b/dair_pll_old/dair_pll/drake_experiment.py
deleted file mode 100644
index 6077a98..0000000
--- a/dair_pll_old/dair_pll/drake_experiment.py
+++ /dev/null
@@ -1,508 +0,0 @@
-"""Wrappers for Drake/ContactNets multibody experiments."""
-import time
-from abc import ABC
-from dataclasses import field, dataclass
-from enum import Enum
-from typing import Optional, cast, Dict, Callable
-import pdb
-
-import torch
-from torch import Tensor
-from torch.utils.data import DataLoader
-
-from dair_pll import file_utils
-# from dair_pll import vis_utils
-from dair_pll.deep_learnable_system import DeepLearnableExperiment
-from dair_pll.drake_system import DrakeSystem
-from dair_pll.experiment import SupervisedLearningExperiment, \
- LEARNED_SYSTEM_NAME, PREDICTION_NAME, TARGET_NAME, \
- TRAJECTORY_PENETRATION_NAME, LOGGING_DURATION
-from dair_pll.experiment_config import SystemConfig, \
- SupervisedLearningExperimentConfig
-from dair_pll.hyperparameter import Float
-from dair_pll.multibody_learnable_system import \
- MultibodyLearnableSystem, LOSS_INERTIA_AGNOSTIC, LOSS_BALANCED, \
- LOSS_POWER, LOSS_PLL_ORIGINAL, LOSS_CONTACT_VELOCITY, LOSS_VARIATIONS, \
- LOSS_VARIATION_NUMBERS
-from dair_pll.system import System, SystemSummary
-
-
-@dataclass
-class DrakeSystemConfig(SystemConfig):
- urdfs: Dict[str, str] = field(default_factory=dict)
-
-
-class MultibodyLosses(Enum):
- PREDICTION_LOSS = 1
- CONTACTNETS_LOSS = 2
-
-
-@dataclass
-class DrakeMultibodyLearnableExperimentConfig(
-        SupervisedLearningExperimentConfig):
- visualize_learned_geometry: bool = True
- """Whether to use learned geometry in trajectory overlay visualization."""
-
-
-@dataclass
-class MultibodyLearnableSystemConfig(DrakeSystemConfig):
- loss: MultibodyLosses = MultibodyLosses.PREDICTION_LOSS
- """Whether to use ContactNets or prediction loss."""
- inertia_mode: int = 4
- """What inertial parameters to learn."""
- loss_variation: str = LOSS_POWER
- """What loss variation to use."""
- w_pred: float = 1.0
-    """Weight of prediction term in ContactNets loss (suggested to keep at 1.0)."""
- w_comp: Float = Float(1e0, log=True) #1e-1
- """Weight of complementarity term in ContactNets loss."""
- w_diss: Float = Float(1e0, log=True)
- """Weight of dissipation term in ContactNets loss."""
- w_pen: Float = Float(1e0, log=True) #1e1
- """Weight of penetration term in ContactNets loss."""
- w_res: Float = Float(1e0, log=True)
- """Weight of residual norm in loss."""
- w_res_w: Float = Float(1e0, log=True)
- """Weight of residual weights in loss."""
- do_residual: bool = False
- """Whether to include a residual physics block."""
- network_width: int = 128
- """Width of residual network."""
- network_depth: int = 2
- """Depth of residual network."""
- represent_geometry_as: str = 'box'
- """How to represent geometry (box, mesh, or polygon)."""
- randomize_initialization: bool = True
- """Whether to randomize initialization."""
- g_frac: float = 1.0
- """What fraction of the true gravitational constant to use."""
-
-
-class DrakeExperiment(SupervisedLearningExperiment, ABC):
- base_drake_system: Optional[DrakeSystem]
- augmented_drake_system: Optional[DrakeSystem]
- visualization_system: Optional[DrakeSystem]
-
- def __init__(self, config: SupervisedLearningExperimentConfig) -> None:
- super().__init__(config)
- self.base_drake_system = None
- self.visualization_system = None
-
- def get_drake_system(self) -> DrakeSystem:
- has_property = hasattr(self, 'base_drake_system')
- if not has_property or self.base_drake_system is None:
- base_config = cast(DrakeSystemConfig, self.config.base_config)
- dt = self.config.data_config.dt
- self.base_drake_system = DrakeSystem(base_config.urdfs, dt)
- return self.base_drake_system
-
- def get_base_system(self) -> System:
- return self.get_drake_system()
-
- def get_augmented_system(self, additional_forces: str) -> DrakeSystem:
- """Get a ``DrakeSystem`` where the Drake multibody plant has additional
- forces in the ``applied_generalized_force`` or ``applied_spatial_force``
- input ports."""
- print("Getting augmented system!")
- has_property = hasattr(self, 'augmented_drake_system')
- if not has_property or self.augmented_drake_system is None:
- base_config = cast(DrakeSystemConfig, self.config.base_config)
- dt = self.config.data_config.dt
- self.augmented_drake_system = DrakeSystem(
- base_config.urdfs, dt, additional_forces=additional_forces)
- return self.augmented_drake_system
-
- def get_learned_drake_system(
- self, learned_system: System) -> Optional[DrakeSystem]:
- r"""If possible, constructs a :py:class:`DrakeSystem` -equivalent
- model of the given learned system, such as when the learned system is a
- :py:class:`MultibodyLearnableSystem`\ .
-
- Args:
- learned_system: System being learned in experiment.
-
- Returns:
- Drake version of learned system.
- """
- return None
-
- def visualizer_regeneration_is_required(self) -> bool:
-        """Checks whether the visualizer should be regenerated, e.g. if
-        learned geometries have been updated and need to be pushed to the
-        visualizer.
- """
- return False
-
- # def get_visualization_system(self, learned_system: System) -> DrakeSystem:
-        # """Generate a dummy :py:class:`DrakeSystem` for visualizing
-        # comparisons between trajectories generated by the base system and
-        # something else, e.g. data.
-        #
-        # Implemented as a thin wrapper of
-        # ``vis_utils.generate_visualization_system()``, which generates a
-        # Drake system where each model in the base :py:class:`DrakeSystem`
-        # has a duplicate, and visualization elements are repainted for
-        # visual distinction.
-        #
-        # Args:
-        #     learned_system: Current trained learnable system.
-        #
-        # Returns:
-        #     New :py:class:`DrakeSystem` with doubled state and repainted
-        #     elements.
-        # """
- # Generate a new visualization system if it needs to use the updated
- # geometry, or if it hasn't been created yet.
- # regeneration_is_required = self.visualizer_regeneration_is_required()
- # if regeneration_is_required or self.visualization_system is None:
- # visualization_file = file_utils.get_trajectory_video_filename(
- # self.config.storage, self.config.run_name)
- # base_system = self.get_drake_system()
- # self.visualization_system = \
- # vis_utils.generate_visualization_system(
- # base_system,
- # visualization_file,
- # learned_system=self.get_learned_drake_system(learned_system)
- # )
-
- # return self.visualization_system
-
- def base_and_learned_comparison_summary(
- self, statistics: Dict, learned_system: System) -> SystemSummary:
- r"""Extracts a :py:class:`~dair_pll.system.SystemSummary` that compares
- the base system to the learned system.
-
- For Drake-based experiments, this comparison is implemented as
- overlaid videos of corresponding ground-truth and predicted
- trajectories. The nature of this video is described further in
- :py:mod:`dair_pll.vis_utils`\ .
-
- Additionally, manually defined trajectories are used to show the learned
- geometries. This is particularly useful for more expressive geometry
- types like meshes.
-
- Args:
- statistics: Dictionary of training statistics.
- learned_system: Most updated version of learned system during
- training.
-
- Returns:
- Summary containing overlaid video(s).
- """
- # visualization_system = self.get_visualization_system(learned_system)
-
- space = self.get_drake_system().space
- videos = {}
-
- # First do overlay prediction videos.
- for traj_num in [0]:
- for set_name in ['train', 'valid']:
- target_key = f'{set_name}_{LEARNED_SYSTEM_NAME}' + \
- f'_{TARGET_NAME}'
- prediction_key = f'{set_name}_{LEARNED_SYSTEM_NAME}' + \
- f'_{PREDICTION_NAME}'
-                if target_key not in statistics:
- continue
- target_trajectory = Tensor(statistics[target_key][traj_num])
- prediction_trajectory = Tensor(
- statistics[prediction_key][traj_num])
- visualization_trajectory = torch.cat(
- (space.q(target_trajectory), space.q(prediction_trajectory),
- space.v(target_trajectory),
- space.v(prediction_trajectory)), -1)
- # video, framerate = vis_utils.visualize_trajectory(
- # visualization_system, visualization_trajectory)
- # videos[f'{set_name}_trajectory_prediction_{traj_num}'] = \
- # (video, framerate)
-
- # Second do geometry inspection videos -- only relevant for model-based.
- # if not type(self) == DrakeDeepLearnableExperiment:
- # geometry_inspection_traj = \
- # vis_utils.get_geometry_inspection_trajectory(learned_system)
- # target_trajectory = geometry_inspection_traj
- # prediction_trajectory = geometry_inspection_traj
- # visualization_trajectory = torch.cat(
- # (space.q(target_trajectory), space.q(prediction_trajectory),
- # space.v(target_trajectory), space.v(prediction_trajectory)), -1)
- # video, framerate = vis_utils.visualize_trajectory(
- # visualization_system, visualization_trajectory)
- # videos['geometry_inspection'] = (video, framerate)
-
- return SystemSummary(scalars={}, videos=videos, meshes={})
-
- def get_true_geometry_multibody_learnable_system(self
- ) -> MultibodyLearnableSystem:
-
- has_property = hasattr(self, 'true_geom_multibody_system')
- if not has_property or self.true_geom_multibody_system is None:
- oracle_system = self.get_oracle_system()
- dt = oracle_system.dt
- urdfs = oracle_system.urdfs
-
- self.true_geom_multibody_system = MultibodyLearnableSystem(
- init_urdfs=urdfs, dt=dt, inertia_mode=0, loss_variation=0,
- w_pred=1.0, w_comp=1.0, w_diss=1.0, w_pen=1.0, w_res=1.0,
- w_res_w=1.0, do_residual=False,
- represent_geometry_as = \
- self.config.learnable_config.represent_geometry_as,
- randomize_initialization = False)
-
- return self.true_geom_multibody_system
-
- def penetration_metric(self, x_pred: Tensor, _x_target: Tensor) -> Tensor:
- true_geom_system = self.get_true_geometry_multibody_learnable_system()
-
- if x_pred.dim() == 1:
- x_pred = x_pred.unsqueeze(0)
- assert x_pred.dim() == 2
- assert x_pred.shape[1] == true_geom_system.space.n_x
-
- n_steps = x_pred.shape[0]
-
- phi, _ = true_geom_system.multibody_terms.contact_terms(x_pred)
- phi = phi.detach().clone()
- smallest_phis = phi.min(dim=1).values
- return -smallest_phis[smallest_phis < 0].sum() / n_steps
-
- def extra_metrics(self) -> Dict[str, Callable[[Tensor, Tensor], Tensor]]:
- # Calculate penetration metric
- return {TRAJECTORY_PENETRATION_NAME: self.penetration_metric}
-
-
-class DrakeDeepLearnableExperiment(DrakeExperiment, DeepLearnableExperiment):
- pass
-
-
-class DrakeMultibodyLearnableExperiment(DrakeExperiment):
-
- def __init__(self, config: SupervisedLearningExperimentConfig) -> None:
- super().__init__(config)
- self.learnable_config = cast(MultibodyLearnableSystemConfig,
- self.config.learnable_config)
- if self.learnable_config.loss == MultibodyLosses.CONTACTNETS_LOSS:
- self.loss_callback = self.contactnets_loss
- elif self.learnable_config.loss == MultibodyLosses.PREDICTION_LOSS:
- self.loss_callback = self.prediction_with_regularization_loss
- else:
- raise RuntimeError(f"Loss {self.learnable_config.loss} not " + \
-                               "recognized for Drake multibody experiment.")
-
- def get_learned_system(self, _: Tensor) -> MultibodyLearnableSystem:
- learnable_config = cast(MultibodyLearnableSystemConfig,
- self.config.learnable_config)
- output_dir = file_utils.get_learned_urdf_dir(self.config.storage,
- self.config.run_name)
- return MultibodyLearnableSystem(
- learnable_config.urdfs,
- self.config.data_config.dt,
- learnable_config.inertia_mode,
- learnable_config.loss_variation,
- w_pred = learnable_config.w_pred,
- w_comp = learnable_config.w_comp.value,
- w_diss = learnable_config.w_diss.value,
- w_pen = learnable_config.w_pen.value,
- w_res = learnable_config.w_res.value,
- w_res_w = learnable_config.w_res_w.value,
- output_urdfs_dir=output_dir,
- do_residual=learnable_config.do_residual,
- represent_geometry_as=learnable_config.represent_geometry_as,
- randomize_initialization=learnable_config.randomize_initialization,
- g_frac=learnable_config.g_frac)
-
- def write_to_wandb(self, epoch: int, learned_system: System,
- statistics: Dict) -> None:
- """In addition to extracting and writing training progress summary via
- the parent :py:meth:`Experiment.write_to_wandb` method, also make a
- breakdown plot of loss contributions for the ContactNets loss
- formulation.
-
- Args:
- epoch: Current epoch.
- learned_system: System being trained.
- statistics: Summary statistics for learning process.
- """
- assert self.wandb_manager is not None
-
- # begin recording wall-clock logging time.
- start_log_time = time.time()
-
- # To save space on W&B storage, only generate comparison videos at first
- # and best epoch, the latter of which is implemented in
- # :meth:`_evaluation`.
-        skip_videos = (epoch != 0)
-
- epoch_vars, learned_system_summary = \
- self.build_epoch_vars_and_system_summary(statistics, learned_system,
- skip_videos=skip_videos)
-
- # Start computing individual loss components.
- # First get a batch sized portion of the shuffled training set.
- train_traj_set, _, _ = \
- self.learning_data_manager.get_updated_trajectory_sets()
- train_dataloader = DataLoader(
- train_traj_set.slices,
- batch_size=self.config.optimizer_config.batch_size.value,
- shuffle=True)
-
- # Calculate the average loss components.
- losses_pred, losses_comp, losses_pen, losses_diss = [], [], [], []
- residual_norm, residual_weight, inertia_cond_num = [], [], []
- for xy_i in train_dataloader:
- x_i: Tensor = xy_i[0]
- y_i: Tensor = xy_i[1]
-
- x = x_i[..., -1, :]
- x_plus = y_i[..., 0, :]
- u = torch.zeros(x.shape[:-1] + (0,))
-
- loss_pred, loss_comp, loss_pen, loss_diss = \
- learned_system.calculate_contactnets_loss_terms(x, u, x_plus)
- regularizers = \
- learned_system.get_regularization_terms(x, u, x_plus)
-
- losses_pred.append(loss_pred.clone().detach())
- losses_comp.append(loss_comp.clone().detach())
- losses_pen.append(loss_pen.clone().detach())
- losses_diss.append(loss_diss.clone().detach())
- residual_norm.append(regularizers[0].clone().detach())
- residual_weight.append(regularizers[1].clone().detach())
- inertia_cond_num.append(regularizers[2].clone().detach())
-
- def really_weird_fix_for_cluster_only(list_of_tensors):
- """For some reason, on the cluster only, the last item in the loss
- lists can be a different shape than the rest of the items, and this
- results in an error with the ``sum(losses_pred)`` below. For now,
- the fix (hack) is to just drop that last term.
-
- TODO: Figure out what is going on.
- """
- if (len(list_of_tensors) > 1) and \
- (list_of_tensors[-1].shape != list_of_tensors[0].shape):
- return list_of_tensors[:-1]
- return list_of_tensors
-
- losses_pred = really_weird_fix_for_cluster_only(losses_pred)
- losses_comp = really_weird_fix_for_cluster_only(losses_comp)
- losses_pen = really_weird_fix_for_cluster_only(losses_pen)
- losses_diss = really_weird_fix_for_cluster_only(losses_diss)
- residual_norm = really_weird_fix_for_cluster_only(residual_norm)
- residual_weight = really_weird_fix_for_cluster_only(residual_weight)
- inertia_cond_num = really_weird_fix_for_cluster_only(inertia_cond_num)
-
- # Calculate average and scale by hyperparameter weights.
- w_pred = self.learnable_config.w_pred
- w_comp = self.learnable_config.w_comp.value
- w_diss = self.learnable_config.w_diss.value
- w_pen = self.learnable_config.w_pen.value
- w_res = self.learnable_config.w_res.value
- w_res_w = self.learnable_config.w_res_w.value
-
- avg_loss_pred = w_pred*cast(Tensor, sum(losses_pred) \
- / len(losses_pred)).mean()
- avg_loss_comp = w_comp*cast(Tensor, sum(losses_comp) \
- / len(losses_comp)).mean()
- avg_loss_pen = w_pen*cast(Tensor, sum(losses_pen) \
- / len(losses_pen)).mean()
- avg_loss_diss = w_diss*cast(Tensor, sum(losses_diss) \
- / len(losses_diss)).mean()
- avg_residual_norm = w_res*cast(Tensor, sum(residual_norm) \
- / len(residual_norm)).mean()
-        avg_residual_weight = w_res_w*cast(Tensor, sum(residual_weight) \
-                                            / len(residual_weight)).mean()
- avg_inertia_cond_num = 1e-5 * cast(Tensor, sum(inertia_cond_num) \
- / len(inertia_cond_num)).mean()
-
- avg_loss_total = torch.sum(avg_loss_pred + avg_loss_comp + \
- avg_loss_pen + avg_loss_diss + \
- avg_residual_norm + avg_residual_weight + \
- avg_inertia_cond_num)
-
- loss_breakdown = {'loss_total': avg_loss_total,
- 'loss_pred': avg_loss_pred,
- 'loss_comp': avg_loss_comp,
- 'loss_pen': avg_loss_pen,
- 'loss_diss': avg_loss_diss,
- 'loss_res_norm': avg_residual_norm,
- 'loss_res_weight': avg_residual_weight,
- 'loss_inertia_cond': avg_inertia_cond_num}
-
- # Include the loss components into system summary.
- epoch_vars.update(loss_breakdown)
-
- # Overwrite the logging time.
- logging_duration = time.time() - start_log_time
- epoch_vars[LOGGING_DURATION] = logging_duration
-
- self.wandb_manager.update(epoch, epoch_vars,
- learned_system_summary.videos,
- learned_system_summary.meshes)
-
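A compact sketch (made-up weights and per-batch values, not the real loss terms) of the reduction used in write_to_wandb above: each component is accumulated per batch, averaged over batches, scaled by its weight, and collected into a single breakdown dictionary for logging:

import torch

w = {'pred': 1.0, 'comp': 0.1, 'pen': 10.0}
per_batch = {'pred': [torch.tensor([0.2, 0.4]), torch.tensor([0.3, 0.1])],
             'comp': [torch.tensor([1.0, 2.0]), torch.tensor([3.0, 1.0])],
             'pen':  [torch.tensor([0.0, 0.1]), torch.tensor([0.2, 0.1])]}

# Average each component across batches, then scale by its weight.
breakdown = {f'loss_{k}': w[k] * (sum(v) / len(v)).mean()
             for k, v in per_batch.items()}
breakdown['loss_total'] = sum(breakdown.values())
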
- def visualizer_regeneration_is_required(self) -> bool:
- return cast(SupervisedLearningExperimentConfig,
- self.config).update_geometry_in_videos
-
- def get_learned_drake_system(
- self, learned_system: System) -> Optional[DrakeSystem]:
- if self.visualizer_regeneration_is_required():
- new_urdfs = cast(MultibodyLearnableSystem,
- learned_system).generate_updated_urdfs('vis')
- return DrakeSystem(new_urdfs, self.get_drake_system().dt,
- g_frac=self.config.learnable_config.g_frac)
- return None
-
- def prediction_with_regularization_loss(
- self, x_past: Tensor, x_future: Tensor, system: System,
- keep_batch: bool = False) -> Tensor:
-        """Returns the prediction loss plus any applicable regularization
-        terms, e.g., regularization on the size/weights of a residual
-        network, if one is present.
- """
- w_res = self.learnable_config.w_res.value
- w_res_w = self.learnable_config.w_res_w.value
-
- prediction_loss = self.prediction_loss(x_past, x_future, system,
- keep_batch)
-
- x = x_past[..., -1, :]
- u = torch.zeros(x.shape[:-1] + (0,))
- x_plus = x_future[..., 0, :]
-
- regularizers = system.get_regularization_terms(x, u, x_plus)
- if len(regularizers) > 3:
-            raise NotImplementedError(
-                "Don't recognize more than three regularization terms.")
- elif len(regularizers) == 3:
- reg_term = (regularizers[0] * w_res) + \
- (regularizers[1] * w_res_w) + \
- (regularizers[2] * 1e-5)
- else:
- reg_term = torch.zeros_like(prediction_loss)
-
- if not keep_batch:
- prediction_loss = prediction_loss.mean()
- reg_term = reg_term.mean()
-
- return prediction_loss + reg_term
-
- def contactnets_loss(self,
- x_past: Tensor,
- x_future: Tensor,
- system: System,
- keep_batch: bool = False) -> Tensor:
- r""" :py:data:`~dair_pll.experiment.LossCallbackCallable`
- which applies the ContactNets [1] loss to the system.
-
- References:
- [1] S. Pfrommer*, M. Halm*, and M. Posa. "ContactNets: Learning
- Discontinuous Contact Dynamics with Smooth, Implicit
- Representations," Conference on Robotic Learning, 2020,
- https://proceedings.mlr.press/v155/pfrommer21a.html
- """
- assert isinstance(system, MultibodyLearnableSystem)
- x = x_past[..., -1, :]
- # pylint: disable=E1103
- u = torch.zeros(x.shape[:-1] + (0,))
- x_plus = x_future[..., 0, :]
- loss = system.contactnets_loss(x, u, x_plus)
- if not keep_batch:
- loss = loss.mean()
- return loss
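
A small shape sketch (hypothetical sizes, plain tensors; illustrative only) of how contactnets_loss slices a trajectory window: the most recent past state becomes x, the first future state becomes x_plus, and u is an empty control input of matching batch shape:

import torch

batch, t_history, t_prediction, n_x = 4, 3, 2, 13
x_past = torch.randn(batch, t_history, n_x)
x_future = torch.randn(batch, t_prediction, n_x)

x = x_past[..., -1, :]                # (batch, n_x), most recent past state
x_plus = x_future[..., 0, :]          # (batch, n_x), first future state
u = torch.zeros(x.shape[:-1] + (0,))  # (batch, 0), empty control input

assert x.shape == (batch, n_x) and u.shape == (batch, 0)
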
diff --git a/dair_pll_old/dair_pll/drake_state_converter.py b/dair_pll_old/dair_pll/drake_state_converter.py
deleted file mode 100644
index 50c15a5..0000000
--- a/dair_pll_old/dair_pll/drake_state_converter.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""Utility classes for interfacing with Drake's internal state format.
-
-Classes herein mainly support the implementation of ``DrakeStateConverter``. In
-order to make Drake states compatible with available ``StateSpace``
-inheriting classes, users must define the drake system by a collection of
-URDF files, each of which contains a model for exactly one floating- or
-fixed-base rigid multibody chain. This allows for the system to be modeled as
-having a ``ProductSpace`` state space, where each factor space is a
-``FloatingBaseSpace`` or ``FixedBaseSpace``.
-
-For flexible usage, the conversion is between contexts and numpy ``ndarray``
-types. This is particularly useful as it allows pydrake symbolic types to
-be used, facilitating differentiable geometric analysis of the relationship
-between the coordinate systems in ``multibody_terms.py``.
-"""
-from typing import Tuple, List
-
-import numpy as np
-from pydrake.multibody.plant import MultibodyPlant # type: ignore
-from pydrake.multibody.tree import ModelInstanceIndex # type: ignore
-from pydrake.systems.framework import Context # type: ignore
-
-from dair_pll import quaternion
-from dair_pll import state_space
-
-
-def state_ndarray_reformat(x: np.ndarray) -> np.ndarray:
- """Resizes Drake coordinates to ``StateSpace`` batch."""
- return np.copy(x).reshape(1, x.size)
-
-
-def drake_ndarray_reformat(x: np.ndarray) -> np.ndarray:
- """Resizes ``StateSpace`` batch to Drake coordinates."""
- return np.copy(x).reshape(x.size)
-
-
-class DrakeFloatingBaseStateConverter:
- """Converts between the ``np.ndarray`` state coordinates of a Drake
- MultibodyPlant model instance and a floating-base open kinematic chain.
-
- When a Drake model instance is a single floating-base rigid chain,
-    it represents the configuration in the tangent bundle of
-    SE(3) x R^n_joints, with coordinates given as a quaternion, world-frame
-    floating-base c.o.m., joint positions/angles, world-axes floating-base
-    angular/linear velocity, and joint velocities.
-
- Conversion between coordinate sets is then simply a frame transformation
- on the angular velocity between world and floating base frame.
- """
-
- @staticmethod
- def drake_to_state(q_drake: np.ndarray,
- v_drake: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
- """Formats configuration and velocity into row vectors, and rotates
- angular velocity into body frame."""
- q = state_ndarray_reformat(q_drake)
- v = state_ndarray_reformat(v_drake)
- v[..., :3] = quaternion.rotate(quaternion.inverse(q[..., :4]),
- v[..., :3])
- return q, v
-
- @staticmethod
- def state_to_drake(q: np.ndarray,
- v: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
- """Formats configuration and velocity into squeezed vectors,
- and rotates angular velocity into world frame."""
- q_drake = state_ndarray_reformat(q)
- v_drake = state_ndarray_reformat(v)
- v_drake[..., :3] = quaternion.rotate(q[..., :4].reshape(1, -1),
- v_drake[..., :3])
- q_drake = q_drake.reshape(q_drake.size)
- v_drake = v_drake.reshape(v_drake.size)
- return q_drake, v_drake
-
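A Drake-free sketch (using scipy in place of dair_pll.quaternion, and assuming a w-first quaternion convention) of the only nontrivial step in the floating-base conversion: rotating the angular velocity between the world frame used by Drake and the body frame used by the state space:

import numpy as np
from scipy.spatial.transform import Rotation

q_wxyz = np.array([0.8, 0.0, 0.6, 0.0])        # unit quaternion, w first (assumed)
rot = Rotation.from_quat(np.roll(q_wxyz, -1))  # scipy expects xyzw ordering
w_world = np.array([0.1, -0.2, 0.3])

w_body = rot.inv().apply(w_world)   # drake_to_state direction (world -> body)
w_back = rot.apply(w_body)          # state_to_drake direction (body -> world)
assert np.allclose(w_back, w_world)
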
-
-class DrakeFixedBaseStateConverter:
- """Converts between the ``np.ndarray`` state coordinates of a Drake
- MultibodyPlant model instance and a fixed-base open kinematic chain.
-
- When a Drake model instance is a single fixed-base rigid chain,
- it represents the configuration in tangent bundle of R^n_joints,
- with the same exact coordinate system that ``FixedBaseSpace`` uses.
- Therefore, conversion between these types is a simple passthrough that
- copies the coordinates in memory.
- """
-
- @staticmethod
- def drake_to_state(q_drake: np.ndarray,
- v_drake: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
- """Formats configuration and velocity into row vectors."""
- q = state_ndarray_reformat(q_drake)
- v = state_ndarray_reformat(v_drake)
- return q, v
-
- @staticmethod
- def state_to_drake(q: np.ndarray,
- v: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
- """Formats configuration and velocity into squeezed vectors."""
- q_drake = drake_ndarray_reformat(q)
- v_drake = drake_ndarray_reformat(v)
- return q_drake, v_drake
-
-
-class DrakeModelStateConverterFactory:
- """Factory class for selecting Drake-to-``BaseSpace`` coordinate
- conversions."""
-
- @staticmethod
- def state_to_drake(
- q: np.ndarray, v: np.ndarray,
- space: state_space.StateSpace) -> Tuple[np.ndarray, np.ndarray]:
- """Selects ``state_to_drake`` method based on presence of floating
- base from ``DrakeFloatingBaseStateConverter`` or
- ``DrakeFixedBaseStateConverter``."""
- if isinstance(space, state_space.FloatingBaseSpace):
- return DrakeFloatingBaseStateConverter.state_to_drake(q, v)
- if isinstance(space, state_space.FixedBaseSpace):
- return DrakeFixedBaseStateConverter.state_to_drake(q, v)
-
-        raise TypeError('Argument "space" must be an instance of '
-                        'FloatingBaseSpace or FixedBaseSpace!')
-
- @staticmethod
- def drake_to_state(
- q_drake: np.ndarray, v_drake: np.ndarray,
- space: state_space.StateSpace) -> Tuple[np.ndarray, np.ndarray]:
- """Selects ``drake_to_state`` method based on presence of floating
- base from ``DrakeFloatingBaseStateConverter`` or
- ``DrakeFixedBaseStateConverter``."""
- if isinstance(space, state_space.FloatingBaseSpace):
- return DrakeFloatingBaseStateConverter.drake_to_state(
- q_drake, v_drake)
- if isinstance(space, state_space.FixedBaseSpace):
- return DrakeFixedBaseStateConverter.drake_to_state(q_drake, v_drake)
-
-        raise TypeError('Argument "space" must be an instance of '
-                        'FloatingBaseSpace or FixedBaseSpace!')
-
-
-class DrakeStateConverter:
- """Utility namespace for conversion between Drake state format and
- ``ProductSpace`` formats.
-
- Given a ``MultibodyPlant`` and a complete list of its models,
- ``DrakeStateConverter`` converts between a numpy ``ndarray`` and ``Context``
- representation of the state space. This class leverages the
- one-open-kinematic-chain-per-model assumption to iterate over the models,
- and convert the factor space coordinates with
- ``DrakeModelStateConverterFactory``.
- """
-
- @staticmethod
- def context_to_state(plant: MultibodyPlant, plant_context: Context,
- model_ids: List[ModelInstanceIndex],
- space: state_space.ProductSpace) -> np.ndarray:
- """Retrieves ``ProductSpace``-formatted state from plant's context.
-
- Args:
- plant: plant from which to retrieve state.
- plant_context: plant's context which stores its state.
-            model_ids: List of the plant's model instance indices.
- space: state space of output state.
-
- Returns:
- (space.n_x,) current state of plant.
- """
- qs = []
- vs = []
- spaces = space.spaces
- for model_id, model_space in zip(model_ids, spaces):
- q_drake = plant.GetPositions(plant_context, model_id)
- v_drake = plant.GetVelocities(plant_context, model_id)
- q, v = DrakeModelStateConverterFactory.drake_to_state(
- q_drake, v_drake, model_space)
- qs.append(q)
- vs.append(v)
- q = np.concatenate(qs, axis=-1)
- v = np.concatenate(vs, axis=-1)
- return np.concatenate([q, v], axis=-1).squeeze()
-
- @staticmethod
- def state_to_context(plant: MultibodyPlant, plant_context: Context,
- x: np.ndarray, model_ids: List[ModelInstanceIndex],
- space: state_space.ProductSpace) -> None:
- """Transforms and assigns ``ProductSpace``-formatted state in plant's
- mutable context.
-
- Args:
- plant: plant in which to store state.
- plant_context: plant's context which stores its state.
- x: (1, space.n_x) or (space.n_x,) state.
-            model_ids: List of the plant's model instance indices.
- space: state space of output state.
- """
- assert x.shape[-1] == space.n_x
- qs = np.array_split(x[..., :space.n_q], space.q_splits, -1)
- vs = np.array_split(x[..., space.n_q:], space.v_splits, -1)
- spaces = space.spaces
- for model_id, model_space, model_q, model_v in zip(
- model_ids, spaces, qs, vs):
- (q_drake, v_drake) = DrakeModelStateConverterFactory.state_to_drake(
- model_q, model_v, model_space)
-
- plant.SetPositions(plant_context, model_id, q_drake)
- plant.SetVelocities(plant_context, model_id, v_drake)
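
A toy sketch (hypothetical dimensions, no Drake required; illustrative only) of the splitting that DrakeStateConverter.state_to_context performs: the concatenated ProductSpace state is chopped into per-model configuration and velocity blocks before being written into each model instance:

import numpy as np

# Suppose two models: a floating body (q: 7, v: 6) and a 2-joint arm (q: 2, v: 2).
n_q, n_v = 7 + 2, 6 + 2
q_splits, v_splits = [7], [6]          # split indices between the two models

x = np.arange(n_q + n_v, dtype=float)  # stand-in concatenated state vector
qs = np.array_split(x[:n_q], q_splits, -1)   # [(7,), (2,)] per-model configurations
vs = np.array_split(x[n_q:], v_splits, -1)   # [(6,), (2,)] per-model velocities

assert [q.shape[0] for q in qs] == [7, 2]
assert [v.shape[0] for v in vs] == [6, 2]
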
diff --git a/dair_pll_old/dair_pll/drake_system.py b/dair_pll_old/dair_pll/drake_system.py
deleted file mode 100644
index cd3373d..0000000
--- a/dair_pll_old/dair_pll/drake_system.py
+++ /dev/null
@@ -1,152 +0,0 @@
-"""Interface with Drake ``MultibodyPlant`` simulation.
-
-Interfacing with Drake is done by massaging a Drake system into the
-``System`` interface defined in ``system.py`` with a new inheriting type,
-``DrakeSystem``.
-
-A large portion of the internal implementation of ``DrakeSystem`` is contained
-in ``MultibodyPlantDiagram`` in ``drake_utils.py``.
-"""
-from typing import Tuple, Dict, Optional
-
-import torch
-from torch import Tensor
-
-from dair_pll.drake_state_converter import DrakeStateConverter
-from dair_pll.drake_utils import MultibodyPlantDiagram
-from dair_pll.integrator import StateIntegrator
-from dair_pll.state_space import ProductSpace
-from dair_pll.system import System
-
-
-class DrakeSystem(System):
- """``System`` wrapper of a Drake simulation environment for a
- ``MultibodyPlant``.
-
- Drake simulation is constructed as a ``Simulator`` of a ``Diagram`` in a
- member ``MultibodyPlantDiagram`` variable. States are converted
- between ``StateSpace`` and Drake formats via ``DrakeStateConverter``.
- """
- plant_diagram: MultibodyPlantDiagram
- urdfs: Dict[str, str]
- dt: float
- space: ProductSpace
-
- def __init__(self,
- urdfs: Dict[str, str],
- dt: float,
- visualization_file: Optional[str] = None,
- additional_forces: Optional[str] = None,
- g_frac: Optional[float] = 1.0) -> None:
- """Inits ``DrakeSystem`` with provided model URDFs.
-
- Args:
- urdfs: Names and corresponding URDFs to add as models to plant.
- dt: Time step of plant in seconds.
- visualization_file: Optional output GIF filename for trajectory
- visualization.
-            additional_forces: Optional additional forces to add to plant, e.g.
-                an arbitrary force vector field.
-            g_frac: Fraction of the true gravitational constant to use.
- """
- plant_diagram = MultibodyPlantDiagram(urdfs, dt, visualization_file,
- additional_forces, g_frac=g_frac)
-
- space = plant_diagram.generate_state_space()
- integrator = StateIntegrator(space, self.sim_step, dt)
-
- super().__init__(space, integrator)
- self.plant_diagram = plant_diagram
- self.dt = dt
- self.urdfs = urdfs
- self.set_carry_sampler(lambda: Tensor([False]))
-
- # Drake simulations cannot be batched
- self.max_batch_dim = 0
-
- def preprocess_initial_condition(self, x_0: Tensor, carry_0: Tensor) -> \
- Tuple[Tensor, Tensor]:
- """Preprocesses initial condition state sequence into single state
- initial condition for integration.
-
- Args:
- x_0: (T_0, space.n_x) initial state sequence.
- carry_0: (1, ?) initial hidden state.
-
- Returns:
- (1, space.n_x) processed initial state.
- (1, ?) processed initial hidden state.
- """
- # select most recent state in this case and ensure tensor size
- # compatibility with call to ``System.preprocess_initial_condition``
- x_0, carry_0 = super().preprocess_initial_condition(x_0, carry_0)
-
- # Set state initial condition in internal Drake ``Simulator`` context.
- plant = self.plant_diagram.plant
- sim = self.plant_diagram.sim
- sim_context = sim.get_mutable_context()
- sim_context.SetTime(self.get_quantized_start_time(0.0))
- plant_context = plant.GetMyMutableContextFromRoot(
- sim.get_mutable_context())
-
- DrakeStateConverter.state_to_context(plant, plant_context,
- x_0.detach().numpy(),
- self.plant_diagram.model_ids,
- self.space)
- sim.Initialize()
-
- return x_0, carry_0
-
- def get_quantized_start_time(self, start_time: float) -> float:
- """Get phase-aligned start time for Drake ``Simulator``.
-
- As Drake models time stepping as events in a continuous time domain,
- some special care must be taken to ensure each call to
- ``DrakeSystem.step()`` triggers one update. This is done by
- offsetting the simulation duration to advance to ``N * dt + dt/4`` to
- prevent accidentally taking 2 or 0 steps with a call to ``step()``.
-
- Args:
- start_time: Time step beginning time.
-
- Returns:
- Time step quantized starting time.
- """
- dt = self.dt
- eps = dt / 4
-
- time_step_phase = start_time % dt
- offset = (dt if time_step_phase > (dt / 2.) else 0.) - time_step_phase
- cur_time_quantized = start_time + offset + eps
-
- return cur_time_quantized
-
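A quick, standalone check (pure Python, no Drake; illustrative only) of the quantization rule above: the returned time always sits a quarter step past a multiple of dt, so advancing the simulator by exactly dt triggers exactly one update event:

dt = 0.01
eps = dt / 4

def quantize(start_time: float) -> float:
    # Mirrors get_quantized_start_time() above.
    phase = start_time % dt
    offset = (dt if phase > dt / 2. else 0.) - phase
    return start_time + offset + eps

for t in [0.0, 0.0099, 0.0101, 0.0349, 1.23456]:
    q = quantize(t)
    # q should land at N*dt + dt/4 for the nearest multiple N*dt of dt.
    assert abs((q % dt) - eps) < 1e-6, (t, q)
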
- def sim_step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Simulate forward in time one step.
-
- Args:
- x: (n_x,) current state.
- carry: (?,) current hidden state.
-
- Returns:
- (n_x,) next state.
- (?,) next hidden state.
- """
- # pylint: disable=E1103
- assert x.shape == torch.Size([self.space.n_x])
- assert carry.dim() == 1
-
- sim = self.plant_diagram.sim
- plant = self.plant_diagram.plant
-
- # Advances one time step
- finishing_time = self.get_quantized_start_time(
- sim.get_mutable_context().get_time()) + self.dt
- sim.AdvanceTo(finishing_time)
-
- # Retrieves post-step state as numpy ndarray
- new_plant_context = plant.GetMyMutableContextFromRoot(
- sim.get_mutable_context())
- x_next = DrakeStateConverter.context_to_state(
- plant, new_plant_context, self.plant_diagram.model_ids, self.space)
-
- return Tensor(x_next), carry
diff --git a/dair_pll_old/dair_pll/drake_utils.py b/dair_pll_old/dair_pll/drake_utils.py
deleted file mode 100644
index 844e52e..0000000
--- a/dair_pll_old/dair_pll/drake_utils.py
+++ /dev/null
@@ -1,407 +0,0 @@
-"""Drake simulation setup for multibody systems.
-
-This file implements :py:class:`MultibodyPlantDiagram`, which instantiates
-Drake simulation and visualization system for a given group of URDF files.
-
-Visualization is done via Drake's VideoWriter. Details on using the VideoWriter
-are available in the documentation for :py:mod:`dair_pll.vis_utils`.
-
-In order to make the Drake states compatible with available
-:py:class:`~dair_pll.state_space.StateSpace` inheriting classes,
-users must define the drake system by a collection of URDF files, each of
-which contains a model for exactly one floating- or fixed-base rigid
-multibody chain. This allows for the system to be modeled as having a
-:py:class:`~dair_pll.state_space.ProductSpace` state space, where each
-factor space is a
-:py:class:`~dair_pll.state_space.FloatingBaseSpace`
-or :py:class:`~dair_pll.state_space.FixedBaseSpace`.
-"""
-from __future__ import annotations
-
-from dataclasses import dataclass, field
-from typing import Tuple, Dict, List, Optional, Union, Type, cast
-try:
- from typing import TypeAlias
-except ImportError:
- from typing import TypeVar
- TypeAlias = TypeVar('TypeAlias')
-
-import pdb
-
-import matplotlib.pyplot as plt
-import numpy as np
-from pydrake.autodiffutils import AutoDiffXd # type: ignore
-# pylint: disable-next=import-error
-from pydrake.geometry import HalfSpace, SceneGraph # type: ignore
-# pylint: disable-next=import-error
-from pydrake.geometry import SceneGraphInspector_, GeometryId # type: ignore
-from pydrake.math import RigidTransform, RollPitchYaw, \
- RigidTransform_ # type: ignore
-from pydrake.multibody.parsing import Parser # type: ignore
-from pydrake.multibody.plant import AddMultibodyPlantSceneGraph, \
- CoulombFriction_ # type: ignore
-from pydrake.multibody.plant import CoulombFriction # type: ignore
-from pydrake.multibody.plant import MultibodyPlant # type: ignore
-from pydrake.multibody.plant import MultibodyPlant_ # type: ignore
-from pydrake.multibody.tree import ModelInstanceIndex # type: ignore
-from pydrake.multibody.tree import SpatialInertia_ # type: ignore
-from pydrake.multibody.tree import world_model_instance, Body_ # type: ignore
-from pydrake.symbolic import Expression # type: ignore
-from pydrake.systems.analysis import Simulator # type: ignore
-from pydrake.systems.drawing import plot_system_graphviz
-from pydrake.systems.framework import DiagramBuilder, \
- DiagramBuilder_ # type: ignore
-# pylint: disable-next=import-error
-from pydrake.visualization import VideoWriter # type: ignore
-
-from dair_pll import state_space
-from dair_pll.vector_fields import VortexForceVectorField, \
- ForceVectorFieldInjectorLeafSystem, ViscousDampingVectorField
-
-WORLD_GROUND_PLANE_NAME = "world_ground_plane"
-DRAKE_MATERIAL_GROUP = 'material'
-DRAKE_FRICTION_PROPERTY = 'coulomb_friction'
-N_DRAKE_FLOATING_BODY_VELOCITIES = 6
-DEFAULT_DT = 1e-3
-
-GROUND_COLOR = np.array([0.5, 0.5, 0.5, 0.1])
-
-CAM_FOV = np.pi/6
-VIDEO_PIXELS = [480, 640]
-FPS = 30
-
-# TODO: The currently hard-coded camera pose could eventually be chosen
-# dynamically to fit the actual trajectory.
-SENSOR_RPY = np.array([-np.pi / 2, 0, np.pi / 2])
-SENSOR_POSITION = np.array([2., 0., 0.2])
-SENSOR_POSE = RigidTransform(
- RollPitchYaw(SENSOR_RPY).ToQuaternion(), SENSOR_POSITION)
-
-MultibodyPlantFloat: TypeAlias = cast(Type, MultibodyPlant_[float])
-MultibodyPlantAutoDiffXd: TypeAlias = cast(Type, MultibodyPlant_[AutoDiffXd])
-MultibodyPlantExpression: TypeAlias = cast(Type, MultibodyPlant_[Expression])
-DrakeMultibodyPlant = Union[MultibodyPlantFloat, MultibodyPlantAutoDiffXd,
- MultibodyPlantExpression]
-
-BodyFloat: TypeAlias = cast(Type, Body_[float])
-BodyAutoDiffXd: TypeAlias = cast(Type, Body_[AutoDiffXd])
-BodyExpression: TypeAlias = cast(Type, Body_[Expression])
-DrakeBody = Union[BodyFloat, BodyAutoDiffXd, BodyExpression]
-
-SpatialInertiaFloat: TypeAlias = cast(Type, SpatialInertia_[float])
-SpatialInertiaAutoDiffXd: TypeAlias = cast(Type, SpatialInertia_[AutoDiffXd])
-SpatialInertiaExpression: TypeAlias = cast(Type, SpatialInertia_[Expression])
-DrakeSpatialInertia = Union[SpatialInertiaFloat, SpatialInertiaAutoDiffXd,
- SpatialInertiaExpression]
-#:
-SceneGraphInspectorFloat: TypeAlias = cast(Type, SceneGraphInspector_[float])
-SceneGraphInspectorAutoDiffXd: TypeAlias = cast(
- Type, SceneGraphInspector_[AutoDiffXd])
-DrakeSceneGraphInspector = Union[SceneGraphInspectorFloat,
- SceneGraphInspectorAutoDiffXd]
-#:
-DiagramBuilderFloat: TypeAlias = cast(Type, DiagramBuilder_[float])
-DiagramBuilderAutoDiffXd: TypeAlias = cast(Type, DiagramBuilder_[AutoDiffXd])
-DiagramBuilderExpression: TypeAlias = cast(Type, DiagramBuilder_[Expression])
-DrakeDiagramBuilder = Union[DiagramBuilderFloat, DiagramBuilderAutoDiffXd,
- DiagramBuilderExpression]
-#:
-UniqueBodyIdentifier = str
-
-
-def get_bodies_in_model_instance(
- plant: DrakeMultibodyPlant,
- model_instance_index: ModelInstanceIndex) -> List[DrakeBody]:
-    """Get list of bodies associated with model instance.
-
-    Args:
-        plant: Multibody plant to query.
-        model_instance_index: Index of the model instance whose bodies are
-            retrieved.
- """
- body_indices = plant.GetBodyIndices(model_instance_index)
- return [plant.get_body(body_index) for body_index in body_indices]
-
-
-def get_body_names_in_model_instance(
- plant: DrakeMultibodyPlant,
- model_instance_index: ModelInstanceIndex) -> List[str]:
- """Get list of body names associated with model instance."""
- bodies = get_bodies_in_model_instance(plant, model_instance_index)
- return [body.name() for body in bodies]
-
-
-def unique_body_identifier(plant: DrakeMultibodyPlant,
- body: DrakeBody) -> UniqueBodyIdentifier:
- """Unique string identifier for given ``Body_``."""
- return f'{plant.GetModelInstanceName(body.model_instance())}_{body.name()}'
-
-
-def get_all_bodies(
- plant: DrakeMultibodyPlant, model_instance_indices: List[ModelInstanceIndex]
-) -> Tuple[List[DrakeBody], List[UniqueBodyIdentifier]]:
- """Get all bodies in plant's models."""
- bodies = []
- for model_instance_index in model_instance_indices:
- bodies.extend(get_bodies_in_model_instance(plant, model_instance_index))
- return bodies, [unique_body_identifier(plant, body) for body in bodies]
-
-
-def get_all_inertial_bodies(
- plant: DrakeMultibodyPlant, model_instance_indices: List[ModelInstanceIndex]
-) -> Tuple[List[DrakeBody], List[UniqueBodyIdentifier]]:
- """Get all bodies that should have inertial parameters in plant."""
- return get_all_bodies(plant, [
- model_index for model_index in model_instance_indices
- if model_index != world_model_instance()
- ])
-
-
-@dataclass
-class CollisionGeometrySet:
- r""":py:func:`dataclasses.dataclass` for tracking object collisions."""
- ids: List[GeometryId] = field(default_factory=list)
- r"""List of geometries that may collide."""
-    frictions: List[CoulombFriction] = field(default_factory=list)
-    r"""List of Coulomb friction coefficients for the geometries."""
-    collision_candidates: List[Tuple[int, int]] = field(default_factory=list)
- r"""Pairs of geometries that may collide."""
-
-
-def get_collision_geometry_set(
- inspector: DrakeSceneGraphInspector) -> CollisionGeometrySet:
- """Get colliding geometries, frictional properties, and corresponding
- collision pairs in a scene.
-
- Args:
- inspector: Inspector of scene graph.
-
- Returns:
-        A :py:class:`CollisionGeometrySet` containing the geometries that are
-        candidates for at least one collision, their Coulomb friction
-        properties, and the index pairs of potentially colliding geometries.
- """
- geometry_ids: List[GeometryId] = []
- geometry_pairs: List[Tuple[int, int]] = []
- coulomb_frictions: List[CoulombFriction] = []
-
- for geometry_id_a, geometry_id_b in inspector.GetCollisionCandidates():
- for geometry_id in [geometry_id_a, geometry_id_b]:
- if geometry_id not in geometry_ids:
- geometry_ids.append(geometry_id)
- geometry_index_a = geometry_ids.index(geometry_id_a)
- geometry_index_b = geometry_ids.index(geometry_id_b)
- geometry_pairs.append((geometry_index_a, geometry_index_b))
-
- for geometry_id in geometry_ids:
- proximity_properties = inspector.GetProximityProperties(geometry_id)
- coulomb_frictions.append(
- proximity_properties.GetProperty(DRAKE_MATERIAL_GROUP,
- DRAKE_FRICTION_PROPERTY))
-
- return CollisionGeometrySet(ids=geometry_ids,
- frictions=coulomb_frictions,
- collision_candidates=geometry_pairs)
-
-
-def add_plant_from_urdfs(
- builder: DrakeDiagramBuilder, urdfs: Dict[str, str], dt: float
-) -> Tuple[List[ModelInstanceIndex], MultibodyPlant, SceneGraph]:
- """Add plant to builder with prescribed URDF models.
-
- Generates a world containing each given URDF as a model instance.
-
- Args:
- builder: Diagram builder to add plant to
- urdfs: Names and corresponding URDFs to add as models to plant.
- dt: Time step of plant in seconds.
-
- Returns:
-        List of model instance indices, starting with the world model and
-        followed by the instances returned by ``AddModelFromFile``.
- New plant, which has been added to builder.
- Scene graph associated with new plant.
- """
- plant, scene_graph = AddMultibodyPlantSceneGraph(builder, dt)
- parser = Parser(plant)
-
- # Build [model instance index] list, starting with world model, which is
- # always added by default.
- model_ids = [world_model_instance()]
- model_ids.extend(
- [parser.AddModelFromFile(urdf, name) for name, urdf in urdfs.items()])
-
- return model_ids, plant, scene_graph
-
-
-class MultibodyPlantDiagram:
- """Constructs and manages a diagram, simulator, and optionally a visualizer
-    for a multibody system described in a list of URDFs.
-
- This minimal diagram consists of a ``MultibodyPlant``, ``SceneGraph``, and
- optionally a ``VideoWriter`` hooked up in the typical fashion.
-
- From the ``MultibodyPlant``, ``MultibodyPlantDiagram`` can infer the
- corresponding ``StateSpace`` from the dimension of the associated
- velocity vectors in the plant's context, via the one-chain-per-file
- assumption.
- """
- # pylint: disable=too-few-public-methods
- sim: Simulator
- plant: MultibodyPlant
- scene_graph: SceneGraph
- visualizer: Optional[VideoWriter]
- model_ids: List[ModelInstanceIndex]
- collision_geometry_set: CollisionGeometrySet
- space: state_space.ProductSpace
-
- def __init__(self,
- urdfs: Dict[str, str],
- dt: float = DEFAULT_DT,
- visualization_file: Optional[str] = None,
- additional_forces: Optional[str] = None,
- g_frac: Optional[float] = 1.0) -> None:
- r"""Initialization generates a world containing each given URDF as a
- model instance, and a corresponding Drake ``Simulator`` set up to
- trigger a state update every ``dt``.
-
- By default, a ground plane is added at world height ``z = 0``.
-
- Args:
- urdfs: Names and corresponding URDFs to add as models to plant.
- dt: Time step of plant in seconds.
- visualization_file: Optional output GIF filename for trajectory
- visualization.
-            additional_forces: Optional additional forces to add to plant, e.g.
-                an arbitrary force vector field.
-            g_frac: Fraction of the true gravitational constant to use.
- """
- builder = DiagramBuilder()
- model_ids, plant, scene_graph = add_plant_from_urdfs(builder, urdfs, dt)
-
- # Add visualizer to diagram if enabled. Sets ``delete_prefix_on_load``
- # to False, in the hopes of saving computation time; may cause
- # re-initialization to produce erroneous visualizations.
- visualizer = None
- if visualization_file:
- visualizer = VideoWriter.AddToBuilder(filename=visualization_file,
- builder=builder,
- sensor_pose=SENSOR_POSE,
- fps=FPS,
- width=VIDEO_PIXELS[1],
- height=VIDEO_PIXELS[0],
- fov_y=CAM_FOV)
-
- # Adds ground plane at ``z = 0``
- halfspace_transform = RigidTransform_[float]()
- friction = CoulombFriction_[float](1.0, 1.0)
- plant.RegisterCollisionGeometry(plant.world_body(), halfspace_transform,
- HalfSpace(), WORLD_GROUND_PLANE_NAME,
- friction)
- plant.RegisterVisualGeometry(plant.world_body(), halfspace_transform,
- HalfSpace(), WORLD_GROUND_PLANE_NAME,
- GROUND_COLOR)
-
- # get collision candidates before default context filters for proximity.
- self.collision_geometry_set = get_collision_geometry_set(
- scene_graph.model_inspector())
-
- # Edit the gravitational constant.
- new_gravity_vector = np.array([0., 0., -9.81*g_frac])
- plant.mutable_gravity_field().set_gravity_vector(new_gravity_vector)
-
- # Finalize multibody plant.
- plant.Finalize()
-
- # Handle augmented dynamics. The gravity case was handled via the
- # gravity vector edit above.
-        if additional_forces not in [None, 'gravity']:
- # Get sizes for defining appropriately sized input and output ports
- # for the force vector field injector ``LeafSystem``.
- n_x = plant.get_state_output_port().size()
- n_v = plant.get_applied_generalized_force_input_port().size()
-
- # Define a force vector field.
- if additional_forces == 'vortex':
- force_vector_field = VortexForceVectorField(n_velocity=n_v)
- print("Injecting a vortex vector field into dynamics.")
- elif additional_forces == 'viscous':
- force_vector_field = ViscousDampingVectorField(n_velocity=n_v,
- w_linear=1e-1, w_angular=3e-3, w_articulation=1e-2)
- print("Injecting viscous damping vector field into dynamics.")
- else:
-                raise NotImplementedError("Only additional forces implemented "
-                                          "are vortex, viscous, and gravity.")
-
- # Define a force vector field injector based on the vector field.
- vector_field_injector = ForceVectorFieldInjectorLeafSystem(
- n_state=n_x, n_velocity=n_v,
- vector_field=force_vector_field
- )
-
- vector_field_injector = builder.AddSystem(vector_field_injector)
-
- # Wire in the vector field force injector so it affects the system
- # dynamics.
- builder.Connect(
- plant.get_state_output_port(),
- vector_field_injector.GetInputPort("mbp_state")
- )
- builder.Connect(
- vector_field_injector.GetOutputPort("force_vector"),
- plant.get_applied_generalized_force_input_port()
- )
-
- # Initialize simulator from diagram.
- diagram = builder.Build()
- diagram.CreateDefaultContext()
-
- # Uncomment the below lines to generate diagram graph.
- # diagram.set_name("graphviz example")
- # plt.figure(figsize=(11,8.5), dpi=300)
- # plot_system_graphviz(diagram)
- # plt.savefig('/home/bibit/Desktop/graphviz_example.png')
-
- else:
- # Build diagram.
- diagram = builder.Build()
- diagram.CreateDefaultContext()
-
- # Initialize simulator from diagram.
- sim = Simulator(diagram)
- sim.Initialize()
- sim.set_publish_every_time_step(False)
-
- self.sim = sim
- self.plant = plant
- self.scene_graph = scene_graph
- self.visualizer = visualizer
- self.model_ids = model_ids
- self.space = self.generate_state_space()
-
- def generate_state_space(self) -> state_space.ProductSpace:
- """Generate ``StateSpace`` object for plant.
-
- Under the one-chain-per-model assumption, iteratively constructs a
- ``ProductSpace`` representation for the state of the ``MultibodyPlant``.
-
- Returns:
- State space of the diagram's underlying multibody system.
- """
- plant = self.plant
-
- spaces = [] # type: List[state_space.StateSpace]
- for model_id in self.model_ids:
- if plant.HasUniqueFreeBaseBody(model_id):
- # Ensures quaternion is used to model rotation, instead of
- # XYZMobilizer, for instance.
- free_body = plant.GetUniqueFreeBaseBodyOrThrow(model_id)
- assert free_body.has_quaternion_dofs()
-
- n_joints = plant.num_velocities(
- model_id) - N_DRAKE_FLOATING_BODY_VELOCITIES
- spaces.append(state_space.FloatingBaseSpace(n_joints))
- else:
- n_joints = plant.num_velocities(model_id)
- spaces.append(state_space.FixedBaseSpace(n_joints))
-
- return state_space.ProductSpace(spaces)
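
A Drake-free sketch of the bookkeeping inside get_collision_geometry_set above, with plain ints standing in for GeometryId (illustrative only): candidate pairs are deduplicated into an id list plus index pairs into that list:

from typing import List, Tuple

candidates = [(101, 202), (101, 303), (202, 303)]  # stand-ins for GetCollisionCandidates()

geometry_ids: List[int] = []
geometry_pairs: List[Tuple[int, int]] = []
for id_a, id_b in candidates:
    for geometry_id in (id_a, id_b):
        if geometry_id not in geometry_ids:
            geometry_ids.append(geometry_id)
    geometry_pairs.append((geometry_ids.index(id_a), geometry_ids.index(id_b)))

assert geometry_ids == [101, 202, 303]
assert geometry_pairs == [(0, 1), (0, 2), (1, 2)]
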
diff --git a/dair_pll_old/dair_pll/experiment.py b/dair_pll_old/dair_pll/experiment.py
deleted file mode 100644
index c5f6f0e..0000000
--- a/dair_pll_old/dair_pll/experiment.py
+++ /dev/null
@@ -1,972 +0,0 @@
-"""Defines interfaces for various learning experiments to be run.
-
-Current supported experiment types include:
-
- * :py:class:`SupervisedLearningExperiment`: An experiment where a
- :py:class:`~dair_pll.system.System` is learned to mimic a
- dataset of trajectories.
-
-"""
-import dataclasses
-import signal
-import time
-from abc import ABC, abstractmethod
-from copy import deepcopy
-from dataclasses import dataclass, field
-import pdb
-from typing import List, Tuple, Callable, Optional, Dict, cast, Union
-
-import numpy as np
-import torch
-from torch import Tensor
-from torch.optim import Optimizer
-from torch.utils.data import DataLoader
-
-from dair_pll import file_utils
-from dair_pll.dataset_management import ExperimentDataManager, \
- TrajectorySet
-from dair_pll.experiment_config import SupervisedLearningExperimentConfig
-from dair_pll.multibody_learnable_system import MultibodyLearnableSystem
-from dair_pll.state_space import StateSpace, FloatingBaseSpace
-from dair_pll.system import System, SystemSummary
-from dair_pll.wandb_manager import WeightsAndBiasesManager
-
-
-@dataclass
-class TrainingState:
- """Dataclass to store a complete summary of the state of training
- process."""
- # pylint: disable=too-many-instance-attributes
- trajectory_set_split_indices: Tuple[Tensor, Tensor, Tensor]
- """Which trajectory indices are in train/valid/test sets."""
- best_learned_system_state: dict
- """State of learned system when it had the best validation loss so far."""
- current_learned_system_state: dict
- """Current state of learned system."""
- optimizer_state: dict
- r"""Current state of training :py:class:`torch.optim.Optimizer`\ ."""
- epoch: int = 1
- """Current epoch."""
- epochs_since_best: int = 0
- """Number of epochs since best validation loss so far was achieved."""
- best_valid_loss: Tensor = field(default_factory=lambda: torch.tensor(1e10))
- """Value of best validation loss so far."""
- wandb_run_id: Optional[str] = None
- """If using W&B, the ID of the run associated with this experiment."""
- finished_training: bool = False
- """Whether training has finished."""
-
-
-TRAIN_SET = 'train'
-VALID_SET = 'valid'
-TEST_SET = 'test'
-
-TRAIN_TIME_SETS = [TRAIN_SET, VALID_SET]
-ALL_SETS = [TRAIN_SET, VALID_SET, TEST_SET]
-
-TRAINING_DURATION = 'training_duration'
-EVALUATION_DURATION = 'evaluation_duration'
-LOGGING_DURATION = 'logging_duration'
-ALL_DURATIONS = [TRAINING_DURATION, EVALUATION_DURATION, LOGGING_DURATION]
-
-MAX_SAVED_TRAJECTORIES = 5
-
-BASE_SYSTEM_NAME = 'base'
-ORACLE_SYSTEM_NAME = 'oracle'
-LEARNED_SYSTEM_NAME = 'model'
-
-LOSS_NAME = 'loss'
-TRAJECTORY_ERROR_NAME = 'trajectory_mse'
-PREDICTED_VELOCITY_SIZE = 'v_plus_squared'
-DELTA_VELOCITY_SIZE = 'delta_v_squared'
-TARGET_NAME = 'target_sample'
-PREDICTION_NAME = 'prediction_sample'
-TRAJECTORY_POSITION_ERROR_NAME = 'pos_int_traj'
-TRAJECTORY_ROTATION_ERROR_NAME = 'angle_int_traj'
-TRAJECTORY_PENETRATION_NAME = 'penetration_int_traj'
-RESIDUAL_SINGLE_STEP_SIZE_NAME = 'residual_norm_stepwise'
-RESIDUAL_TRAJECTORY_SIZE_MSE_NAME = 'residual_norm_traj_mse'
-
-AVERAGE_TAG = 'mean'
-
-EVALUATION_VARIABLES = [LOSS_NAME, TRAJECTORY_ERROR_NAME,
- TRAJECTORY_POSITION_ERROR_NAME, TRAJECTORY_ROTATION_ERROR_NAME,
- TRAJECTORY_PENETRATION_NAME, RESIDUAL_SINGLE_STEP_SIZE_NAME,
- RESIDUAL_TRAJECTORY_SIZE_MSE_NAME
-]
-
-
-#:
-EpochCallbackCallable = Callable[[int, System, Tensor, Tensor], None]
-"""Type hint for extra callback to be called on each epoch of training.
-
-Args:
- epoch: Current epoch.
- learned_system: Partially-trained learned system.
- train_loss: Current epoch's average training loss.
- best_valid_loss: Best validation loss so far.
-"""
-
-#:
-LossCallbackCallable = Callable[[Tensor, Tensor, System, bool], Tensor]
-"""Callback to evaluate loss on batch of trajectory slices.
-
-By default, set to prediction loss (
-:meth:`SupervisedLearningExperiment.prediction_loss`)
-
-Args:
- x_past: ``(*,t_history,space.n_x)`` previous states in slice.
- x_future: ``(*,t_prediction,space.n_x)`` future states in slice.
- system: system on which to evaluate loss
- keep_batch: whether or not to collapse batch into a single scalar.
-Returns:
- ``(*,)`` or scalar loss.
-"""
-
-
-def default_epoch_callback(epoch: int, _learned_system: System,
- train_loss: Tensor, best_valid_loss: Tensor) -> None:
- """Default :py:data:`EpochCallbackCallable` which prints epoch, training
- loss, and best validation loss so far."""
- print(epoch, train_loss, best_valid_loss)
-
-
-StatisticsValue = Union[List, float, np.ndarray]
-StatisticsDict = Dict[str, StatisticsValue]
-
-
-class SupervisedLearningExperiment(ABC):
- r"""Supervised learning experiment.
-
- Implements the training and evaluation processes for a supervised
- learning experiment, where a :class:`~dair_pll.system.System` is
- learned to capture a dataset of trajectories.
-
- The dataset of trajectories is encapsulated in a
- :class:`~dair_pll.dataset_management.ExperimentDataManager`
-    object. This dataset is either stored to disk by the user,
- or alternatively is generated from the experiment's *base system*\ .
-
- The *base system*\ is a :class:`~dair_pll.system.System` with
- the same :class:`~dair_pll.state_space.StateSpace` as the
- system to be learned.
-
- Training is completed via a Pytorch :class:`~torch.optim.Optimizer`.
-
- The training process keeps track of various statistics about the learning
- process, and optionally logs the learned system's
-    :class:`~dair_pll.system.SystemSummary` to Weights & Biases on each
- epoch.
- """
- config: SupervisedLearningExperimentConfig
- """Configuration of the experiment."""
- space: StateSpace
- """State space of experiment, inferred from base system."""
- loss_callback: Optional[LossCallbackCallable]
- """Callback function for loss, defaults to prediction loss."""
- wandb_manager: Optional[WeightsAndBiasesManager]
-    """Optional Weights & Biases interface."""
- learning_data_manager: Optional[ExperimentDataManager]
- """Manager of trajectory data used in learning process."""
-
- def __init__(self, config: SupervisedLearningExperimentConfig) -> None:
- super().__init__()
-
- self.config = config
- file_utils.assure_storage_tree_created(config.storage)
- if not hasattr(self, 'space'):
- base_system = self.get_base_system()
- self.space = base_system.space
- self.loss_callback = cast(LossCallbackCallable, self.prediction_loss)
- self.learning_data_manager = None
-
- file_utils.save_configuration(config.storage, config.run_name, config)
-
- @abstractmethod
- def get_base_system(self) -> System:
- """Abstract callback function to construct base system from system
- config.
-
- Returns:
- Experiment's base system.
- """
-
- def get_oracle_system(self) -> System:
- """Abstract callback function to construct oracle system for
- experiment.
-
- Conceptually, the oracle system is an ideal system to compare the
- learned system against. By default, the oracle system is simply the
- base system. However, in some scenarios, a different type of oracle
- is appropriate. For example, if the learned system is recurrent,
- the oracle system might most appropriately take a recurrent slice of
- initial states, process them with a Kalman Filter for the base
- system, and then predict the future.
-
- Returns:
- Experiment's oracle system.
- """
- return self.get_base_system()
-
- @abstractmethod
- def get_learned_system(self, train_states: Tensor) -> System:
- """Abstract callback function to construct learnable system for
- experiment.
-
- Optionally, learned system can be initialized to depend on the
- training dataset.
-
- Args:
- train_states: ``(*, space.n_x)`` batch of all states in training
- set.
- Returns:
- Experiment's learnable system.
- """
-
- def get_optimizer(self, learned_system: System) -> Optimizer:
- """Constructs optimizer for experiment.
-
- Args:
- learned_system: System to be trained.
-
- Returns:
- Optimizer for training.
- """
- config = self.config.optimizer_config
- if issubclass(config.optimizer, torch.optim.Adam):
- return config.optimizer(learned_system.parameters(),
- lr=config.lr.value,
- weight_decay=config.wd.value)
- raise TypeError('Unsupported optimizer type:',
- config.optimizer.__name__)
-
- def batch_predict(self, x_past: Tensor, system: System) -> Tensor:
- """Predict forward in time from initial conditions.
-
- Args:
- x_past: ``(*, t_history, space.n_x)`` batch of initial states.
- system: System to run prediction on.
-
- Returns:
- ``(*, t_prediction, space.n_x)`` batch of predicted future states.
- """
- data_config = self.config.data_config
-
- # pylint: disable=E1103
- assert system.carry_callback is not None
- carries = torch.stack([system.carry_callback() for _ in x_past])
- prediction, _ = system.simulate(x_past, carries,
- data_config.slice_config.t_prediction)
- future = prediction[..., 1:, :]
- return future
-
- def trajectory_predict(
- self,
- x: List[Tensor],
- system: System,
- do_detach: bool = False) -> Tuple[List[Tensor], List[Tensor]]:
- """Predict from full lists of trajectories.
-
- Preloads initial conditions from the first ``t_skip + 1`` elements of
- each trajectory.
-
- Args:
- x: List of ``(*, T, space.n_x)`` trajectories.
- system: System to run prediction on.
- do_detach: Whether to detach each prediction from the computation
- graph; useful for memory management for large groups of
- trajectories.
-
- Returns:
- List of ``(*, T - t_skip - 1, space.n_x)`` predicted trajectories.
-
- List of ``(*, T - t_skip - 1, space.n_x)`` target trajectories.
-
- """
- t_skip = self.config.data_config.slice_config.t_skip
- t_begin = t_skip + 1
- x_0 = [x_i[..., :t_begin, :] for x_i in x]
- targets = [x_i[..., t_begin:, :] for x_i in x]
- prediction_horizon = [x_i.shape[-2] - t_skip - 1 for x_i in x]
-
- assert system.carry_callback is not None
- carry_0 = system.carry_callback()
- predictions = []
- for x_0_i, horizon_i, target_i in zip(x_0, prediction_horizon, targets):
- target_shape = target_i.shape
- x_prediction_i, carry_i = system.simulate(x_0_i, carry_0, horizon_i)
- del carry_i
- to_append = x_prediction_i[..., 1:, :].reshape(target_shape)
- if do_detach:
- predictions.append(to_append.detach().clone())
- del x_prediction_i
- else:
- predictions.append(to_append)
- return predictions, targets
-
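- # Worked example of the slicing above (sizes are hypothetical): with
- # ``t_skip = 2`` and a trajectory of length ``T = 10``, the initial
- # condition is ``x_i[..., :3, :]``, the target is ``x_i[..., 3:, :]``
- # (length 7), the system is rolled out for ``horizon_i = 7`` steps,
- # and the rollout is trimmed of its first entry so the prediction
- # aligns with the 7 target steps.
-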
- def prediction_loss(self,
- x_past: Tensor,
- x_future: Tensor,
- system: System,
- keep_batch: bool = False) -> Tensor:
- r"""Default :py:data:`LossCallbackCallable` which evaluates to system's
- :math:`l_2` prediction error on batch:
-
- .. math::
-
- \mathcal{L}(x_{p,i,\cdot}, x_{f,i,\cdot}) = \sum_{j} ||\hat x_{f,
- i,j} - x_{f,i,j}||^2,
-
- where :math:`x_{p,i,\cdot}, x_{f,i,\cdot}` are the :math:`i`\ th
- elements of the past and future batches; and
- :math:`\hat x_{f,i,j}` is the :math:`j`-step forward prediction of the
- model from the past batch.
-
- See :py:data:`LossCallbackCallable` for additional type signature info.
- """
- space = self.space
- x_predicted = self.batch_predict(x_past, system)
- v_future = space.v(x_future)
- v_predicted = space.v(x_predicted)
- avg_const = v_predicted.nelement() // v_predicted.shape[0]
- if not keep_batch:
- avg_const *= x_predicted.shape[0]
- return space.velocity_square_error(v_future, v_predicted,
- keep_batch) / avg_const
-
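- # Normalization sketch for the loss above (sizes are hypothetical): if
- # ``v_predicted`` has shape ``(64, 1, 12)`` (a batch of 64 samples, one
- # predicted step, 12 velocity coordinates), then ``avg_const`` is 12
- # per sample, and with ``keep_batch=False`` it grows to ``64 * 12``,
- # so the squared velocity error returned by
- # ``space.velocity_square_error`` is effectively averaged over batch,
- # time, and velocity dimensions.
-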
- def batch_loss(self,
- x_past: Tensor,
- x_future: Tensor,
- system: System,
- keep_batch: bool = False) -> Tensor:
- """Runs :py:attr:`loss_callback` (a
- :py:data:`LossCallbackCallable`) on the given batch."""
- assert self.loss_callback is not None
- return self.loss_callback(x_past, x_future, system, keep_batch)
-
- def train_epoch(self,
- data: DataLoader,
- system: System,
- optimizer: Optional[Optimizer] = None) -> Tensor:
- """Train learned model for a single epoch. Takes gradient steps in the
- learned parameters if ``optimizer`` is provided.
-
- Args:
- data: Training dataset.
- system: System to be trained.
- optimizer: Optimizer which trains system.
-
- Returns:
- Scalar average training loss observed during epoch.
- """
- losses = []
- for xy_i in data:
- x_i: Tensor = xy_i[0]
- y_i: Tensor = xy_i[1]
- if optimizer is not None:
- optimizer.zero_grad()
-
- loss = self.batch_loss(x_i, y_i, system)
- losses.append(loss.clone().detach())
-
- if optimizer is not None:
- loss.backward()
- optimizer.step()
-
- avg_loss = cast(Tensor, sum(losses) / len(losses))
- return avg_loss
-
- def base_and_learned_comparison_summary(
- self, statistics: Dict, learned_system: System) -> SystemSummary:
- """Extracts a :py:class:`~dair_pll.system.SystemSummary` that compares
- the base system to the learned system.
-
- Args:
- statistics: Dictionary of training statistics.
- learned_system: Most updated version of system during training.
-
- Returns:
- Summary of comparison between systems.
- """
- # pylint: disable=unused-argument
- return SystemSummary()
-
- def build_epoch_vars_and_system_summary(self, statistics: Dict,
- learned_system: System, skip_videos: bool = True) -> Tuple[Dict, SystemSummary]:
- """Build epoch variables and system summary for learning process.
-
- Args:
- statistics: Summary statistics for learning process.
- learned_system: System being trained.
- skip_videos: Whether to skip making videos or not.
-
- Returns:
- Dictionary of scalars to log.
- System summary.
- """
- # begin recording wall-clock logging time.
- start_log_time = time.time()
-
- epoch_vars = {}
- for stats_set in TRAIN_TIME_SETS:
- for variable in EVALUATION_VARIABLES:
- var_key = f'{stats_set}_{LEARNED_SYSTEM_NAME}' + \
- f'_{variable}_{AVERAGE_TAG}'
- if var_key in statistics:
- epoch_vars[f'{stats_set}_{variable}'] = statistics[var_key]
-
- learned_system_summary = learned_system.summary(statistics)
-
- if not skip_videos:
- comparison_summary = self.base_and_learned_comparison_summary(
- statistics, learned_system)
-
- epoch_vars.update(learned_system_summary.scalars)
- logging_duration = time.time() - start_log_time
- statistics[LOGGING_DURATION] = logging_duration
- epoch_vars.update(
- {duration: statistics[duration] for duration in ALL_DURATIONS})
-
- if not skip_videos:
- epoch_vars.update(comparison_summary.scalars)
- learned_system_summary.videos.update(comparison_summary.videos)
- learned_system_summary.meshes.update(comparison_summary.meshes)
-
- return epoch_vars, learned_system_summary
-
- def write_to_wandb(self, epoch: int, learned_system: System,
- statistics: Dict) -> None:
- """Extracts and writes summary of training progress to Tensorboard.
-
- Args:
- epoch: Current epoch.
- learned_system: System being trained.
- statistics: Summary statistics for learning process.
- """
- assert self.wandb_manager is not None
-
- # To save space on W&B storage, only generate comparison videos at first
- # and best epoch, the latter of which is implemented in
- # :meth:`_evaluation`.
- skip_videos = False # Temporarily always render videos while debugging; normally `False if epoch == 0 else True`.
-
- epoch_vars, learned_system_summary = \
- self.build_epoch_vars_and_system_summary(statistics, learned_system,
- skip_videos=skip_videos)
-
- self.wandb_manager.update(epoch, epoch_vars,
- learned_system_summary.videos,
- learned_system_summary.meshes)
-
- def per_epoch_evaluation(self, epoch: int, learned_system: System,
- train_loss: Tensor,
- training_duration: float) -> Tensor:
- """Evaluates and logs training progress at end of an epoch.
-
- Runs evaluation on full slice datasets, as well as a handful of
- trajectories.
-
- Optionally logs the results to Weights & Biases via
- :meth:`write_to_wandb`.
-
- Args:
- epoch: Current epoch.
- learned_system: System being trained.
- train_loss: Scalar training loss of epoch.
- training_duration: Duration of epoch training in seconds.
-
- Returns:
- Scalar validation set loss.
- """
- # pylint: disable=too-many-locals
- assert self.learning_data_manager is not None
- start_eval_time = time.time()
- statistics = {}
-
- if (epoch % self.config.full_evaluation_period) == 0:
- train_set, valid_set, _ = \
- self.learning_data_manager.get_updated_trajectory_sets()
-
- n_train_eval = min(len(train_set.trajectories),
- self.config.full_evaluation_samples)
-
- n_valid_eval = min(len(valid_set.trajectories),
- self.config.full_evaluation_samples)
-
- train_eval_set = \
- self.learning_data_manager.make_empty_trajectory_set()
- train_eval_set.add_trajectories(
- train_set.trajectories[:n_train_eval],
- train_set.indices[:n_train_eval])
-
- valid_eval_set = \
- self.learning_data_manager.make_empty_trajectory_set()
- valid_eval_set.add_trajectories(
- valid_set.trajectories[:n_valid_eval],
- valid_set.indices[:n_valid_eval])
-
- statistics = self.evaluate_systems_on_sets(
- {LEARNED_SYSTEM_NAME: learned_system}, {
- TRAIN_SET: train_eval_set,
- VALID_SET: valid_eval_set
- })
-
- statistics[f'{TRAIN_SET}_{LEARNED_SYSTEM_NAME}_'
- f'{LOSS_NAME}_{AVERAGE_TAG}'] = float(train_loss.item())
-
- statistics[TRAINING_DURATION] = training_duration
- statistics[EVALUATION_DURATION] = time.time() - start_eval_time
-
- self.statistics = statistics
-
- if self.wandb_manager is not None:
- self.write_to_wandb(epoch, learned_system, statistics)
-
- # pylint: disable=E1103
- valid_loss_key = f'{VALID_SET}_{LEARNED_SYSTEM_NAME}_{LOSS_NAME}' \
- f'_{AVERAGE_TAG}'
- # # Use validation set mean rollout error as validation loss.
- # valid_loss_key = f'{VALID_SET}_{LEARNED_SYSTEM_NAME}' \
- # + f'_{TRAJECTORY_ERROR_NAME}_{AVERAGE_TAG}'
- valid_loss = 0.0 \
- if valid_loss_key not in statistics \
- else statistics[valid_loss_key]
- return torch.tensor(valid_loss)
-
- def setup_training(self) -> Tuple[System, Optimizer, TrainingState]:
- r"""Sets up initial condition for training process.
-
- Attempts to load initial condition from disk as a
- :py:class:`TrainingState`\ . Otherwise, a fresh training process is
- started.
-
- Returns:
- Initial learned system.
- Pytorch optimizer.
- Current state of training process.
- """
- is_resumed = False
- training_state = None
- checkpoint_filename = file_utils.get_model_filename(
- self.config.storage, self.config.run_name)
- try:
- # if a checkpoint is saved from disk, attempt to load it.
- checkpoint_dict = torch.load(checkpoint_filename)
- training_state = TrainingState(**checkpoint_dict)
- print("Resumed from disk.")
- is_resumed = True
- self.learning_data_manager = ExperimentDataManager(
- self.config.storage, self.config.data_config,
- training_state.trajectory_set_split_indices)
- except FileNotFoundError:
- self.learning_data_manager = ExperimentDataManager(
- self.config.storage, self.config.data_config)
-
- train_set, _, _ = \
- self.learning_data_manager.get_updated_trajectory_sets()
-
- # Setup optimization.
- # pylint: disable=E1103
- learned_system = self.get_learned_system(
- torch.cat(train_set.trajectories))
- optimizer = self.get_optimizer(learned_system)
-
- if is_resumed:
- assert training_state is not None
- learned_system.load_state_dict(
- training_state.current_learned_system_state)
- optimizer.load_state_dict(training_state.optimizer_state)
- else:
- training_state = TrainingState(
- self.learning_data_manager.trajectory_set_indices(),
- deepcopy(learned_system.state_dict()),
- deepcopy(learned_system.state_dict()),
- deepcopy(optimizer.state_dict()))
-
- # Our Weights & Biases logic assumes that resumption is only
- # allowed when a training state exists on disk. Therefore, we
- # never want to launch wandb_manager without a training state
- # saved to disk.
- torch.save(dataclasses.asdict(training_state), checkpoint_filename)
-
- if self.config.run_wandb:
- assert self.config.wandb_project is not None
- wandb_directory = file_utils.wandb_dir(self.config.storage,
- self.config.run_name)
-
- self.wandb_manager = WeightsAndBiasesManager(
- self.config.run_name, wandb_directory,
- self.config.wandb_project, training_state.wandb_run_id)
- training_state.wandb_run_id = self.wandb_manager.launch()
- self.wandb_manager.log_config(self.config)
-
- return learned_system, optimizer, training_state
-
- def train(
- self,
- epoch_callback: EpochCallbackCallable = default_epoch_callback,
- ) -> Tuple[Tensor, Tensor, System]:
- """Run training process for experiment.
-
- Terminates training with early stopping, parameters for which are set in
- :attr:`config`.
-
- Args:
- epoch_callback: Callback function at end of each epoch.
-
- Returns:
- Final-epoch training loss.
- Best-seen validation set loss.
- Fully-trained system, with parameters corresponding to best-seen
- validation loss.
- """
- checkpoint_filename = file_utils.get_model_filename(
- self.config.storage, self.config.run_name)
-
- learned_system, optimizer, training_state = self.setup_training()
- assert self.learning_data_manager is not None
-
- train_set, _, _ = \
- self.learning_data_manager.get_updated_trajectory_sets()
-
- # Prepare sets for training.
- train_dataloader = DataLoader(
- train_set.slices,
- batch_size=self.config.optimizer_config.batch_size.value,
- shuffle=True)
-
- # Calculate the training loss before any parameter updates. Calls
- # ``train_epoch`` without providing an optimizer, so no gradient steps
- # will be taken.
- learned_system.eval()
- training_loss = self.train_epoch(train_dataloader, learned_system)
-
- # Terminate if the training state indicates training already finished.
- if training_state.finished_training:
- learned_system.load_state_dict(
- training_state.best_learned_system_state)
- return training_loss, training_state.best_valid_loss, learned_system
-
- # Report losses before any parameter updates.
- if training_state.epoch == 1:
- training_state.best_valid_loss = self.per_epoch_evaluation(
- 0, learned_system, training_loss, 0.)
- epoch_callback(0, learned_system, training_loss,
- training_state.best_valid_loss)
-
- patience = self.config.optimizer_config.patience
-
- # Start training loop.
- try:
- while training_state.epoch <= self.config.optimizer_config.epochs:
- if self.config.data_config.update_dynamically:
- # reload training data
-
- # get train/test/val trajectories
- train_set, _, _ = \
- self.learning_data_manager.get_updated_trajectory_sets()
-
- # Prepare sets for training.
- train_dataloader = DataLoader(
- train_set.slices,
- batch_size=self.config.optimizer_config.batch_size.
- value,
- shuffle=True)
-
- training_state.trajectory_set_split_indices = \
- self.learning_data_manager.trajectory_set_indices()
-
- learned_system.train()
- start_train_time = time.time()
- training_loss = self.train_epoch(train_dataloader,
- learned_system, optimizer)
- training_duration = time.time() - start_train_time
- learned_system.eval()
- valid_loss = self.per_epoch_evaluation(training_state.epoch,
- learned_system,
- training_loss,
- training_duration)
-
- # Check for validation loss improvement.
- if valid_loss < training_state.best_valid_loss:
- training_state.best_valid_loss = valid_loss
- training_state.best_learned_system_state = deepcopy(
- learned_system.state_dict())
- training_state.epochs_since_best = 0
- else:
- training_state.epochs_since_best += 1
-
- epoch_callback(training_state.epoch, learned_system,
- training_loss, training_state.best_valid_loss)
-
- # Decide to early-stop or not.
- if training_state.epochs_since_best >= patience:
- break
-
- training_state.current_learned_system_state = \
- learned_system.state_dict()
- training_state.optimizer_state = optimizer.state_dict()
- training_state.epoch += 1
-
- # Mark training as completed, whether by early stopping or by
- # reaching the epoch limit.
- training_state.finished_training = True
-
- finally:
- # this code should execute, even if a program exit is triggered
- # in the above try block.
-
- # Stop SIGINT (Ctrl+C) from exiting during saving.
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- print("Saving training state before exit...")
- torch.save(dataclasses.asdict(training_state), checkpoint_filename)
- signal.signal(signal.SIGINT, signal.SIG_DFL)
-
- # Reload best parameters.
- print("Loading best parameters...")
- learned_system.load_state_dict(training_state.best_learned_system_state)
- print("Done loading best parameters.")
- return training_loss, training_state.best_valid_loss, learned_system
-
- def extra_metrics(self) -> Dict[str, Callable[[Tensor, Tensor], Tensor]]:
- return {}
-
- def evaluate_systems_on_sets(
- self, systems: Dict[str, System],
- sets: Dict[str, TrajectorySet]) -> StatisticsDict:
- r"""Evaluate given systems on trajectory sets.
-
- Builds a "statistics" dictionary containing a thorough evaluation
- each system on each set, containing the following:
-
- * Single step and trajectory prediction losses.
- * Squared norms of velocity and delta-velocity (for normalization).
- * Sample target and prediction trajectories.
- * Auxiliary trajectory comparisons defined in
- :meth:`dair_pll.state_space.StateSpace\
- .auxiliary_comparisons()`
- * Summary statistics of the above where applicable.
-
- Args:
- systems: Named dictionary of systems to evaluate.
- sets: Named dictionary of sets to evaluate.
-
- Returns:
- Statistics dictionary.
-
- Warnings:
- Currently assumes prediction horizon of 1.
- """
- # pylint: disable=too-many-locals
-
- stats = {} # type: StatisticsDict
- space = self.space
-
- def to_json(possible_tensor: Union[float, List, Tensor]) -> \
- StatisticsValue:
- """Converts tensor to :class:`~np.ndarray`, which enables saving
- stats as json."""
- if isinstance(possible_tensor, list):
- return [to_json(value) for value in possible_tensor]
- if torch.is_tensor(possible_tensor):
- tensor = cast(Tensor, possible_tensor)
- return tensor.detach().cpu().numpy()
-
- assert isinstance(possible_tensor, float)
- return possible_tensor
-
- for set_name, trajectory_set in sets.items():
- trajectories = trajectory_set.trajectories
- n_saved_trajectories = min(MAX_SAVED_TRAJECTORIES,
- len(trajectories))
- slices_loader = DataLoader(trajectory_set.slices,
- batch_size=128,
- shuffle=False)
- slices = trajectory_set.slices[:]
- all_x = cast(List[Tensor], slices[0])
- all_y = cast(List[Tensor], slices[1])
-
- # hack: assume 1-step prediction for now
- # pylint: disable=E1103
- v_plus = [space.v(y[:1, :]) for y in all_y]
- v_minus = [space.v(x[-1:, :]) for x in all_x]
- dv2 = torch.stack([
- space.velocity_square_error(vp, vm)
- for vp, vm in zip(v_plus, v_minus)
- ])
- vp2 = torch.stack(
- [space.velocity_square_error(vp, 0 * vp) for vp in v_plus])
- stats[f'{set_name}_{DELTA_VELOCITY_SIZE}'] = to_json(dv2)
- stats[f'{set_name}_{PREDICTED_VELOCITY_SIZE}'] = to_json(vp2)
-
- for system_name, system in systems.items():
- model_loss_list = []
- for batch_x, batch_y in slices_loader:
- model_loss_list.append(
- self.prediction_loss(batch_x, batch_y, system, True))
- model_loss = torch.cat(model_loss_list)
- loss_name = f'{set_name}_{system_name}_{LOSS_NAME}'
- stats[loss_name] = to_json(model_loss)
-
- if system_name == LEARNED_SYSTEM_NAME:
- trajectories = [t.unsqueeze(0) for t in trajectories]
- traj_pred, traj_target = self.trajectory_predict(
- trajectories, system, True)
- if system_name == LEARNED_SYSTEM_NAME:
- traj_target = [t.squeeze(0) for t in traj_target]
- traj_pred = [t.squeeze(0) for t in traj_pred]
- stats[f'{set_name}_{system_name}_{TARGET_NAME}'] = \
- to_json(traj_target[:n_saved_trajectories])
- stats[f'{set_name}_{system_name}_{PREDICTION_NAME}'] = \
- to_json(traj_pred[:n_saved_trajectories])
-
- # pylint: disable=E1103
- trajectory_mse = torch.stack([
- space.state_square_error(tp, tt)
- for tp, tt in zip(traj_pred, traj_target)
- ])
- stats[f'{set_name}_{system_name}_{TRAJECTORY_ERROR_NAME}'] = \
- to_json(trajectory_mse)
-
- # Add position and rotation error over trajectory. TODO this
- # could be implemented more elegantly; perhaps somewhere else
- # like in space.auxiliary_comparisons or a child experiment
- # class like DrakeMultibodyLearnableExperiment.
- running_pos_mse = None
- running_angle_mse = None
- for space_i in space.spaces:
- if isinstance(space_i, FloatingBaseSpace):
- pos_mse = torch.stack([
- space_i.base_error(tp, tt)
- for tp, tt in zip(traj_pred, traj_target)
- ])
- angle_mse = torch.stack([
- space_i.quaternion_error(tp, tt)
- for tp, tt in zip(traj_pred, traj_target)
- ])
- if running_pos_mse is None:
- running_pos_mse = pos_mse
- running_angle_mse = angle_mse
- else:
- running_pos_mse += pos_mse
- running_angle_mse += angle_mse
-
- stats[f'{set_name}_{system_name}_' + \
- f'{TRAJECTORY_POSITION_ERROR_NAME}'] = \
- to_json(running_pos_mse)
- stats[f'{set_name}_{system_name}_' + \
- f'{TRAJECTORY_ROTATION_ERROR_NAME}'] = \
- to_json(running_angle_mse)
-
- # Add residual sizes over trajectory and single steps.
- if isinstance(system, MultibodyLearnableSystem):
- if system.residual_net is not None:
- residual_mse = torch.stack([
- torch.linalg.norm(system.residual_net(tp),
- dim=1).sum()
- for tp in traj_pred
- ])
- stats[f'{set_name}_{system_name}_' + \
- f'{RESIDUAL_TRAJECTORY_SIZE_MSE_NAME}'] = \
- to_json(residual_mse/len(traj_pred))
-
- residual_single_step_mse = torch.stack([
- torch.linalg.norm(system.residual_net(x_i),
- dim=1).sum()
- for x_i in all_x
- ])
- stats[f'{set_name}_{system_name}_' + \
- f'{RESIDUAL_SINGLE_STEP_SIZE_NAME}'] = \
- to_json(residual_single_step_mse/len(all_x))
-
- extra_metrics = self.extra_metrics()
- for metric_name in extra_metrics:
- stats[f'{set_name}_{system_name}_{metric_name}'] = to_json(
- Tensor([
- extra_metrics[metric_name](tp, tt)
- for tp, tt in zip(traj_pred, traj_target)
- ]))
-
- aux_comps = space.auxiliary_comparisons()
- for comp_name in aux_comps:
- stats[f'{set_name}_{system_name}_{comp_name}'] = to_json([
- aux_comps[comp_name](tp, tt)
- for tp, tt in zip(traj_pred, traj_target)
- ])
-
- summary_stats = {} # type: StatisticsDict
- for key, stat in stats.items():
- if isinstance(stat, np.ndarray):
- if len(stat) > 0:
- if isinstance(stat[0], float):
- summary_stats[f'{key}_{AVERAGE_TAG}'] = np.average(stat)
-
- stats.update(summary_stats)
- return stats
-
- def _evaluation(self, learned_system: System) -> StatisticsDict:
- r"""Evaluate both oracle and learned system on training, validation,
- and testing data, and saves results to disk.
-
- Implemented as a wrapper for :meth:`evaluate_systems_on_sets`.
-
- Args:
- learned_system: Trained system.
-
- Returns:
- Statistics dictionary.
-
- Warnings:
- Currently assumes prediction horizon of 1.
- """
- assert self.learning_data_manager is not None
- sets = dict(
- zip(ALL_SETS,
- self.learning_data_manager.get_updated_trajectory_sets()))
- systems = {
- ORACLE_SYSTEM_NAME: self.get_oracle_system(),
- LEARNED_SYSTEM_NAME: learned_system
- }
-
- evaluation = self.evaluate_systems_on_sets(systems, sets)
- file_utils.save_evaluation(self.config.storage, self.config.run_name,
- evaluation)
-
- # Generate final toss/geometry inspection videos with best parameters.
- comparison_summary = self.base_and_learned_comparison_summary(
- evaluation, learned_system)
- if self.wandb_manager is not None:
- self.wandb_manager.update(int(1e4), {}, comparison_summary.videos, {})
-
- return evaluation
-
- def generate_results(
- self,
- epoch_callback: EpochCallbackCallable = default_epoch_callback,
- ) -> Tuple[System, StatisticsDict]:
- r"""Get the final learned model and results/statistics of experiment.
- Along with the model corresponding to best validation loss, this will
- return previously saved results on disk if they already exist, or run
- the experiment to generate them if they don't.
-
- Args:
- epoch_callback: Callback function at end of each epoch.
-
- Returns:
- Fully-trained system, with parameters corresponding to best-seen
- validation loss.
- Statistics dictionary.
- """
- _, _, learned_system = self.train(epoch_callback)
-
- try:
- print("Looking for previously generated statistics...")
- statistics = file_utils.load_evaluation(self.config.storage,
- self.config.run_name)
- print("Done loading statistics.")
- except FileNotFoundError:
- print("Did not find statistics; generating them... (this could " + \
- "take several minutes)")
- statistics = self._evaluation(learned_system)
- print("Done generating statistics.")
-
- return learned_system, statistics
diff --git a/dair_pll_old/dair_pll/experiment_config.py b/dair_pll_old/dair_pll/experiment_config.py
deleted file mode 100644
index cbf3118..0000000
--- a/dair_pll_old/dair_pll/experiment_config.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""Configuration dataclasses for experiments."""
-from dataclasses import dataclass, field
-from typing import Type, Optional
-
-import torch
-from torch.optim import Optimizer
-
-from dair_pll.dataset_management import DataConfig
-from dair_pll.hyperparameter import Float, Int
-
-
-@dataclass
-class SystemConfig:
- """Dummy base :py:class:`~dataclasses.dataclass` for parameters for
- learning dynamics; all inheriting classes are expected to contain all
- necessary configuration attributes."""
-
-
-@dataclass
-class OptimizerConfig:
- """:func:`~dataclasses.dataclass` defining setup and usage opf a Pytorch
- :func:`~torch.optim.Optimizer` for learning."""
- optimizer: Type[Optimizer] = torch.optim.Adam
- """Subclass of :py:class:`~torch.optim.Optimizer` to use."""
- lr: Float = Float(1e-5, log=True)
- """Learning rate."""
- wd: Float = Float(4e-5, log=True)
- """Weight decay."""
- epochs: int = 10000
- """Maximum number of epochs to optimize."""
- patience: int = 30
- """Number of epochs to wait for early stopping."""
- batch_size: Int = Int(64, log=True)
- """Size of batch for an individual gradient step."""
-
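-# A minimal construction sketch (values are illustrative only, not tuned
-# defaults from any experiment):
-#
-# optimizer_config = OptimizerConfig(lr=Float(1e-4, log=True),
-# wd=Float(0.0, log=True),
-# epochs=500,
-# patience=20,
-# batch_size=Int(32, log=True))
-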
-
-@dataclass
-class SupervisedLearningExperimentConfig:
- """:py:class:`~dataclasses.dataclass` defining setup of a
- :py:class:`SupervisedLearningExperiment`"""
- # pylint: disable=too-many-instance-attributes
- data_config: DataConfig = field(default_factory=DataConfig)
- """Configuration for experiment's
- :py:class:`~dair_pll.system_data_manager.SystemDataManager`."""
- base_config: SystemConfig = field(default_factory=SystemConfig)
- """Configuration for experiment's "base" system, from which trajectories
- are modeled and optionally generated."""
- learnable_config: SystemConfig = field(default_factory=SystemConfig)
- """Configuration for system to be learned."""
- optimizer_config: OptimizerConfig = field(default_factory=OptimizerConfig)
- """Configuration for experiment's optimization process."""
- storage: str = './'
- """Folder for results/data storage. Defaults to working directory."""
- run_name: str = 'experiment_run'
- """Unique identifier for experiment run."""
- run_wandb: bool = True
- """Whether to run Weights and Biases logging."""
- wandb_project: Optional[str] = None
- r"""If :py:attr:`run_wandb`\ , a project to store results under on W&B."""
- full_evaluation_period: int = 1
- """How many epochs should pass between full evaluations."""
- full_evaluation_samples: int = 5
- """How many trajectories to save in full for experiment's summary."""
- update_geometry_in_videos: bool = False
- """Whether to use learned geometry in rollout videos, primarily for
- debugging purposes."""
-
- def __post_init__(self):
- """Method to check validity of parameters."""
- if self.run_wandb:
- assert self.wandb_project is not None
diff --git a/dair_pll_old/dair_pll/file_utils.py b/dair_pll_old/dair_pll/file_utils.py
deleted file mode 100644
index dae7deb..0000000
--- a/dair_pll_old/dair_pll/file_utils.py
+++ /dev/null
@@ -1,384 +0,0 @@
-"""Utility functions for managing saved files for training models.
-
-File system is organized around a "storage" directory associated with data and
-training runs. The functions herein can be used to return absolute paths of and
-summary information about the content of this directory.
-"""
-import glob
-import json
-import os
-import random
-import pickle
-from os import path
-from typing import List, Callable, BinaryIO, Any, TextIO, Optional
-
-
-TRAJ_EXTENSION = '.pt' # trajectory file
-HYPERPARAMETERS_EXTENSION = '.json' # hyperparameter set file
-STATS_EXTENSION = '.pkl' # experiment statistics
-CONFIG_EXTENSION = '.pkl'
-CHECKPOINT_EXTENSION = '.pt'
-DATA_SUBFOLDER_NAME = 'data'
-LEARNING_DATA_SUBFOLDER_NAME = 'learning'
-GROUND_TRUTH_DATA_SUBFOLDER_NAME = 'ground_truth'
-RUNS_SUBFOLDER_NAME = 'runs'
-STUDIES_SUBFOLDER_NAME = 'studies'
-URDFS_SUBFOLDER_NAME = 'urdfs'
-WANDB_SUBFOLDER_NAME = 'wandb'
-TRAJECTORY_GIF_DEFAULT_NAME = 'trajectory.gif'
-FINAL_EVALUATION_NAME = f'statistics{STATS_EXTENSION}'
-HYPERPARAMETERS_FILENAME = f'optimal_hyperparameters{HYPERPARAMETERS_EXTENSION}'
-CONFIG_FILENAME = f'config{CONFIG_EXTENSION}'
-CHECKPOINT_FILENAME = f'checkpoint{CHECKPOINT_EXTENSION}'
-"""str: extensions for saved files"""
-
-
-def assure_created(directory: str) -> str:
- """Wrapper to put around directory paths which ensure their existence.
-
- Args:
- directory: Path of directory that may not exist.
-
- Returns:
- ``directory``, which is ensured to exist by recursive mkdir.
- """
- directory = path.abspath(directory)
- if not path.exists(directory):
- assure_created(path.dirname(directory))
- os.mkdir(directory)
- return directory
-
-
-MAIN_DIR = path.dirname(path.dirname(__file__))
-LOG_DIR = assure_created(os.path.join(MAIN_DIR, 'logs'))
-ASSETS_DIR = assure_created(os.path.join(MAIN_DIR, 'assets'))
-# str: locations of key static directories
-
-def get_generated_urdf(run_dir: str, asset_file_basename: str) -> str:
- """Absolute path of a URDF generated under the given run directory."""
- return os.path.join(run_dir, 'urdfs', asset_file_basename)
-
-def get_asset(asset_file_basename: str) -> str:
- """Gets
-
- Args:
- asset_file_basename: Basename of asset file located in ``ASSET_DIR``
-
- Returns:
- Asset's absolute path.
- """
- return os.path.join(ASSETS_DIR, asset_file_basename)
-
-
-def assure_storage_tree_created(storage_name: str) -> None:
- """Assure that all subdirectories of specified storage are created.
-
- Args:
- storage_name: name of storage directory.
- """
- storage_directories = [data_dir, all_runs_dir,
- all_studies_dir] # type: List[Callable[[str],str]]
-
- for directory in storage_directories:
- assure_created(directory(storage_name))
-
-
-def list_file_nums(path: str) -> List[int]:
- """List the integer basenames of files in the given directory."""
- file_nums = []
- for file_name in os.listdir(path):
- file_nums.append(int(file_name.split('.')[0]))
- return file_nums
-
-
-def import_data_to_storage(storage_name: str, import_data_dir: str,
- num: Optional[int] = None) -> None:
- """Import data in external folder into data directory.
-
- Args:
- storage_name: Name of storage for data import.
- import_data_dir: Directory to import data from.
- num: Number of trajectories to import. If `None`, imports all in the
- directory.
- """
- output_directories = [
- ground_truth_data_dir(storage_name),
- learning_data_dir(storage_name)
- ]
- data_traj_count = get_numeric_file_count(import_data_dir, TRAJ_EXTENSION)
- run_indices = [i for i in range(data_traj_count)]
-
- target_traj_number = data_traj_count if num is None else \
- min(num, data_traj_count)
-
- # Check if data is synchronized already.
- for output_directory in output_directories:
- storage_traj_count = get_numeric_file_count(output_directory,
- TRAJ_EXTENSION)
-
- # Overwrite in case of any discrepancies.
- if storage_traj_count != target_traj_number:
-
- # Copy entire directory if all trajectories are desired.
- if target_traj_number == data_traj_count:
- for output_dir in output_directories:
- os.system(f'rm -r {output_dir}')
- os.system(f'cp -r {import_data_dir} {output_dir}')
-
- # Copy a random subset of trajectories if want a smaller number.
- else:
- random.shuffle(run_indices)
- run_indices = run_indices[:target_traj_number]
- for output_dir in output_directories:
- os.system(f'rm -r {output_dir}')
- os.system(f'mkdir {output_dir}')
-
- # Copy over a random selection of trajectories, numbering
- # from 0.
- for i, run in zip(range(target_traj_number), run_indices):
- os.system(f'cp {import_data_dir}/{run}.pt ' + \
- f'{output_dir}/{i}.pt')
-
- # Can terminate outer loop over output directories since all output
- # directories are written to at once.
- return
-
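-# Example call (paths are hypothetical): import 50 randomly chosen
-# trajectories from an external capture directory into both the learning
-# and ground-truth data folders:
-#
-# import_data_to_storage('results/cube_experiment',
-# '/data/cube_tosses', num=50)
-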
-
-def storage_dir(storage_name: str) -> str:
- """Absolute path of storage directory"""
- # return assure_created(os.path.join(RESULTS_DIR, storage_name))
- return assure_created(storage_name)
-
-
-def data_dir(storage_name: str) -> str:
- """Absolute path of data folder."""
- return assure_created(
- path.join(storage_dir(storage_name), DATA_SUBFOLDER_NAME))
-
-
-def learning_data_dir(storage_name: str) -> str:
- """Absolute path of folder for data preprocessed for
- training/validation."""
- return assure_created(
- path.join(data_dir(storage_name), LEARNING_DATA_SUBFOLDER_NAME))
-
-
-def ground_truth_data_dir(storage_name: str) -> str:
- """Absolute path of folder for raw unprocessed trajectories."""
- return assure_created(
- path.join(data_dir(storage_name), GROUND_TRUTH_DATA_SUBFOLDER_NAME))
-
-
-def all_runs_dir(storage_name: str) -> str:
- """Absolute path of tensorboard storage folder"""
- return assure_created(
- path.join(storage_dir(storage_name), RUNS_SUBFOLDER_NAME))
-
-
-def all_studies_dir(storage_name: str) -> str:
- """Absolute path of tensorboard storage folder"""
- return assure_created(
- path.join(storage_dir(storage_name), STUDIES_SUBFOLDER_NAME))
-
-
-def delete(file_name: str) -> None:
- """Removes file at path specified by ``file_name``"""
- if path.exists(file_name):
- os.remove(file_name)
-
-
-def get_numeric_file_count(directory: str,
- extension: str = TRAJ_EXTENSION) -> int:
- """Count number of whole-number-named files.
-
- If folder ``/fldr`` has contents (7.pt, 11.pt, 4.pt), then::
-
- get_numeric_file_count("/fldr", ".pt") == 3
-
- Args:
- directory: Directory to tally file count in
- extension: Extension of files to be counted
-
- Returns:
- Number of files in specified ``directory`` with specified
- ``extension`` that have an integer basename.
- """
- return len(glob.glob(path.join(directory, './[0-9]*' + extension)))
-
-
-def get_trajectory_count(trajectory_dir: str) -> int:
- """Count number of trajectories on disk in given directory."""
- return get_numeric_file_count(trajectory_dir, TRAJ_EXTENSION)
-
-
-def trajectory_file(trajectory_dir: str, num_trajectory: int) -> str:
- """Absolute path of specific trajectory in storage"""
- return path.join(trajectory_dir,
- f'{num_trajectory}{TRAJ_EXTENSION}')
-
-
-def run_dir(storage_name: str, run_name: str) -> str:
- """Absolute path of run-specific storage folder."""
- return assure_created(path.join(all_runs_dir(storage_name), run_name))
-
-
-def get_trajectory_video_filename(storage_name: str, run_name: str) -> str:
- """Return the filepath of the temporary rollout video gif."""
- return path.join(run_dir(storage_name, run_name),
- TRAJECTORY_GIF_DEFAULT_NAME)
-
-
-def get_learned_urdf_dir(storage_name: str, run_name: str) -> str:
- """Absolute path of learned model URDF storage directory."""
- return assure_created(
- path.join(run_dir(storage_name, run_name), URDFS_SUBFOLDER_NAME))
-
-
-def wandb_dir(storage_name: str, run_name: str) -> str:
- """Absolute path of tensorboard storage folder"""
- return assure_created(
- path.join(run_dir(storage_name, run_name), WANDB_SUBFOLDER_NAME))
-
-
-def get_evaluation_filename(storage_name: str, run_name: str) -> str:
- """Absolute path of experiment run statistics file."""
- return path.join(run_dir(storage_name, run_name), FINAL_EVALUATION_NAME)
-
-
-def get_configuration_filename(storage_name: str, run_name: str) -> str:
- """Absolute path of experiment configuration."""
- return path.join(run_dir(storage_name, run_name), CONFIG_FILENAME)
-
-
-def get_model_filename(storage_name: str, run_name: str) -> str:
- """Absolute path of experiment configuration."""
- return path.join(run_dir(storage_name, run_name), CHECKPOINT_FILENAME)
-
-
-def get_geometrically_accurate_urdf(urdf_name: str) -> str:
- """Replaces urdf_name with the name of a urdf corresponding to the same
- system with accurate geometry.
-
- Args:
- urdf_name: Name of a URDF file located in ``ASSET_DIR``
-
- Returns:
- The name of a URDF file located in ``ASSET_DIR`` that contains the true
- geometry of the system.
- """
- URDF_MAP = {'contactnets_cube_bad_init.urdf': 'contactnets_cube.urdf',
- 'contactnets_cube_small_init.urdf': 'contactnets_cube.urdf',
- 'contactnets_cube.urdf': 'contactnets_cube.urdf',
- 'contactnets_cube_mesh.urdf': 'contactnets_cube_mesh.urdf',
- 'contactnets_elbow_bad_init.urdf': 'contactnets_elbow.urdf',
- 'contactnets_elbow_small_init.urdf': 'contactnets_elbow.urdf',
- 'contactnets_elbow.urdf': 'contactnets_elbow.urdf',
- 'contactnets_elbow_mesh.urdf': 'contactnets_elbow_mesh.urdf',
- 'contactnets_asymmetric.urdf': 'contactnets_asymmetric.urdf',
- 'bundlesdf_cube_mesh.urdf': 'bundlesdf_cube_mesh.urdf',
- 'bundlesdf_bottle_mesh.urdf': 'bundlesdf_bottle_mesh.urdf'}
- base_name = urdf_name.split('/')[-1]
-
- if base_name in URDF_MAP.keys():
- return get_asset(URDF_MAP[base_name])
- else:
- print(f'Could not find geometrically accurate version of {base_name};' +
- f' defaulting to using it directly.')
- return get_asset(base_name)
-
-
-def study_dir(storage_name: str, study_name: str) -> str:
- """Absolute path of study-specific storage folder."""
- return assure_created(path.join(all_studies_dir(storage_name), study_name))
-
-
-def hyperparameter_opt_run_name(study_name: str, trial_number: int) -> str:
- """Experiment run name for hyperparameter optimization trial."""
- return f'{study_name}_hyperparameter_opt_{trial_number}'
-
-
-def sweep_run_name(study_name: str, sweep_run: int, n_train: int) -> str:
- """Experiment run name for dataset size sweep study."""
- return f'{study_name}_sweep_{sweep_run}_n_train_{n_train}'
-
-
-def get_hyperparameter_filename(storage_name: str, study_name: str) -> str:
- """Absolute path of optimized hyperparameters for a study"""
- return path.join(study_dir(storage_name, study_name),
- HYPERPARAMETERS_FILENAME)
-
-
-def load_binary(filename: str, load_callback: Callable[[BinaryIO], Any]) -> Any:
- """Load binary file"""
- with open(filename, 'rb') as file:
- value = load_callback(file)
- return value
-
-
-def load_string(filename: str,
- load_callback: Optional[Callable[[TextIO], Any]] = None) -> Any:
- """Load text file"""
- with open(filename, 'r', encoding='utf8') as file:
- if load_callback:
- value = load_callback(file)
- else:
- value = file.read()
- return value
-
-
-def save_binary(filename: str, value: Any,
- save_callback: Callable[[Any, BinaryIO], None]) -> None:
- """Save binary file."""
- with open(filename, 'wb') as file:
- save_callback(value, file)
-
-
-def save_string(
- filename: str,
- value: Any,
- save_callback: Optional[Callable[[Any, TextIO], None]] = None) -> None:
- """Save text file."""
- with open(filename, 'w', encoding='utf8') as file:
- if save_callback:
- save_callback(value, file)
- else:
- assert isinstance(value, str)
- file.write(value)
-
-
-def load_configuration(storage_name: str, run_name: str) -> Any:
- """Load configuration file."""
- configuration_filename = get_configuration_filename(storage_name, run_name)
- configuration = load_binary(configuration_filename, pickle.load)
- return configuration
-
-
-def save_configuration(storage_name: str, run_name: str, config: Any) -> None:
- """Save configuration file."""
- configuration_filename = get_configuration_filename(storage_name, run_name)
- save_binary(configuration_filename, config, pickle.dump)
-
-
-def load_evaluation(storage_name: str, run_name: str) -> Any:
- """Load evaluation file."""
- evaluation_filename = get_evaluation_filename(storage_name, run_name)
- return load_binary(evaluation_filename, pickle.load)
-
-
-def save_evaluation(storage_name: str, run_name: str, evaluation: Any) -> None:
- """Save evaluation file."""
- evaluation_filename = get_evaluation_filename(storage_name, run_name)
- save_binary(evaluation_filename, evaluation, pickle.dump)
-
-
-def load_hyperparameters(storage_name: str, study_name: str) -> Any:
- """Load hyperparameter file."""
- hyperparameter_filename = get_hyperparameter_filename(
- storage_name, study_name)
- return load_string(hyperparameter_filename, json.load)
-
-
-def save_hyperparameters(storage_name: str, study_name: str,
- hyperparameters: Any) -> None:
- """Save hyperparameter file."""
- hyperparameter_filename = get_hyperparameter_filename(
- storage_name, study_name)
- save_string(hyperparameter_filename, hyperparameters, json.dump)
diff --git a/dair_pll_old/dair_pll/geometry.py b/dair_pll_old/dair_pll/geometry.py
deleted file mode 100644
index 691bf18..0000000
--- a/dair_pll_old/dair_pll/geometry.py
+++ /dev/null
@@ -1,683 +0,0 @@
-"""Collision geometry representation for multibody systems.
-
-Each type of collision geometry is modeled as a class inheriting from the
-``CollisionGeometry`` abstract type. Different types of inheriting geometries
-will need to resolve collisions in unique ways, but one interface is always
-expected: a list of scalars to track during training.
-
-Many collision geometries, such as boxes and cylinders, fall into the class
-of bounded (compact) convex shapes. A general interface is defined in the
-abstract ``BoundedConvexCollisionGeometry`` type, which returns a set of
-witness points given support hyperplane directions. One implementation is
-the ``SparseVertexConvexCollisionGeometry`` type, which finds these points
-via brute force optimization over a short list of vertices.
-
-All collision geometries implemented here mirror a Drake ``Shape`` object. A
-general purpose converter is implemented in
-``PydrakeToCollisionGeometryFactory``.
-"""
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Tuple, Dict, cast, Union
-
-import fcl # type: ignore
-import numpy as np
-import pywavefront # type: ignore
-import torch
-from pydrake.geometry import Box as DrakeBox # type: ignore
-from pydrake.geometry import HalfSpace as DrakeHalfSpace # type: ignore
-from pydrake.geometry import Mesh as DrakeMesh # type: ignore
-from pydrake.geometry import Shape # type: ignore
-from torch import Tensor
-from torch.nn import Module, Parameter
-
-from dair_pll.deep_support_function import HomogeneousICNN, \
- extract_mesh_from_support_function
-from dair_pll.tensor_utils import pbmm, tile_dim, \
- rotation_matrix_from_one_vector
-
-_UNIT_BOX_VERTICES = Tensor([[0, 0, 0, 0, 1, 1, 1, 1.], [
- 0, 0, 1, 1, 0, 0, 1, 1.
-], [0, 1, 0, 1, 0, 1, 0, 1.]]).t() * 2. - 1.
-
-_ROT_Z_45 = Tensor([[2**(-0.5), -(2**(-0.5)), 0.], [2**(-0.5), 2**(-0.5), 0.],
- [0., 0., 1.]])
-
-_NOMINAL_HALF_LENGTH = 0.05 # 10cm is nominal object length
-
-_total_ordering = ['Plane', 'Polygon', 'Box', 'Sphere', 'DeepSupportConvex']
-
-_POLYGON_DEFAULT_N_QUERY = 4
-_DEEP_SUPPORT_DEFAULT_N_QUERY = 4
-_DEEP_SUPPORT_DEFAULT_DEPTH = 2
-_DEEP_SUPPORT_DEFAULT_WIDTH = 256
-
-
-
-class CollisionGeometry(ABC, Module):
- """Abstract interface for collision geometries.
-
- Collision geometries have heterogeneous implementation depending on the
- type of shape. This class mainly enforces the implementation of
- bookkeeping interfaces.
-
- When two collision geometries are evaluated for collision with
- ``GeometryCollider``, their ordering is constrained via a total order on
- collision geometry types, enforced with an overload of the ``>`` operator.
- """
-
- def __ge__(self, other) -> bool:
- """Evaluate total ordering of two geometries based on their types."""
- return _total_ordering.index(
- type(self).__name__) > _total_ordering.index(type(other).__name__)
-
- def __lt__(self, other) -> bool:
- """Evaluate total ordering of two geometries via passthrough to
- ``CollisionGeometry.__ge__()``."""
- return other.__ge__(self)
-
- @abstractmethod
- def scalars(self) -> Dict[str, float]:
- """Describes object via Tensorboard scalars.
-
- Any namespace for the object (e.g. "object_5") is assumed to be added by
- external code before adding to Tensorboard, so the names of the
- scalars can be natural descriptions of the geometry's parameters.
-
- Examples:
- A cylinder might be represented with the following output::
-
- {'radius': 5.2, 'height': 4.1}
-
- Returns:
- A dictionary of named parameters describing the geometry.
- """
-
-
-class Plane(CollisionGeometry):
- """Half-space/plane collision geometry.
-
- ``Plane`` geometries are assumed to be the plane z=0 in local (i.e.
- "body-axes") coordinates. Any tilted/raised/lowered half spaces are expected
- to be derived by placing the ``z=0`` plane in a rigidly-transformed
- frame."""
-
- def scalars(self) -> Dict[str, float]:
- """As the plane is fixed to be z=0, there are no parameters."""
- return {}
-
-
-class BoundedConvexCollisionGeometry(CollisionGeometry):
- """Interface for compact-convex collision geometries.
-
- Such shapes have a representation via a "support function" h(d),
- which takes in a hyperplane direction and returns how far the shape S
- extends in that direction, i.e.::
-
- h(d) = max_{s \\in S} s \\cdot d.
-
- This representation can be leveraged to derive "witness points" -- i.e.
- the closest point(s) between the ``BoundedConvexCollisionGeometry`` and
- another convex shape, such as another ``BoundedConvexCollisionGeometry``
- or a ``Plane``.
- """
-
- @abstractmethod
- def support_points(self, directions: Tensor) -> Tensor:
- """Returns a set of witness points representing contact with another
- shape off in the direction(s) ``directions``.
-
- This method will return a set of points ``S' \\subset S`` such that::
-
- argmax_{s \\in S} s \\cdot directions \\subset convexHull(S').
-
-
- In theory, returning exactly the argmax set would be sufficient.
- However, returning a slightly larger set of candidate points is
- simpler to compute and more robust for downstream contact resolution.
-
- Args:
- directions: (\*, 3) batch of unit-length directions.
-
- Returns:
- (\*, N, 3) sets of corresponding witness points of cardinality N.
- """
-
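-# A concrete instance of the support-function interface above (an
-# analytical example, not tied to any subclass defined here): for an
-# axis-aligned box with half-lengths (l_x, l_y, l_z), the support
-# function is h(d) = l_x*|d_x| + l_y*|d_y| + l_z*|d_z|, and
-# ``support_points(d)`` should return points whose convex hull contains
-# the face, edge, or corner maximizing the dot product with ``d``.
-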
-
-class SparseVertexConvexCollisionGeometry(BoundedConvexCollisionGeometry):
- """Partial implementation of ``BoundedConvexCollisionGeometry`` when
- witness points are guaranteed to be contained in a small set of vertices.
-
- An obvious subtype is any sort of polytope, such as a ``Box``. A less
- obvious subtype are shapes in which a direction-dependent set of vertices
- can be easily calculated. See ``Cylinder``, for instance.
- """
-
- def __init__(self, n_query: int) -> None:
- """Inits ``SparseVertexConvexCollisionGeometry`` with prescribed
- query interface.
-
- Args:
- n_query: number of vertices to return in witness point set.
- """
- super().__init__()
- self.n_query = n_query
-
- def support_points(self, directions: Tensor) -> Tensor:
- """Implements ``BoundedConvexCollisionGeometry.support_points()`` via
- brute force optimization over the witness vertex set.
-
- Specifically, if S_v is the vertex set, this function returns
- ``n_query`` elements s of S_v for which ``s \\cdot directions`` is
- highest. This set is not guaranteed to be sorted.
-
- Given the prescribed behavior of
- ``BoundedConvexCollisionGeometry.support_points()``, an implicit
- assumption of this implementation is that the convex hull of the top
- ``n_query`` points in S_v contains ``argmax_{s \\in S} s \\cdot
- directions``.
-
- Args:
- directions: (\*, 3) batch of directions.
-
- Returns:
- (\*, n_query, 3) sets of corresponding witness points.
- """
- assert directions.shape[-1] == 3
- original_shape = directions.shape
-
- # reshape to (product(*),3)
- directions = directions.view(-1, 3)
-
- # pylint: disable=E1103
- batch_range = torch.arange(directions.shape[0])
- vertices = self.get_vertices(directions)
- dots = pbmm(directions.unsqueeze(-2),
- vertices.transpose(-1, -2)).squeeze(-2)
-
- # top dot product indices in shape (product(*), n_query)
- # pylint: disable=E1103
- selections = torch.topk(dots, self.n_query, dim=-1,
- sorted=False).indices.t()
-
- top_vertices = torch.stack(
- [vertices[batch_range, selection] for selection in selections], -2)
- # reshape to (*, n_query, 3)
- return top_vertices.view(original_shape[:-1] + (self.n_query, 3))
-
- @abstractmethod
- def get_vertices(self, directions: Tensor) -> Tensor:
- """Returns sparse witness point set as collection of vertices.
-
- Specifically, given search directions, returns a set of points
- ``S_v`` for which::
-
- argmax_{s \\in S} s \\cdot directions \\subset convexHull(S_v).
-
- Args:
- directions: (\*, 3) batch of unit-length directions.
- Returns:
- (\*, N, 3) witness point convex hull vertices.
- """
-
-
-class Polygon(SparseVertexConvexCollisionGeometry):
- """Concrete implementation of a convex polytope.
-
- Implemented via ``SparseVertexConvexCollisionGeometry`` as a static set
- of vertices, which models the underlying shape as all convex combinations
- of the vertices.
- """
- vertices_parameter: Parameter
-
- def __init__(self,
- vertices: Tensor,
- n_query: int = _POLYGON_DEFAULT_N_QUERY) -> None:
- """Inits ``Polygon`` object with initial vertex set.
-
- Args:
- vertices: (N, 3) static vertex set.
- n_query: number of vertices to return in witness point set.
- """
- super().__init__(n_query)
- scaled_vertices = vertices.clone()/_NOMINAL_HALF_LENGTH
- self.vertices_parameter = Parameter(scaled_vertices, requires_grad=True)
-
- def get_vertices(self, directions: Tensor) -> Tensor:
- """Return batched view of static vertex set"""
- scaled_vertices = _NOMINAL_HALF_LENGTH * self.vertices_parameter
- return scaled_vertices.expand(
- directions.shape[:-1] + scaled_vertices.shape)
-
- def scalars(self) -> Dict[str, float]:
- """Return one scalar for each vertex index."""
- scalars = {}
- axes = ['x', 'y', 'z']
-
- # Use arbitrary direction to query the Polygon's vertices (value does
- # not matter).
- arbitrary_direction = torch.ones((1,3))
- vertices = self.get_vertices(arbitrary_direction).squeeze(0)
-
- for axis, values in zip(axes, vertices.t()):
- for vertex_index, value in enumerate(values):
- scalars[f'v{vertex_index}_{axis}'] = value.item()
- return scalars
-
-
-class DeepSupportConvex(SparseVertexConvexCollisionGeometry):
- r"""Deep support function convex shape.
-
- Any convex shape :math:`S` can be equivalently represented via its support
- function :math:`f(d)`, which returns the extent to which the object
- extends in the :math:`d` direction:
-
- .. math::
-
- f(d) = \max_{s \in S} s \cdot d.
-
- Given a direction, the set of points that form the :math:`\arg\max` in
- :math:`f(d)` is exactly the convex subgradient :math:`\partial_d f(d)`.
-
- Furthermore, for every convex shape, :math:`f(d)` is convex and
- positively homogeneous, and every convex and positively homogeneous
- :math:`f(d)` is the support function of some convex shape.
-
- This collision geometry type implements the support function directly as
- a convex and positively homogeneous neural network (
- :py:class:`~dair_pll.deep_support_function.HomogeneousICNN`\)."""
- network: HomogeneousICNN
- """Support function representation as a neural net."""
- perturbations: Tensor
- """Perturbed support directions, which aid mesh-plane contact stability."""
- fcl_geometry: fcl.BVHModel
- r""":py:mod:`fcl` mesh collision geometry representation."""
-
- def __init__(self,
- vertices: Tensor,
- n_query: int = _DEEP_SUPPORT_DEFAULT_N_QUERY,
- depth: int = _DEEP_SUPPORT_DEFAULT_DEPTH,
- width: int = _DEEP_SUPPORT_DEFAULT_WIDTH,
- perturbation: float = 0.4) -> None:
- r"""Inits ``DeepSupportConvex`` object with initial vertex set.
-
- When calculating a sparse vertex set with :py:meth:`get_vertices`,
- supplements the support direction with nearby directions randomly.
-
- Args:
- vertices: ``(N, 3)`` initial vertex set.
- n_query: Number of vertices to return in witness point set.
- depth: Depth of support function network.
- width: Width of support function network.
- perturbation: support direction sampling parameter.
- """
- # pylint: disable=too-many-arguments,E1103
- super().__init__(n_query)
- length_scale = (vertices.max(dim=0).values -
- vertices.min(dim=0).values).norm() / 2
- self.network = HomogeneousICNN(depth, width, scale=length_scale)
- self.perturbations = torch.cat((torch.zeros(
- (1, 3)), perturbation * (torch.rand((n_query - 1, 3)) - 0.5)))
-
- def get_vertices(self, directions: Tensor) -> Tensor:
- """Return batched view of support points of interest.
-
- Given a direction :math:`d`, this function finds the support point of
- the object in that direction (and in nearby perturbed directions),
- calculated via the envelope theorem from the support function network.
-
- Args:
- directions: ``(*, 3)`` batch of support directions to sample.
-
- Returns:
- ``(*, n_query, 3)`` sampled support points.
- """
- perturbed = directions.unsqueeze(-2)
- perturbed = tile_dim(perturbed, self.n_query, -2)
- perturbed += self.perturbations.expand(perturbed.shape)
- perturbed /= perturbed.norm(dim=-1, keepdim=True)
- return self.network(perturbed)
-
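- # Note on the sampling above: with the defaults ``n_query = 4`` and
- # ``perturbation = 0.4``, each query direction yields its own support
- # point plus three support points at nearby, randomly perturbed
- # directions, so their convex hull covers a small surface patch rather
- # than a single point (the mesh-plane stability aid noted in the class
- # attributes).
-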
- def train(self, mode: bool = True) -> DeepSupportConvex:
- r"""Override training-mode setter from :py:mod:`torch`.
-
- Sets a static fcl mesh geometry for the entirety of evaluation time,
- as the underlying support function is not changing.
-
- Args:
- mode: ``True`` for training, ``False`` for evaluation.
-
- Returns:
- ``self``.
- """
- if not mode:
- self.fcl_geometry = self.get_fcl_geometry()
- return cast(DeepSupportConvex, super().train(mode))
-
- def get_fcl_geometry(self) -> fcl.BVHModel:
- """Retrieves :py:mod:`fcl` mesh collision geometry representation.
-
- If evaluation mode is set, retrieves precalculated version.
-
- Returns:
- :py:mod:`fcl` bounding volume hierarchy for mesh.
- """
- if self.training:
- mesh = extract_mesh_from_support_function(self.network)
- vertices = mesh.vertices.numpy()
- faces = mesh.faces.numpy()
- self.fcl_geometry = fcl.BVHModel()
- self.fcl_geometry.beginModel(vertices.shape[0], faces.shape[0])
- self.fcl_geometry.addSubModel(vertices, faces)
- self.fcl_geometry.endModel()
-
- return self.fcl_geometry
-
- def scalars(self) -> Dict[str, float]:
- """no scalars!"""
- return {}
-
-
-class Box(SparseVertexConvexCollisionGeometry):
- """Implementation of cuboid geometry as a sparse vertex convex hull.
-
- To prevent learning negative box lengths, the learned parameters are stored
- as :py:attr:`length_params`, and the box's half lengths can be computed
- as their absolute value. The desired half lengths can be accessed via
- :py:meth:`get_half_lengths`.
- """
- length_params: Parameter
- unit_vertices: Tensor
-
- def __init__(self, half_lengths: Tensor, n_query: int) -> None:
- """Inits ``Box`` object with initial size.
-
- Args:
- half_lengths: (3,) half-length dimensions of box on x, y,
- and z axes.
- n_query: number of vertices to return in witness point set.
- """
- super().__init__(n_query)
-
- assert half_lengths.numel() == 3
-
- scaled_half_lengths = half_lengths.clone()/_NOMINAL_HALF_LENGTH
- self.length_params = Parameter(scaled_half_lengths.view(1, -1),
- requires_grad=True)
- self.unit_vertices = _UNIT_BOX_VERTICES.clone()
-
- def get_half_lengths(self) -> Tensor:
- """From the stored :py:attr:`length_params`, compute the half lengths of
- the box as its absolute value."""
- return torch.abs(self.length_params) * _NOMINAL_HALF_LENGTH
-
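- # Scaling example: with ``_NOMINAL_HALF_LENGTH = 0.05``, a cube with
- # half lengths ``Tensor([0.05, 0.05, 0.05])`` is stored internally as
- # ``length_params == [[1., 1., 1.]]``, and ``get_half_lengths()``
- # recovers ``[[0.05, 0.05, 0.05]]`` by taking the absolute value and
- # scaling back by the nominal half length.
-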
- def get_vertices(self, directions: Tensor) -> Tensor:
- """Returns view of cuboid's static vertex set."""
- return (self.unit_vertices *
- self.get_half_lengths()).expand(directions.shape[:-1] +
- self.unit_vertices.shape)
-
- def scalars(self) -> Dict[str, float]:
- """Returns each axis's full length as a scalar."""
- scalars = {
- f'len_{axis}': 2 * value.item()
- for axis, value in zip(['x', 'y', 'z'],
- self.get_half_lengths().view(-1))
- }
- return scalars
-
-
-class Sphere(BoundedConvexCollisionGeometry):
- """Implements sphere geometry via its support function.
-
- It is trivial to calculate the witness point for a sphere contact as
- simply the product of the sphere's radius and the support direction.
-
- To prevent learning a negative radius, the learned parameter is stored as
- :py:attr:`length_param`, and the sphere's radius can be computed as its
- absolute value. The desired radius can be accessed via
- :py:meth:`get_radius`.
- """
- length_param: Parameter
-
- def __init__(self, radius: Tensor) -> None:
- """Inits ``Sphere`` with an initial (1,) ``radius`` tensor."""
- super().__init__()
- assert radius.numel() == 1
-
- self.length_param = Parameter(radius.clone().view(()),
- requires_grad=True)
-
- def get_radius(self) -> Tensor:
- """From the stored :py:attr:`length_param`, compute the radius of the
- sphere as its absolute value."""
- return torch.abs(self.length_param)
-
- def support_points(self, directions: Tensor) -> Tensor:
- """Implements ``BoundedConvexCollisionGeometry.support_points()``
- via analytic expression::
-
- argmax_{s \\in S} s \\cdot directions = directions * radius.
-
- Args:
- directions: (\*, 3) batch of directions.
-
- Returns:
- (\*, 1, 3) corresponding witness point sets of cardinality 1.
- """
- return (directions.clone() * self.get_radius()).unsqueeze(-2)
-
- def scalars(self) -> Dict[str, float]:
- """Logs radius as a scalar."""
- return {'radius': self.get_radius().item()}
-
-
-class PydrakeToCollisionGeometryFactory:
- """Utility class for converting Drake ``Shape`` instances to
- ``CollisionGeometry`` instances."""
-
- @staticmethod
- def convert(drake_shape: Shape, represent_geometry_as: str
- ) -> CollisionGeometry:
- """Converts abstract ``pydrake.geometry.shape`` to
- ``CollisionGeometry`` according to the desired ``represent_geometry_as``
- type.
-
- Notes:
- The desired ``represent_geometry_as`` type will only affect
- ``DrakeBox`` and ``DrakeMesh`` types, not ``DrakeHalfSpace`` types.
-
- Args:
- drake_shape: drake shape type to convert.
- represent_geometry_as: desired ``CollisionGeometry`` representation,
- e.g. ``'box'``, ``'polygon'``, or ``'mesh'``.
-
- Returns:
- Collision geometry representation of shape.
-
- Raises:
- TypeError: When provided object is not a supported Drake shape type.
- """
- if isinstance(drake_shape, DrakeBox):
- return PydrakeToCollisionGeometryFactory.convert_box(
- drake_shape, represent_geometry_as)
- if isinstance(drake_shape, DrakeHalfSpace):
- return PydrakeToCollisionGeometryFactory.convert_plane()
- if isinstance(drake_shape, DrakeMesh):
- return PydrakeToCollisionGeometryFactory.convert_mesh(
- drake_shape, represent_geometry_as)
- raise TypeError(
- "Unsupported type for drake Shape() to"
- "CollisionGeometry() conversion:", type(drake_shape))
-
- @staticmethod
- def convert_box(drake_box: DrakeBox, represent_geometry_as: str
- ) -> Union[Box, Polygon]:
- """Converts ``pydrake.geometry.Box`` to ``Box`` or ``Polygon``."""
- if represent_geometry_as == 'box':
- half_widths = 0.5 * Tensor(np.copy(drake_box.size()))
- return Box(half_widths, 4)
-
- if represent_geometry_as == 'polygon':
- pass
-
-        raise NotImplementedError('Cannot presently represent a DrakeBox() ' + \
-                                  f'as {represent_geometry_as} type.')
-
- @staticmethod
- def convert_plane() -> Plane:
- """Converts ``pydrake.geometry.HalfSpace`` to ``Plane``."""
- return Plane()
-
- @staticmethod
- def convert_mesh(drake_mesh: DrakeMesh, represent_geometry_as: str
- ) -> Union[DeepSupportConvex, Polygon]:
- """Converts ``pydrake.geometry.Mesh`` to ``Polygon`` or
- ``DeepSupportConvex``."""
- filename = drake_mesh.filename()
- mesh = pywavefront.Wavefront(filename)
- vertices = Tensor(mesh.vertices)
-
- if represent_geometry_as == 'mesh':
- return DeepSupportConvex(vertices)
-
- if represent_geometry_as == 'polygon':
- return Polygon(vertices)
-
- raise NotImplementedError(f'Cannot presently represent a ' + \
- f'DrakeMesh() as {represent_geometry_as} type.')
-
-
-class GeometryCollider:
- """Utility class for colliding two ``CollisionGeometry`` instances."""
-
- @staticmethod
- def collide(geometry_a: CollisionGeometry, geometry_b: CollisionGeometry,
- R_AB: Tensor, p_AoBo_A: Tensor) -> \
- Tuple[Tensor, Tensor, Tensor, Tensor]:
- """Collides two collision geometries.
-
- Takes in the two geometries as well as a relative transform between
- them. This function is a thin shell for other static methods of
- ``GeometryCollider`` where the given geometries are guaranteed to
- have specific types.
-
- Args:
- geometry_a: first collision geometry
- geometry_b: second collision geometry, with type
-                ordering ``not geometry_a > geometry_b``.
- R_AB: (\*,3,3) rotation between geometry frames
- p_AoBo_A: (\*, 3) offset of geometry frame origins
-
- Returns:
- (\*, N) batch of witness point pair distances
- (\*, N, 3, 3) contact frame C rotation in A, R_AC, where the z
- axis of C is contained in the normal cone of body A at contact
- point Ac and is parallel (or antiparallel) to AcBc.
- (\*, N, 3) witness points Ac on A, p_AoAc_A
- (\*, N, 3) witness points Bc on B, p_BoBc_B
- """
- assert not geometry_a > geometry_b
-
- # case 1: half-space to compact-convex collision
- if isinstance(geometry_a, Plane) and isinstance(
- geometry_b, BoundedConvexCollisionGeometry):
- return GeometryCollider.collide_plane_convex(
- geometry_b, R_AB, p_AoBo_A)
- if isinstance(geometry_a, DeepSupportConvex) and isinstance(
- geometry_b, DeepSupportConvex):
- return GeometryCollider.collide_mesh_mesh(geometry_a, geometry_b,
- R_AB, p_AoBo_A)
- raise TypeError(
- "No type-specific implementation for geometry "
- "pair of following types:",
- type(geometry_a).__name__,
- type(geometry_b).__name__)
-
- @staticmethod
- def collide_plane_convex(geometry_b: BoundedConvexCollisionGeometry,
- R_AB: Tensor, p_AoBo_A: Tensor) -> \
- Tuple[Tensor, Tensor, Tensor, Tensor]:
- """Implementation of ``GeometryCollider.collide()`` when
- ``geometry_a`` is a ``Plane`` and ``geometry_b`` is a
- ``BoundedConvexCollisionGeometry``."""
- R_BA = R_AB.transpose(-1, -2)
-
- # support direction on body B is negative z axis of A frame,
- # in B frame coordinates, i.e. the final column of ``R_BA``.
- directions_b = -R_BA[..., 2]
-
- # B support points of shape (*, N, 3)
- p_BoBc_B = geometry_b.support_points(directions_b)
- p_AoBc_A = pbmm(p_BoBc_B, R_BA) + p_AoBo_A.unsqueeze(-2)
-
- # phi is the A-axes z coordinate of Bc
- phi = p_AoBc_A[..., 2]
-
- # Ac is the projection of Bc onto the z=0 plane in frame A.
- # pylint: disable=E1103
- p_AoAc_A = torch.cat(
- (p_AoBc_A[..., :2], torch.zeros_like(p_AoBc_A[..., 2:])), -1)
-
- # ``R_AC`` (\*, N, 3, 3) is simply a batch of identities, as the z
- # axis of A points out of the plane.
- # pylint: disable=E1103
- R_AC = torch.eye(3).expand(p_AoAc_A.shape + (3,))
- return phi, R_AC, p_AoAc_A, p_BoBc_B
-
- @staticmethod
- def collide_mesh_mesh(
- geometry_a: DeepSupportConvex, geometry_b: DeepSupportConvex,
- R_AB: Tensor,
- p_AoBo_A: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
- """Implementation of ``GeometryCollider.collide()`` when
- both geometries are ``DeepSupportConvex``\es."""
- # pylint: disable=too-many-locals
- p_AoBo_A = p_AoBo_A.unsqueeze(-2)
- original_batch_dims = p_AoBo_A.shape[:-2]
- p_AoBo_A = p_AoBo_A.view(-1, 3)
- R_AB = R_AB.view(-1, 3, 3)
- batch_range = p_AoBo_A.shape[0]
-
- # Assume collision directions are piecewise constant, which allows us
- # to use :py:mod:`fcl` to compute the direction, without the need to
- # differentiate through it.
- # pylint: disable=E1103
- directions = torch.zeros_like(p_AoBo_A)
-
-        # Set up fcl collision and distance queries.
- a_obj = fcl.CollisionObject(geometry_a.get_fcl_geometry(),
- fcl.Transform())
- b_obj = fcl.CollisionObject(geometry_b.get_fcl_geometry(),
- fcl.Transform())
- collision_request = fcl.CollisionRequest()
- collision_request.enable_contact = True
- distance_request = fcl.DistanceRequest()
- distance_request.enable_nearest_points = True
-
- for transform_index in range(batch_range):
- b_t = fcl.Transform(R_AB[transform_index].detach().numpy(),
- p_AoBo_A[transform_index].detach().numpy())
- b_obj.setTransform(b_t)
- result = fcl.CollisionResult()
- if fcl.collide(a_obj, b_obj, collision_request, result) > 0:
- # Collision detected.
- # Assume only 1 contact point.
- directions[transform_index] += result.contacts[0].normal
- else:
- result = fcl.DistanceResult()
- fcl.distance(a_obj, b_obj, distance_request, result)
- directions[transform_index] += Tensor(result.nearest_points[1] -
- result.nearest_points[0])
- directions /= directions.norm(dim=-1, keepdim=True)
- R_AC = rotation_matrix_from_one_vector(directions, 2)
- p_AoAc_A = geometry_a.network(directions)
- p_BoBc_B = geometry_b.network(
- -pbmm(directions.unsqueeze(-2), R_AB).squeeze(-2))
- p_BoBc_A = pbmm(p_BoBc_B.unsqueeze(-2), R_AB.transpose(-1,
- -2)).squeeze(-2)
- p_AcBc_A = -p_AoAc_A + p_AoBo_A + p_BoBc_A
-
- phi = (p_AcBc_A * R_AC[..., 2]).sum(dim=-1)
-
- phi = phi.reshape(original_batch_dims + (1,))
- R_AC = R_AC.reshape(original_batch_dims + (1, 3, 3))
- p_AoAc_A = p_AoAc_A.reshape(original_batch_dims + (1, 3))
- p_BoBc_B = p_BoBc_B.reshape(original_batch_dims + (1, 3))
- return phi, R_AC, p_AoAc_A, p_BoBc_B
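
A minimal usage sketch of the plane/convex branch above, with hypothetical values; it assumes the ``Plane``, ``Sphere``, and ``GeometryCollider`` classes from this module plus the type ordering defined on ``CollisionGeometry``::

    import torch

    plane = Plane()
    sphere = Sphere(torch.tensor([0.05]))        # 5 cm radius
    R_AB = torch.eye(3).expand(1, 3, 3)          # sphere frame aligned with plane frame
    p_AoBo_A = torch.tensor([[0.0, 0.0, 0.03]])  # sphere center 3 cm above the plane
    phi, R_AC, p_AoAc_A, p_BoBc_B = GeometryCollider.collide(
        plane, sphere, R_AB, p_AoBo_A)
    # phi has shape (1, 1) and equals -0.02: the sphere penetrates the plane by 2 cm.
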
diff --git a/dair_pll_old/dair_pll/hyperparameter.py b/dair_pll_old/dair_pll/hyperparameter.py
deleted file mode 100644
index 100f9c2..0000000
--- a/dair_pll_old/dair_pll/hyperparameter.py
+++ /dev/null
@@ -1,309 +0,0 @@
-r"""Interface for hyperparameter declaration and optimization.
-
-Each experiment run (see
-:py:class:`~dair_pll.experiment.SupervisedLearningExperiment`\ ) can
-have its
-hyperparameters optimized via :py:mod:`optuna`\ . By design, each experiment
-is fully described via a
-:py:class:`~dair_pll.experiment_config.SupervisedLearningExperimentConfig`
-object. This file implements a :py:class:`Hyperparameter` class, which can be
-declared as a member variable of a
-:py:class:`~dair_pll.experiment_config.SupervisedLearningExperimentConfig`,
-or recursively as one of its own :py:func:`~dataclasses.dataclass` members.
-
-The following hyperparameters types and priors are supported:
-
- * :py:class:`Scalar`, a :py:class:`Float` or :py:class:`Int` which is
- either uniformly or log-uniformly distributed.
-    * :py:class:`Categorical`, a list of :py:class:`float`\ , :py:class:`int`\ ,
-      or :py:class:`str` values from which one is selected uniformly.
-
-"""
-from abc import abstractmethod, ABC
-from dataclasses import is_dataclass
-from typing import Tuple, TypeVar, Sequence, List, Union, Optional, \
- Dict, Generic, Callable, Any
-
-from optuna.trial import Trial
-
-ValueType = Union[int, float, str]
-ValueDict = Dict[str, ValueType]
-
-ScalarT = TypeVar('ScalarT', int, float)
-r"""Templating type hint for :py:class:`Scalar`\ s."""
-
-
-class Hyperparameter(ABC):
- """Class for declaring and sampling hyperparameters.
-
- Hyperparameters have both a :py:attr:`value` and a
-    :py:attr:`distribution` from which values might be selected.
-
- Declaration of a hyperparameter in a configuration may look like::
-
- @dataclass
- class XXXConfig:
- int_par: Int = Int(5, (0, 10))
- float_par: Float = Float(0.1, (1e-4, 1e3), log=True)
- cat_par: Categorical = Categorical('val2', ['val0','val1', 'val2'])
-
- In these cases, the first argument is the default :py:attr:`value` of the
- hyperparameter. However, at hyperparameter optimization time,
- :py:mod:`optuna` will select hyperparameters from the
- :py:attr:`distribution` via the :py:meth:`suggest` function. Some
- hyperparameter types have a default distribution as described in their
- documentation."""
-
- value: ValueType
- """Hyperparameter's current value."""
- distribution: Sequence[ValueType]
- """Parameters for distribution from which to select value."""
-
- def __init__(self, value: ValueType, distribution: Sequence[ValueType]):
- self.distribution = distribution
- self.value = value
-
- def set(self, value: ValueType):
- """Setter for underlying hyperparameter value."""
- self.value = value
-
- def __repr__(self) -> str:
- """Human-readable representation of underlying hyperparameter value."""
- return f'{type(self).__qualname__}({str(self.value)})'
-
- @abstractmethod
- def suggest(self, trial: Trial, name: str) -> ValueType:
- r"""Suggests a value for the hyperparameter.
-
- This function is abstract as to facilitate specialization for
- inheriting types.
-
- Args:
- trial: Optuna trial in which parameter is being suggested.
- name: Name of hyperparameter.
-
- Returns:
- Suggested hyperparameter value.
- """
-
-
-class Scalar(Hyperparameter, ABC, Generic[ScalarT]):
- r"""Abstract scalar hyperparameter type.
-
- Defines a uniform or log-uniform distribution over a scalar type, such as
- integers or real numbers.
-
- The bounds of the distribution can either be specified as a tuple in the
- :py:attr:`distribution` attribute, or set as a default based on the
- provided :py:attr:`value` in the abstract method :py:meth:`default_range`\ .
- """
- value: ScalarT
- """Scalar value of hyperparameter."""
- distribution: Tuple[ScalarT, ScalarT]
- """Bounds of scalar distribution in format (lower, upper)."""
- log: bool
- """Whether the distribution is uniform or log-uniform."""
-
- def __init__(self,
- value: ScalarT,
- distribution: Optional[Tuple[ScalarT, ScalarT]] = None,
- log: bool = False):
- if not distribution:
- distribution = self.default_range(value, log)
- assert distribution[1] >= distribution[0]
- if log:
- assert distribution[0] > 0
-
- super().__init__(value, distribution)
- self.log = log
-
- @abstractmethod
- def default_range(self, value: ScalarT,
- log: bool) -> Tuple[ScalarT, ScalarT]:
- """Returns default range for Scalar, depending on provided value."""
-
-
-INT_LOG_WIDTH = 2**3
-INT_ABS_WIDTH = 2
-
-
-class Int(Scalar):
- """Integer scalar hyperparameter."""
- value: int
- distribution: Tuple[int, int]
-
- def default_range(self, value: int, log: bool) -> Tuple[int, int]:
- """Default bounds for integer hyperparameter.
-
- Returns ``(max(1, value // RANGE), value * RANGE)``, where ``RANGE``
- is ``8`` in the log-uniform case and ``2`` otherwise.
-
- Args:
- value: Default value of hyperparameter.
- log: Whether the distribution is uniform or log-uniform.
-
- Returns:
- Default lower/upper bounds."""
- width = INT_LOG_WIDTH if log else INT_ABS_WIDTH
- return max(1, value // width), value * width
-
- def suggest(self, trial: Trial, name: str) -> int:
- r"""Returns suggested (log)-uniform distributed integer."""
- return trial.suggest_int(name, *self.distribution, log=self.log)
-
-
-FLOAT_LOG_WIDTH = 1e2
-FLOAT_ABS_WIDTH = 2.
-
-
-class Float(Scalar):
- """Real number (floating-point) scalar hyperparameter."""
- value: float
- distribution: Tuple[float, float]
-
- def default_range(self, value: float, log: bool) -> Tuple[float, float]:
- """Default bounds for float hyperparameter.
-
- Returns ``(value / RANGE, value * RANGE)``, where ``RANGE``
- is ``100`` in the log-uniform case and ``2`` otherwise.
-
- Args:
- value: Default value of hyperparameter.
- log: Whether the distribution is uniform or log-uniform.
-
- Returns:
- Default lower/upper bounds.
- """
- width = FLOAT_LOG_WIDTH if log else FLOAT_ABS_WIDTH
- return value / width, value * width
-
- def suggest(self, trial: Trial, name: str) -> float:
- r"""Returns suggested (log)-uniform distributed float."""
- if self.log:
- return trial.suggest_loguniform(name, *self.distribution)
- return trial.suggest_uniform(name, *self.distribution)
-
-
-# Only one new public method; Categorical just happens to be a particularly
-# simple inheriting type of Hyperparameter.
-class Categorical(Hyperparameter): # pylint: disable=too-few-public-methods
- """Categorical hyperparameter."""
- value: ValueType
- distribution: List[ValueType]
-
- def suggest(self, trial: Trial, name: str) -> ValueType:
- """Suggests from listed values in distribution uniformly."""
- suggestion = trial.suggest_categorical(name, self.distribution)
- assert isinstance(suggestion, (float, int, str))
- return suggestion
-
-
-def is_dataclass_instance(obj) -> bool:
- r"""Helper function to check if input object is a
- :py:func:`~dataclasses.dataclass`\ ."""
- return is_dataclass(obj) and not isinstance(obj, type)
-
-
-def traverse_config(config: Any,
- callback: Callable[[str, Hyperparameter], None],
- namespace: str = '') -> None:
- r"""Recursively searches through ``config`` and its member
- :py:func:`~dataclasses.dataclass`\ es, for member
- :py:class:`Hyperparameter` objects.
-
- While traversing the tree, maintains a `namespace` constructed from a
- concatenation of the attributes' names.
-
- When a :py:class:`Hyperparameter` 'h' under attribute name `attr` is
-    encountered, this function calls ``callback`` with inputs
- ``(namespace + attr, h)``\ .
-
- Args:
- config: Configuration :py:func:`~dataclasses.dataclass` \ .
- callback: Callback performed on each :py:class:`Hyperparameter`\ .
- namespace: (Optional/internal) prefix for naming hyperparameters.
- """
- assert is_dataclass_instance(config)
-
- for field in config.__dataclass_fields__:
- value = getattr(config, field)
- if is_dataclass_instance(value):
- subspace = f'{namespace}{field}.'
- traverse_config(value, callback, subspace)
- if isinstance(value, Hyperparameter):
- name = namespace + field
- callback(name, value)
-
-
-def generate_suggestion(config, trial: Trial) -> ValueDict:
-    r"""Suggests a value for each hyperparameter in the configuration (but
-    does not set these values).
-
- Recursively searches through ``config`` and its member
- :py:func:`~dataclasses.dataclass`\ es, and generates a suggestion for
- each contained :py:class:`Hyperparameter`\ .
-
- Args:
- config: Configuration :py:func:`~dataclasses.dataclass` \ .
- trial: Optuna trial in which parameters are being suggested.
-
- Returns:
- Suggested hyperparameter value dictionary.
- """
- assert is_dataclass_instance(config)
-
- out_dict = {}
-
- def callback(name: str, hyperparameter: Hyperparameter):
- out_dict[name] = hyperparameter.suggest(trial, name)
-
- traverse_config(config, callback)
-
- return out_dict
-
-
-def load_suggestion(config: Any, suggestion: ValueDict) -> None:
- r"""Fill all hyperparameters in configuration with suggestions.
-
- Recursively searches through ``config`` and its member
- :py:func:`~dataclasses.dataclass`\ es, and sets the values to the
- suggestion for each contained :py:class:`Hyperparameter`\ .
-
- The ``suggestion`` is assumed to be generated by running
- :py:func:`generate_suggestion` on an identical type to ``config``.
-
- Args:
- config: Configuration :py:func:`~dataclasses.dataclass` \ .
- suggestion: Suggested hyperparameter set.
- """
- assert is_dataclass_instance(config)
-
- def callback(name: str, hyperparameter: Hyperparameter):
- hyperparameter.set(suggestion[name])
-
- traverse_config(config, callback)
-
-
-def hyperparameter_values(config: Any) -> ValueDict:
- r"""Lists current values for all hyperparameters in configuration.
-
- Recursively searches through ``config`` and its member
- :py:func:`~dataclasses.dataclass`\ es, and records value for each contained
- :py:class:`Hyperparameter`\ .
-
- Args:
- config: Configuration :py:func:`~dataclasses.dataclass` \ .
-
- Returns:
- Hyperparameter value dictionary.
- """
- assert is_dataclass_instance(config)
-
- out_dict = {}
-
- def callback(name: str, hyperparameter: Hyperparameter):
- out_dict[name] = hyperparameter.value
-
- traverse_config(config, callback)
-
- return out_dict
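
For reference, a short sketch of how the declarations above are meant to be combined with :py:mod:`optuna`; ``SmallConfig`` and ``train_and_evaluate`` are hypothetical stand-ins rather than names from this repository::

    from dataclasses import dataclass

    import optuna

    @dataclass
    class SmallConfig:
        hidden_size: Int = Int(128)                      # default range (64, 256)
        lr: Float = Float(1e-3, (1e-5, 1e-1), log=True)  # log-uniform
        activation: Categorical = Categorical('relu', ['relu', 'tanh'])

    def objective(trial: optuna.trial.Trial) -> float:
        config = SmallConfig()
        suggestion = generate_suggestion(config, trial)  # e.g. {'hidden_size': 96, ...}
        load_suggestion(config, suggestion)              # write suggested values back
        return train_and_evaluate(config)                # hypothetical training routine

    study = optuna.create_study()
    study.optimize(objective, n_trials=25)
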
diff --git a/dair_pll_old/dair_pll/inertia.py b/dair_pll_old/dair_pll/inertia.py
deleted file mode 100644
index 70fa91d..0000000
--- a/dair_pll_old/dair_pll/inertia.py
+++ /dev/null
@@ -1,340 +0,0 @@
-r"""Utilities for transforming representations of rigid body inertia.
-
-The inertial parameterization of a body :math:`B` is given by 10 degrees of
-freedom:
-
- * :math:`m` (1 DoF), the mass.
- * :math:`^{Bo}p^{Bcm}` (3 DoF), the position of the center of mass
- :math:`Bcm` relative to the body's origin::
-
- p_BoBcm_B == [p_x, p_y, p_z]
-
- * :math:`I^{B/Bcm}` (6 DoF), the symmetric 3x3 inertia dyadic about
- the body's center of mass :math:`Bcm`::
-
- I_BBcm_B == [[I_xx, I_xy, I_xz],[I_xy, I_yy, I_yz],[I_xz, I_yz, I_zz]]
-
-Here we list several formats that can be converted between each other
-freely, under the assumption that their values are non-degenerate and valid
-(i.e. :math:`m > 0`).
-
- * ``pi_cm`` is an intuitive formatting of these 10 DoF as a standard vector in
- :math:`\mathbb{R}^{10}` as::
-
- [m, m * p_x, m * p_y, m * p_z, I_xx, I_yy, I_zz, I_xy, I_xz, I_yz]
-
- * ``drake_spatial_inertia_vector`` is a scaling that can be used to
-   construct a new Drake :py:attr:`~pydrake.multibody.tree.SpatialInertia`,
-   where the inertial tensor is normalized by mass (see
- :py:attr:`~pydrake.multibody.tree.UnitInertia`)::
-
- [m, p_x, p_y, p_z, ...
- I_xx , I_yy , I_zz , I_xy, I_xz, I_yz]
-
- * ``drake`` is a packaging of ``drake_spatial_inertia_vector`` into
- a Drake :py:attr:`~pydrake.multibody.tree.SpatialInertia` object,
- with member callbacks to access the terms::
-
- SpatialInertia.get_mass() == m
- SpatialInertia.get_com() == p_BoBcm_B
- SpatialInertia.CalcRotationalInertia() == I_BBo_B
- SpatialInertia.Shift(p_BoBcm_B).CalcRotationalInertia() == I_BBcm_B
-
- * ``theta`` is a format designed for underlying smooth, unconstrained,
- and non-degenerate parameterization of rigid body inertia. For a body,
- any value in :math:`\mathbb{R}^{10}` for ``theta`` can be mapped to a
- valid and non-degenerate set of inertial terms as follows::
-
- theta == [log_m, h_x, h_y, h_z, d_1, d_2, d_3, s_12, s_13, s_23]
- m == exp(log_m)
- p_BoBcm_B == [h_x, h_y, h_z] / m
- I_BBcm_B = trace(Sigma(theta)) * I_3 - Sigma(theta)
-
- where ``Sigma`` :math:`(\Sigma)` is constructed via log-Cholesky
- parameterization, similar to the one in Rucker and Wensing [1]_ :
-
- .. math::
- \begin{align}
- \Sigma &= L L^T, \\
- L &= \begin{bmatrix} \exp(d_1) & 0 & 0 \\
- s_{12} & \exp(d_2) & 0 \\
- s_{13} & s_{23} & \exp(d_3)
- \end{bmatrix}.
- \end{align}
-
-
- While this parameterization is distinct, it retains the diffeomorphism
- between :math:`\theta \in \mathbb{R}^{10}` and valid rigid body
- inertia. Note that we use the Drake ordering of the inertial
- off-diagonal terms as ``[Ixy Ixz Iyz]``, whereas Rucker and Wensing
- [1]_ uses ``[Ixy Iyz Ixz]``.
-
- * ``urdf`` is the string format in which inertial parameters are stored,
- represented as the tuple::
-
- "m", "p_x p_y p_z", ["I_xx", "I_yy", "I_zz", "I_xy", "I_xz", "I_yz"]
-
- * ``scalars`` is the string dictionary format for printing on tensorboard::
-
- {"m": m, "p_x": p_x, ... "I_yz": I_yz}
-
-Various transforms between these types are implemented in this module through
-the :py:class:`InertialParameterConverter` class.
-
-.. [1] C. Rucker and P. M. Wensing, "Smooth Parameterization of Rigid-Body
-    Inertia", IEEE RA-L 2022, https://doi.org/10.1109/LRA.2022.3144517
-"""
-from typing import Any, Tuple, List, Dict
-
-import torch
-from torch import Tensor
-
-from dair_pll.drake_utils import DrakeSpatialInertia
-from dair_pll.tensor_utils import skew_symmetric, symmetric_offdiagonal, \
- pbmm, trace_identity
-
-torch.set_default_dtype(torch.float64) # pylint: disable=no-member
-
-INERTIA_INDICES = [(0, 0), (1, 1), (2, 2), (0, 1), (0, 2), (1, 2)]
-INERTIA_SCALARS = ["I_xx", "I_yy", "I_zz", "I_xy", "I_xz", "I_yz"]
-AXES = ["x", "y", "z"]
-
-
-def number_to_float(number: Any) -> float:
- """Converts a number to float via intermediate string representation."""
- return float(str(number))
-
-# pylint: disable=invalid-name
-def parallel_axis_theorem(I_BBa_B: Tensor,
- m_B: Tensor,
- p_BaBb_B: Tensor,
- Ba_is_Bcm: bool = True) -> Tensor:
- """Converts an inertia matrix represented from one reference point to that
- represented from another reference point. One of these reference points
- must be the center of mass.
-
- The parallel axis theorem states [2]:
-
- .. math::
-
- I_R = I_C - m_{tot} [d]^2
-
-
- ...for :math:`I_C` as the inertia matrix about the center of mass,
- :math:`I_R` as the moment of inertia about a point :math:`R` defined as
- :math:`R = C + d`, and :math:`m_{tot}` as the total mass of the body. The
- brackets in :math:`[d]` indicate the skew-symmetric matrix formed from the
- vector :math:`d`.
-
- [2] https://en.wikipedia.org/wiki/Moment_of_inertia#Parallel_axis_theorem
-
- Args:
- I_BBa_B: ``(*, 3, 3)`` inertia matrices.
- m_B: ``(*)`` masses.
- p_BaBb_B: ``(*, 3)`` displacement from current frame to new frame.
- Ba_is_Bcm: ``True`` if the provided I_BBa_B is from the perspective of
- the CoM, ``False`` if from the perspective of the origin.
-
- Returns:
- ``(*, 3, 3)`` inertia matrices with changed reference point.
- """
- d_squared = skew_symmetric(p_BaBb_B) @ skew_symmetric(p_BaBb_B)
- term = d_squared * m_B.view((-1, 1, 1))
-
- if Ba_is_Bcm:
- return I_BBa_B - term
-
- return I_BBa_B + term
-# pylint: enable=invalid-name
-
-def inertia_matrix_from_vector(I_BBa_B_vec: Tensor) -> Tensor:
- r"""Converts vectorized inertia vector of the following order into an
- inertia matrix:
-
- .. math::
-
- [I_{xx}, I_{yy}, I_{zz}, I_{xy}, I_{xz}, I_{yz}] \Rightarrow
- \begin{bmatrix} I_{xx} & I_{xy} & I_{xz} \\
- I_{xy} & I_{yy} & I_{yz} \\
- I_{xz} & I_{yz} & I_{zz} \end{bmatrix}
-
- Args:
- I_BBa_B_vec: ``(*, 6)`` vectorized inertia parameters.
-
- Returns:
- ``(*, 3, 3)`` inertia matrix.
- """
- # Put Ixx, Iyy, Izz on the diagonals.
- diags = torch.diag_embed(I_BBa_B_vec[..., :3])
-
- # Put Ixy, Ixz, Iyz on the off-diagonals.
- off_diags = symmetric_offdiagonal(I_BBa_B_vec[..., 3:].flip(-1))
-
- return diags + off_diags
-
-
-def inertia_vector_from_matrix(I_BBa_B_mat: Tensor) -> Tensor:
- r"""Converts inertia matrix into vectorized inertia vector of the following
- order:
-
- .. math::
-
- \begin{bmatrix} I_{xx} & I_{xy} & I_{xz} \\
- I_{xy} & I_{yy} & I_{yz} \\
- I_{xz} & I_{yz} & I_{zz} \end{bmatrix} \Rightarrow
-        [I_{xx}, I_{yy}, I_{zz}, I_{xy}, I_{xz}, I_{yz}]
-
-    Args:
- I_BBa_B_mat: ``(*, 3, 3)`` inertia matrix.
-
- Returns:
- ``(*, 6)`` vectorized inertia parameters.
- """
- # Grab Ixx, Iyy, Izz on the diagonals.
- diagonals = I_BBa_B_mat.diagonal(dim1=-2, dim2=-1)
-
- # Grab Ixy, Ixz, Iyz on the off-diagonals individually.
- I_xy = I_BBa_B_mat[..., 0, 1]
- I_xz = I_BBa_B_mat[..., 0, 2]
- I_yz = I_BBa_B_mat[..., 1, 2]
-
- offdiagonals = torch.stack((I_xy, I_xz, I_yz), dim=-1)
-
-    return torch.cat((diagonals, offdiagonals), dim=-1)
-
-
-class InertialParameterConverter:
- """Utility class for transforming between inertial parameterizations."""
-
- @staticmethod
- def theta_to_pi_cm(theta: Tensor) -> Tensor:
- """Converts batch of ``theta`` parameters to ``pi_cm`` parameters.
-
- Args:
- theta: ``(*, 10)`` ``theta``-type parameterization.
-
- Returns:
- ``(*, 10)`` ``pi_cm``-type parameterization.
- """
- mass = torch.exp(theta[..., :1])
- h_vector = theta[..., 1:4]
- d_vector = theta[..., 4:7]
- s_vector = theta[..., 7:]
-
- diagonal_exp_d = torch.diag_embed(torch.exp(d_vector))
-
- # lower-triangular component of symmetrized
-        lower_triangular_s = symmetric_offdiagonal(s_vector.flip(-1)).tril()
-
- cholesky_sigma = diagonal_exp_d + lower_triangular_s
-
- sigma = pbmm(cholesky_sigma, cholesky_sigma.mT)
-
- I_BBcm_B = trace_identity(sigma) - sigma
-
- I_BBcm_B_vec = inertia_vector_from_matrix(I_BBcm_B)
-
- return torch.cat((mass, h_vector, I_BBcm_B_vec), dim=-1)
-
- @staticmethod
- def pi_cm_to_theta(pi_cm: Tensor) -> Tensor:
- """Converts batch of ``pi_cm`` parameters to ``theta`` parameters.
-
- Implements local inverse :py:meth:`theta_to_pi_cm` for valid ``pi_cm``.
-
- Args:
- pi_cm: ``(*, 10)`` ``pi_cm``-type parameterization.
-
- Returns:
- ``(*, 10)`` ``theta``-type parameterization.
- """
-
- log_m = torch.log(pi_cm[..., :1])
-
- h_vector = pi_cm[..., 1:4]
-
- I_BBcm_B = inertia_matrix_from_vector(pi_cm[..., 4:])
-
- sigma = 0.5 * trace_identity(I_BBcm_B) - I_BBcm_B
-
- cholesky_sigma = torch.linalg.cholesky(sigma)
-
- d_vector = torch.log(torch.diagonal(cholesky_sigma, dim1=-2, dim2=-1))
-
- s_vector = torch.stack(
- (cholesky_sigma[..., 1, 0], cholesky_sigma[..., 2, 0],
- cholesky_sigma[..., 2, 1]),
- dim=-1)
-
- return torch.cat((log_m, h_vector, d_vector, s_vector), -1)
-
- @staticmethod
- def pi_cm_to_drake_spatial_inertia_vector(pi_cm: Tensor) -> Tensor:
-        """Converts batch of ``pi_cm`` parameters to
-        ``drake_spatial_inertia_vector`` parameters."""
- return torch.cat((pi_cm[..., 0:1],
- pi_cm[..., 1:4] / pi_cm[..., 0:1],
- pi_cm[..., 4:]),
- dim=-1)
-
- @staticmethod
- def pi_cm_to_urdf(pi_cm: Tensor) -> Tuple[str, str, List[str]]:
- """Converts a single ``(10,)`` ``pi_cm`` vector into the ``urdf`` string
- format."""
- assert len(pi_cm.shape) == 1
- mass = str(pi_cm[0].item())
- p_BoBcm_B = ' '.join(
- [str((coordinate / pi_cm[0]).item()) for coordinate in pi_cm[1:4]])
- I_BBcm_B = [
- str(inertia_element.item()) for inertia_element in pi_cm[4:]
- ]
-
- return mass, p_BoBcm_B, I_BBcm_B
-
- @staticmethod
- def drake_to_pi_cm(M_BBo_B: DrakeSpatialInertia) -> Tensor:
- """Extracts a ``pi_cm`` parameterization from a Drake
- :py:attr:`~pydrake.multibody.tree.SpatialInertia` object.
-
- Args:
- M_BBo_B: Drake spatial inertia of body, about body origin, in body
- coordinates.
- """
- mass = number_to_float(M_BBo_B.get_mass())
- p_BoBcm_B = M_BBo_B.get_com()
- M_BBcm_B = M_BBo_B.Shift(p_BoBcm_B)
- I_BBcm_B = M_BBcm_B.CalcRotationalInertia()
-
- mass_list = [
- mass * number_to_float(coordinate) for coordinate in p_BoBcm_B
- ]
-
- inertia_list = [
- number_to_float(I_BBcm_B[index[0], index[1]])
- for index in INERTIA_INDICES
- ]
- pi = Tensor([mass] + mass_list + inertia_list)
- return pi
-
- @staticmethod
- def drake_to_theta(M_BBo_B: DrakeSpatialInertia) -> Tensor:
- """Passthrough chain of :py:meth:`drake_to_pi_cm` and
- :py:meth:`pi_cm_to_theta`."""
- pi_cm = InertialParameterConverter.drake_to_pi_cm(M_BBo_B)
- return InertialParameterConverter.pi_cm_to_theta(pi_cm)
-
- @staticmethod
- def pi_cm_to_scalars(pi_cm: Tensor) -> Dict[str, float]:
- """Converts ``pi_cm`` parameterization to ``scalars`` dictionary."""
- mass = pi_cm[0]
- p_BoBcm_B = pi_cm[1:4] / mass
- I_BBcm_B = pi_cm[4:]
- scalars = {"m": mass.item()}
- scalars.update({
- f'com_{axis}': p_axis.item()
- for axis, p_axis in zip(AXES, p_BoBcm_B)
- })
- scalars.update({
- inertia_scalar: inertial_value.item()
- for inertia_scalar, inertial_value in zip(INERTIA_SCALARS, I_BBcm_B)
- })
- return scalars
\ No newline at end of file
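
A small sanity-check sketch of the documented ``theta``/``pi_cm`` diffeomorphism above, assuming ``trace_identity`` returns ``trace(A) * I`` as its usage here suggests::

    import torch

    # theta = 0 corresponds to m = 1, CoM at the origin, and I_BBcm_B = 2 * I_3.
    theta = torch.zeros(1, 10)
    pi_cm = InertialParameterConverter.theta_to_pi_cm(theta)
    # pi_cm == [[1, 0, 0, 0, 2, 2, 2, 0, 0, 0]]
    theta_back = InertialParameterConverter.pi_cm_to_theta(pi_cm)
    assert torch.allclose(theta, theta_back, atol=1e-6)
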
diff --git a/dair_pll_old/dair_pll/integrator.py b/dair_pll_old/dair_pll/integrator.py
deleted file mode 100644
index 2a26778..0000000
--- a/dair_pll_old/dair_pll/integrator.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""Classes for time integration on state spaces
-
-This module implements an :py:class:`Integrator` abstract type which is a
-convenience wrapper for various forms of integrating dynamics in time over a
-``StateSpace``. As state spaces are assumed to be the product space of a Lie
-group and its associated algebra (G x g), there are several options for which a
-"partial step" ``partial_step`` might be defined for mapping current states
-to next states:
-
- * x -> x' in G x g (current state to next state)
- * x -> dx in g x R^n_v, and x' = x * exp(dx) (current state to state delta)
- * x -> v' in g, and q' = q * exp(v') (current state to next velocity)
- * x -> dv in R^n_v, v' = v + dv (current state to velocity delta)
- * x -> q' in G, and v' = log(q' * inv(q))/dt (current state to next
- configuration)
- * x -> dq in g, q' = q * exp(dq), v' = log(q' * inv(q))/dt (current state to
- configuration delta).
-
-Each option is implemented as a convenience class inheriting from
-:py:class:`Integrator`.
-
-In addition to this state mapping, the integrator allows for an additional
-hidden state denoted as ``carry`` to be propagated through time.
-
-:py:class:`Integrator` objects have a simulation interface that requires an
-initial condition in the form of an initial state and ``carry``.
-"""
-from abc import ABC, abstractmethod
-from typing import Callable, Optional, Tuple
-
-from torch import Tensor
-from torch.nn import Module
-
-from dair_pll import tensor_utils
-from dair_pll.state_space import StateSpace
-
-PartialStepCallback = Callable[[Tensor, Tensor], Tuple[Tensor, Tensor]]
-
-
-class Integrator(ABC, Module):
- """Class that manages integration in time of given dynamics.
-
- Takes in a ``partial_step`` callback object which defines the underlying
- dynamics abstractly. Inheriting classes makes this relationship concrete.
-
- This class is primarily used for its :py:meth:`simulate` method which
- integrates forward in time from a given initial condition.
- """
- partial_step_callback: Optional[PartialStepCallback]
- space: StateSpace
- dt: float
- out_size: int
-
- def __init__(self, space: StateSpace,
- partial_step_callback: PartialStepCallback, dt: float) -> None:
- """Inits :py:class:`Integrator` with specified dynamics.
-
- Args:
- space: state space of system to be integrated.
- partial_step_callback: Dynamics defined as partial update from
- current state to next state. Exact usage is abstract.
- dt: time step.
- """
- super().__init__()
- self.partial_step_callback = partial_step_callback
- self.space = space
- self.dt = dt
- self.out_size = type(self).calc_out_size(space)
-
- def partial_step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Wrapper method for calling ``partial_step_callback``"""
- assert self.partial_step_callback is not None
- return self.partial_step_callback(x, carry)
-
- def simulate(self, x_0: Tensor, carry_0: Tensor,
- steps: int) -> Tuple[Tensor, Tensor]:
- """Simulates forward in time from initial condition.
-
- Args:
- x_0: (\*, space.n_x) batch of initial condition states
- carry_0: (\*, ?) batch of initial condition hidden states
- steps: number of steps to simulate forward in time (>= 0)
-
- Returns:
-            (\*, steps + 1, space.n_x) simulated state trajectory.
-            (\*, steps + 1, ?) simulated hidden state trajectory.
- """
- assert steps >= 0
- assert x_0.shape[-1] == self.space.n_x
- x_trajectory = tensor_utils.tile_penultimate_dim(
- x_0.unsqueeze(-2), steps + 1)
- carry_trajectory = tensor_utils.tile_penultimate_dim(
- carry_0.unsqueeze(-2), steps + 1)
- x = x_0
- carry = carry_0
- for step in range(steps):
- x, carry = self.step(x, carry)
- x_trajectory[..., step + 1, :] = x
- carry_trajectory[..., step + 1, :] = carry
- return x_trajectory, carry_trajectory
-
- @abstractmethod
- def step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Takes single step in time.
-
- Abstract wrapper which inheriting classes incorporate
- :py:meth:`partial_step` into to complete a single time step.
-
- Args:
- x: (\*, space.n_x) current state
- carry: (\*, ?) current hidden state
-
- Returns:
- (\*, space.n_x) next state
- (\*, ?) next hidden state
- """
-
- @staticmethod
- def calc_out_size(space: StateSpace) -> int:
- """Final dimension of output shape of :py:meth:`partial_step`"""
- return space.n_x
-
-
-class StateIntegrator(Integrator):
- """Convenience class for :py:class:`Integrator` where
- :py:meth:`partial_step` maps current state directly to next state."""
-
- def step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Integrates by direct passthrough to :py:meth:`partial_step`"""
- x_next, carry = self.partial_step(x, carry)
- return self.space.project_state(x_next), carry
-
-
-class DeltaStateIntegrator(Integrator):
- """Convenience class for :py:class:`Integrator` where
- :py:meth:`partial_step` maps current state to state delta."""
-
- def step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Integrates by perturbing current state by output of
- :py:meth:`partial_step`"""
- space = self.space
- dx, carry = self.partial_step(x, carry)
- return space.shift_state(x, dx), carry
-
- @staticmethod
- def calc_out_size(space: StateSpace) -> int:
- return 2 * space.n_v
-
-
-class VelocityIntegrator(Integrator):
- """Convenience class for :py:class:`Integrator` where
- :py:meth:`partial_step` maps current state to next velocity."""
-
- def step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Integrates by setting next velocity to output of
- :py:meth:`partial_step` and implicit Euler integration of the
- configuration."""
- space = self.space
- q = space.q(x)
- v_next, carry = self.partial_step(x, carry)
- q_next = space.euler_step(q, v_next, self.dt)
-
- return space.x(q_next, v_next), carry
-
- @staticmethod
- def calc_out_size(space: StateSpace) -> int:
- return space.n_v
-
-
-class DeltaVelocityIntegrator(Integrator):
- """Convenience class for :py:class:`Integrator` where
- :py:meth:`partial_step` maps current state to velocity delta."""
-
- def step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Integrates by perturbing current velocity by output of
- :py:meth:`partial_step` and implicit Euler integration of the
- configuration."""
- space = self.space
- q, v = space.q_v(x)
- dv, carry = self.partial_step(x, carry)
- v_next = v + dv
- q_next = space.euler_step(q, v_next, self.dt)
-
- return space.x(q_next, v_next), carry
-
- @staticmethod
- def calc_out_size(space: StateSpace) -> int:
- return space.n_v
-
-
-class ConfigurationIntegrator(Integrator):
- """Convenience class for :py:class:`Integrator` where
- :py:meth:`partial_step` maps current state to next configuration."""
-
- def step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Integrates by setting next configuration to output of
- :py:meth:`partial_step` and finite differencing for the next
- velocity."""
- space = self.space
- q = space.q(x)
- q_next_pre_projection, carry = self.partial_step(x, carry)
- q_next = space.project_configuration(q_next_pre_projection)
- v_next = space.finite_difference(q, q_next, self.dt)
- return space.x(q_next, v_next), carry
-
- @staticmethod
- def calc_out_size(space: StateSpace) -> int:
- return space.n_q
-
-
-class DeltaConfigurationIntegrator(Integrator):
- """Convenience class for :py:class:`Integrator` where
- :py:meth:`partial_step` maps current state to configuration delta."""
-
- def step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """Integrates by perturbing current configuration by output of
- :py:meth:`partial_step` and finite differencing for the next
- velocity."""
- space = self.space
- q = space.q(x)
- dq, carry = self.partial_step(x, carry)
- q_next = space.exponential(q, dq)
- v_next = dq / self.dt
- return space.x(q_next, v_next), carry
-
- @staticmethod
- def calc_out_size(space: StateSpace) -> int:
- return space.n_v
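
A brief sketch of driving one of these convenience classes; it assumes ``FloatingBaseSpace`` from ``dair_pll.state_space`` with the interface used elsewhere in this repository::

    import torch

    from dair_pll.state_space import FloatingBaseSpace

    space = FloatingBaseSpace(0)  # single free-floating body, no joints

    def keep_velocity(x, carry):
        # partial_step for VelocityIntegrator: next velocity = current velocity.
        _, v = space.q_v(x)
        return v, carry

    integrator = VelocityIntegrator(space, keep_velocity, dt=1e-2)
    x_0 = space.zero_state().unsqueeze(0)  # (1, n_x) batch of one state
    carry_0 = torch.zeros(1, 1)            # trivial hidden state
    x_traj, carry_traj = integrator.simulate(x_0, carry_0, steps=100)
    # x_traj has shape (1, 101, space.n_x): the initial state plus 100 steps.
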
diff --git a/dair_pll_old/dair_pll/mujoco_experiment.py b/dair_pll_old/dair_pll/mujoco_experiment.py
deleted file mode 100644
index 3c37503..0000000
--- a/dair_pll_old/dair_pll/mujoco_experiment.py
+++ /dev/null
@@ -1,199 +0,0 @@
-import os
-import pdb
-from dataclasses import dataclass
-from typing import cast, Callable
-
-import torch
-
-from dair_pll.deep_learnable_system import \
- DeepLearnableSystemConfig, DeepLearnableExperiment
-from dair_pll.experiment import DataConfig, TrajectorySliceDataset
-from dair_pll.experiment_config import OptimizerConfig, \
- SupervisedLearningExperimentConfig
-from dair_pll.mujoco_system import MuJoCoSystem, MuJoCoUKFSystem
-
-
-@dataclass
-class MuJoCoExperimentConfig(SupervisedLearningExperimentConfig):
- xml: str = 'assets/cube_mujoco.xml'
- stiffness: float = 100.
- damping_ratio: float = 1.00
- v200: bool = False
-
-
-class MuJoCoExperiment(DeepLearnableExperiment):
-
- def __init__(self, config: MuJoCoExperimentConfig) -> None:
- super().__init__(config)
-
- def get_base_system(self) -> MuJoCoSystem:
- config = cast(MuJoCoExperimentConfig, self.config)
- dt = config.data_config.dt
- return MuJoCoSystem(config.xml, dt, config.stiffness,
- config.damping_ratio, config.v200)
-
- def get_oracle_system(self) -> MuJoCoSystem:
- config = cast(MuJoCoExperimentConfig, self.config)
- data_config = config.data_config
- noiser = data_config.noiser_type(self.space)
- P0_diag, R_diag = MuJoCoUKFSystem.noise_stds_to_P0_R_stds(
- data_config.static_noise, data_config.dynamic_noise, data_config.dt)
- P0 = noiser.covariance(P0_diag)
- R = noiser.covariance(R_diag)
- return MuJoCoUKFSystem(config.xml, data_config.dt, config.stiffness,
- config.damping_ratio, config.v200, P0, R)
-
-
-if __name__ == "__main__":
- '''
- stiffness = 2500
- study_name = f'mujoco_cube_{stiffness}_experiment_test'
- if stiffness == 2500:
- optimizer_config = OptimizerConfig(
- lr = 1e-4,
- wd = 0.
- )
-
- elif stiffness == 100:
- optimizer_config = OptimizerConfig()
- experiment_config = MuJoCoDataExperimentConfig(
- optimizer_config = optimizer_config,
- stiffness = stiffness,
- study = study_name,
- N_pop = 1024
- )
- experiment = MuJoCoDataExperiment(experiment_config)
- experiment.train()
- '''
-
- eval_test = False
- ukf_test = False
- if eval_test:
- stiffness = 2500
- POP = 1024
- T_SKIP = 16
- V200 = False
- CUBE_XML = 'assets/cube_mujoco.xml'
-
- study_name = f'mujoco_cube_{stiffness}_eval_test'
- os.system(f'rm -r results/{study_name}')
-
- optimizer_config = OptimizerConfig(lr=1e-4, wd=0., patience=0)
-
- learnable_config = DeepLearnableSystemConfig()
- data_config = DataConfig(
- n_pop=POP,
- n_train=POP - 4,
- n_valid=2,
- n_test=2,
- t_skip=T_SKIP,
- t_history=1,
- storage=study_name # ,
- # static_noise = torch.zeros(12),
- # dynamic_noise = torch.zeros(12)
- )
-
- experiment_config = MuJoCoExperimentConfig(
- xml=CUBE_XML,
- learnable_config=learnable_config,
- optimizer_config=optimizer_config,
- data_config=data_config,
- stiffness=stiffness,
- v200=V200)
-
- experiment = MuJoCoExperiment(experiment_config)
- _, best_valid_loss, learned_system, train_traj, valid_traj, test_traj = experiment.train(
- )
- stats = experiment.evaluation(learned_system, train_traj, valid_traj,
- test_traj,
- TrajectorySliceDataset(train_traj),
- TrajectorySliceDataset(valid_traj),
- TrajectorySliceDataset(test_traj))
- # print(stats['train_oracle_loss_mean'])
- oracle_tensor = torch.tensor(stats['train_oracle_loss'])
- print('oracle: loss ', oracle_tensor.mean())
- print('rot err degrees',
- torch.tensor(stats['train_oracle_rot_err']).mean() * 180 / 3.1415)
- print('pos err percent',
- torch.tensor(stats['train_oracle_pos_err']).mean() * 100 / 0.1)
- # print(oracle_tensor.std() / np.sqrt(len(stats['train_oracle_loss'])))
- pdb.set_trace()
-
- if ukf_test:
- stiffness = 2500
- POP = 16
- V200 = False
- CUBE_XML = 'assets/cube_mujoco.xml'
-
- study_name = f'mujoco_cube_{stiffness}_ukf_test'
- os.system(f'rm -r results/{study_name}')
-
- optimizer_config = OptimizerConfig(lr=1e-4, wd=0., patience=0)
-
- learnable_config = DeepLearnableSystemConfig()
- data_config = DataConfig(
- n_pop=POP,
- n_train=POP - 2,
- n_valid=1,
- n_test=1,
- t_history=16,
- storage=study_name
- # T_skip = TSKIP,
- # static_noise = torch.zeros(12)#,
- # dynamic_noise = torch.zeros(12)
- # noiser_type = GaussianWhiteNoiser
- )
-
- experiment_config = MuJoCoExperimentConfig(
- xml=CUBE_XML,
- learnable_config=learnable_config,
- optimizer_config=optimizer_config,
- data_config=data_config,
- stiffness=stiffness,
- v200=V200)
-
- experiment = MuJoCoExperiment(experiment_config)
- _, best_valid_loss, learned_system, train_traj, valid_traj, test_traj = experiment.train(
- )
- dataset = experiment.data_manager.slice(train_traj)
- base_system = experiment.get_base_system()
- oracle_system = experiment.get_oracle_system()
- oracle_loss = []
- base_loss = []
- from time import time
-
- t0 = time()
- N = 0
- M = 0
- BL_MIN = 1e-3
- for (x, y) in dataset:
- M += 1
- bl = experiment.evaluation_loss(x.clone().unsqueeze(0),
- y.unsqueeze(0), base_system)
- if bl > BL_MIN:
- N += 1
- base_loss.append(bl)
- oracle_loss.append(
- experiment.evaluation_loss(x.clone().unsqueeze(0),
- y.unsqueeze(0), oracle_system))
- dur = time() - t0
- print(dur, N, dur / N, M)
- itemize: Callable = lambda l: [i.item() for i in l]
- base_loss = itemize(base_loss)
- oracle_loss = itemize(oracle_loss)
- print(sum(base_loss) / N, sum(oracle_loss) / N)
- import matplotlib.pyplot as plt
-
- # pdb.set_trace()
- fig = plt.figure()
- ax = plt.gca()
- ax.scatter(base_loss, oracle_loss)
- ax.set_yscale('log')
- ax.set_xscale('log')
- min_loss = min(base_loss + oracle_loss)
- max_loss = max(base_loss + oracle_loss)
- bounds = [min_loss, max_loss]
- ax.plot(bounds, bounds)
- ax.set_xlim(bounds)
- ax.set_ylim(bounds)
- plt.show()
diff --git a/dair_pll_old/dair_pll/mujoco_system.py b/dair_pll_old/dair_pll/mujoco_system.py
deleted file mode 100644
index 6ad2be9..0000000
--- a/dair_pll_old/dair_pll/mujoco_system.py
+++ /dev/null
@@ -1,491 +0,0 @@
-import pdb
-from typing import Tuple, Optional
-
-import numpy as np
-import optuna
-import torch
-from mujoco_py import load_model_from_xml, MjSim
-from scipy.optimize import minimize
-from torch import Tensor
-
-from dair_pll import state_space
-from dair_pll.integrator import StateIntegrator
-from dair_pll.system import System
-from dair_pll.ukf import UKF
-
-
-class MuJoCoStateConverter():
-
- @staticmethod
- def mujoco_to_state(x_mujoco):
- # mujoco ordering: [p, q, j]
- t = lambda x: torch.tensor(x).clone()
- p = t(x_mujoco.qpos[:3])
- q = t(x_mujoco.qpos[3:7])
- j = t(x_mujoco.qpos[7:])
- v = t(x_mujoco.qvel[:3])
- omega = t(x_mujoco.qvel[3:6])
- vj = t(x_mujoco.qvel[6:])
- return torch.cat((q, p, j, omega, v, vj)).unsqueeze(0)
-
- @staticmethod
- def state_to_mujoco(x_mujoco, q: Tensor, v: Tensor):
- # mujoco ordering: [p, q, j]
- x_mujoco.qpos[3:7] = q[..., :4].squeeze()
- x_mujoco.qpos[:3] = q[..., 4:7].squeeze()
- x_mujoco.qpos[7:] = q[..., 7:].squeeze()
-
-        x_mujoco.qvel[3:6] = v[..., :3].squeeze()
-        x_mujoco.qvel[:3] = v[..., 3:6].squeeze()
-        x_mujoco.qvel[6:] = v[..., 6:].squeeze()
-
- return x_mujoco
-
-
-class MuJoCoSystem(System):
- sim: MjSim
-
- def __init__(self,
- mjcf: str,
- dt: float,
- stiffness: float,
-                 damping_ratio: float,
- v200: bool = False) -> None:
-
-        time_constant = 1. / (damping_ratio * np.sqrt(stiffness))
-        total_damping = damping_ratio * 2 * np.sqrt(stiffness)
- sys_xml = ''
- with open(mjcf, 'r') as sysfile:
- if v200:
- sys_xml = sysfile.read() \
- .replace("$solrefarg1", str(-stiffness)) \
- .replace("$solrefarg2", str(-total_damping)) \
- .replace("$dt", str(dt))
- else:
- sys_xml = sysfile.read() \
- .replace("$solrefarg1", str(time_constant)) \
-                    .replace("$solrefarg2", str(damping_ratio)) \
- .replace("$dt", str(dt))
- # print(sys_xml)
- model = load_model_from_xml(sys_xml)
- sim = MjSim(model)
- sim_state = sim.get_state()
- # pdb.set_trace()
- n_joints = len(sim_state.qpos) - 7
- space = state_space.FloatingBaseSpace(n_joints)
- integrator = StateIntegrator(space, self.sim_step, dt)
- super().__init__(space, integrator)
- self.max_batch_dim = 0
- self.sim = sim
- self.set_carry_sampler(lambda: torch.tensor([[False]]))
-
- def preprocess_initial_condition(self, x_0: Tensor,
- carry_0: Tensor) -> Tuple[Tensor, Tensor]:
- # get dummy state
- while len(x_0.shape) >= 2:
- # x0 has (probably trivial) duration dimension;
- # select most recent precondition
- x_0 = x_0[-1, ...]
-
- while len(carry_0.shape) >= 2:
-            # carry_0 has (probably trivial) duration dimension;
-            # select most recent precondition
- carry_0 = carry_0[-1, ...]
-
- sim_state = self.sim.get_state()
- # pdb.set_trace()
- q0, v0 = self.space.q_v(x_0)
- # pdb.set_trace()
- state0 = MuJoCoStateConverter.state_to_mujoco(sim_state, q0, v0)
- self.sim.set_state(state0)
- self.sim.forward()
- # pdb.set_trace()
- return x_0, carry_0 or torch.tensor([True])
-
-    def sim_step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- # carry detects if first step has been taken yet
- # pdb.set_trace()
- self.sim.step()
- # pdb.set_trace()
- x_next = MuJoCoStateConverter.mujoco_to_state(self.sim.get_state())
- # print(self.sim.get_state())
- return x_next, carry
-
-
-SENSE_VELOCITY = True
-
-BIAS = False
-BIAS_VEL = False
-SENSE_BIAS = True
-
-
-class MuJoCoUKFSystem(MuJoCoSystem):
- P0: Tensor
- R: Tensor
-
- @staticmethod
- def noise_stds_to_P0_R_stds(static_stds: Tensor, dynamic_std: Tensor,
- dt: float) -> Tuple[Tensor, Tensor]:
- if BIAS:
- M = 1.0
- nv = static_stds.nelement() // 2
- composite_noise_diag = dynamic_std[:nv] # + static_stds[:nv]
- state0_diag = torch.cat(
- (composite_noise_diag, dynamic_std[:nv] * np.sqrt(2 / dt))) * M
- bias0_diag = (static_stds + 1e-8) * M
- if BIAS_VEL:
- P0_diag = torch.cat((state0_diag, bias0_diag))
- else:
- P0_diag = torch.cat((state0_diag, bias0_diag[:nv]))
- if SENSE_VELOCITY:
- R_diag = state0_diag.clone()
- if SENSE_BIAS:
- R_diag = P0_diag.clone()
- else:
- # hack to fix error detected??
- R_diag = composite_noise_diag.clone()
- # R_diag = composite_noise_diag.clone()
-
- return (P0_diag, R_diag)
- else:
- nv = static_stds.nelement() // 2
- config_noise_diag = torch.sqrt(static_stds ** 2 + dynamic_std ** 2)[
- :nv]
- P0_diag = torch.cat(
- (config_noise_diag, dynamic_std[:nv] * np.sqrt(2 / dt)))
- if SENSE_VELOCITY:
- R_diag = P0_diag.clone()
- else:
- R_diag = config_noise_diag.clone()
- return (P0_diag, R_diag)
-
- def __init__(self,
- mjcf: str,
- dt: float,
- stiffness: float,
-                 damping_ratio: float,
- v200: bool = False,
- P0: Optional[Tensor] = None,
- R: Optional[Tensor] = None) -> None:
-        super().__init__(mjcf, dt, stiffness, damping_ratio, v200)
- if P0 is None:
- P0 = torch.eye((4 if BIAS else 2) * self.space.n_v)
- if R is None:
- R = torch.eye((2 if SENSE_VELOCITY else 1) * self.space.n_v) * 1e-2
- self.P0 = P0
- self.R = R
-
- def ukf_estimate(self, x0: Tensor) -> Tensor:
-
- # pdb.set_trace()
-
- numpify = lambda x: x.detach().numpy()
- torchify = lambda x: torch.tensor(x, dtype=torch.float64).clone()
-
- SIC = super().preprocess_initial_condition
-
- # reduce to 1 trajectory
- while len(x0.shape) >= 3:
- # x0 has (probably trivial) duration dimension;
- # select most recent precondition
- x0 = x0[-1, ...]
-
- def ukf_f(state, omega, w, dt):
- # omega is input; neglect for now
- # w is a tangent space noise vector
- # pdb.set_trace()
- state = torchify(state).unsqueeze(0)
- w = torchify(w).unsqueeze(0)
- x0 = self.space.shift_state(state, w)
- carry = self.carry_callback()
- SIC(x0, carry)
- return numpify(self.sim_step(x0, carry)[0].squeeze(0))
-
- def ukf_h(state):
- state = torchify(state).unsqueeze(0)
- zero = torchify(self.space.zero_state().unsqueeze(0))
- if SENSE_VELOCITY:
- return numpify(
- self.space.state_difference(zero, state).squeeze(0))
- # pdb.set_trace()
- else:
- zero_q = self.space.q(zero)
- state_q = self.space.q(state)
- return numpify(
- self.space.configuration_difference(zero_q,
- state_q).squeeze(0))
-
- def ukf_phi(state, dstate):
- state = torchify(state).unsqueeze(0)
- dstate = torchify(dstate).unsqueeze(0)
- return numpify(self.space.shift_state(state, dstate).squeeze(0))
-
- def ukf_phi_inv(x1, x2):
- x1 = torchify(x1).unsqueeze(0)
- x2 = torchify(x2).unsqueeze(0)
- return numpify(self.space.state_difference(x1, x2).squeeze(0))
-
- Q = numpify(1e-10 * torch.eye(2 * self.space.n_v))
-
- alpha = 1e-1 * np.array([1., 1., 1.])
-
- start = numpify(torchify(x0[0, :]))
-
- R = numpify(self.R)
- # pdb.set_trace()
-
- P0 = numpify(self.P0.clone())
-
- ukf = UKF(ukf_f, ukf_h, ukf_phi, ukf_phi_inv, Q, R, alpha, start, P0)
- # pdb.set_trace()
- for x_i in x0[1:, :]:
- ukf.propagation(torch.tensor(0.), self.integrator.dt)
-
- y_i = ukf_h(numpify(x_i))
- ukf.update(y_i)
-
- # pdb.set_trace()
- print('done!')
- return torchify(ukf.state).unsqueeze(0)
-
- def ukf_bias_estimate(self, x0: Tensor) -> Tensor:
-
- # pdb.set_trace()
-
- numpify = lambda x: x.detach().numpy()
- torchify = lambda x: torch.tensor(x, dtype=torch.float64).clone()
-
- SIC = super().preprocess_initial_condition
-
- # reduce to 1 trajectory
- while len(x0.shape) >= 3:
- # x0 has (probably trivial) duration dimension;
- # select most recent precondition
- x0 = x0[-1, ...]
-
- def ukf_f(state, omega, w, dt):
- # omega is input; neglect for now
- # w is a tangent space noise vector
- # pdb.set_trace()
-
- # state is actual state and a bias
- # pdb.set_trace()
- state = ukf_phi(state, w)
- state = torchify(state).unsqueeze(0)
- bias = state[:, self.space.n_x:]
- shift = bias
- if not BIAS_VEL:
- shift = torch.cat((bias, 0. * bias), dim=1)
- state = self.space.shift_state(state[:, :self.space.n_x], shift)
- # w = torchify(w).unsqueeze(0)
- # x0 = self.space.shift_state(state, w)
- carry = self.carry_callback()
- SIC(x0, carry)
- real_next_state = self.sim_step(x0, carry)[0]
- # pdb.set_trace()
- sensed_next_state = self.space.shift_state(real_next_state, -shift)
- return numpify(
- torch.cat((sensed_next_state.squeeze(0), bias.squeeze(0))))
-
- def ukf_h(state):
- # pdb.set_trace()
- state = torchify(state).unsqueeze(0)
- bias = state[:, self.space.n_x:]
- state = state[:, :self.space.n_x]
- zero = torchify(self.space.zero_state().unsqueeze(0))
- if SENSE_VELOCITY:
- ds = self.space.state_difference(zero, state)
- if SENSE_BIAS:
- return numpify(torch.cat((ds.squeeze(0), bias.squeeze(0))))
- return numpify(ds.squeeze(0))
-
- else:
- zero_q = self.space.q(zero)
- state_q = self.space.q(state)
- return numpify(
- self.space.configuration_difference(zero_q,
- state_q).squeeze(0))
-
- def ukf_phi(state, delta):
- state = torchify(state).unsqueeze(0)
- bias = state[:, self.space.n_x:]
- state = state[:, :self.space.n_x]
- dstate = torchify(delta[:(2 * self.space.n_v)]).unsqueeze(0)
- dbias = torchify(delta[(2 * self.space.n_v):]).unsqueeze(0)
-
- fullx = self.space.shift_state(state, dstate).squeeze(0)
- if dbias.nelement() == 0:
- pdb.set_trace()
- fullbias = (bias + dbias).squeeze(0)
-
- return numpify(torch.cat((fullx, fullbias)))
-
- def ukf_phi_inv(x1, x2):
- x1 = torchify(x1).unsqueeze(0)
- x2 = torchify(x2).unsqueeze(0)
- delta_bias = (x2[:, self.space.n_x:] -
- x1[:, self.space.n_x:]).squeeze(0)
- delta_state = self.space.state_difference(
- x1[:, :self.space.n_x], x2[:, :self.space.n_x]).squeeze(0)
-
- return numpify(torch.cat((delta_state, delta_bias)))
-
- Q = numpify(1e-8 * torch.eye((4 if BIAS_VEL else 3) * self.space.n_v))
-
- alpha = 1e-1 * np.array([1., 1., 1.])
-
- # start = numpify(torchify(x0[0, :]))
-
- NT = 2 * self.space.n_v
- start = numpify(
- torchify(
- torch.cat(
- (x0[0, :], torch.zeros(NT if BIAS_VEL else NT // 2)))))
-
- R = numpify(self.R)
- # pdb.set_trace()
-
- # P0 = torch.eye(2 * NT)
- # P0[:NT, :NT] = self.P0.clone()
- # P0[NT:, NT:] = self.P0.clone()
- P0 = numpify(self.P0.clone())
- # pdb.set_trace()
- ukf = UKF(ukf_f, ukf_h, ukf_phi, ukf_phi_inv, Q, R, alpha, start, P0)
- # pdb.set_trace()
- for x_i in x0[1:, :]:
- ukf.propagation(torch.tensor(0.), self.integrator.dt)
-
- y_i = ukf_h(
- numpify(torch.cat((x_i, 0. * x_i[(1 if BIAS_VEL else 7):]))))
- ukf.update(y_i)
-
- # pdb.set_trace()
- state = ukf.state
- state = torchify(state).unsqueeze(0)
- bias = state[:, self.space.n_x:]
- shift = bias
- if not BIAS_VEL:
- shift = torch.cat((bias, 0. * bias), dim=1)
- state = self.space.shift_state(state[:, :self.space.n_x], shift)
- # pdb.set_trace()
- print('done', bias.norm())
- return state
-
- def mll_estimate(self, x0: Tensor) -> Tensor:
- # pdb.set_trace()
- x0 = x0.squeeze()
- torchify = lambda x: torch.tensor(x, dtype=torch.float64).clone()
- torchify32 = lambda x: torch.tensor(x, dtype=torch.float32).clone()
- # torchify = lambda x: torch.tensor(x).clone()
- T = x0.shape[0]
-
- base_x0 = torchify(x0[0, :])
-
- SIC = super().preprocess_initial_condition
-
- x064 = torchify(x0)
- OPTUNA = True
- LSQ = False
-
- def eval_ic(exp_ic: np.ndarray) -> float:
- # pdb.set_trace()
-
- exp_ic = torchify(exp_ic).unsqueeze(0)
- ic = self.space.shift_state(base_x0.unsqueeze(0), exp_ic)
- # pdb.set_trace()
- # ic_traj = SSIM(ic, self.carry_callback(), T)
- x, carry = SIC(ic, self.carry_callback())
- ic_traj, carrytraj = self.integrator.simulate(x, carry, T - 1)
- deltas = self.space.state_difference(x064, ic_traj)
- # NLL = (deltas ** 2) / torch.diag(self.R)
- # return NLL.sum().item()
- scd = deltas / torch.sqrt(torch.diag(self.R)) * 1e-4
- scd = scd[:, :]
- if LSQ:
- return torch.flatten(scd).detach().numpy()
- else:
- return (scd ** 2).sum()
-
- # fitted_x0 = minimize(eval_ic, np.zeros((12,)), method = 'Nelder-Mead')
- z_window = 1 * torch.sqrt(torch.diag(self.R)).numpy()
-
- def hp2state(ostate):
- vstate = np.zeros(12)
- for i in range(12):
- vstate[i] = ostate[f'x_{i}']
- return vstate
-
- def optuna_shell(trial):
- ostate = {}
- for i in range(12):
- p = f'x_{i}'
- ostate[p] = trial.suggest_float(p, -z_window[i], z_window[i])
- vstate = hp2state(ostate)
- return eval_ic(vstate)
-
- if OPTUNA:
- study = optuna.create_study()
- optuna.logging.disable_default_handler()
- study.optimize(optuna_shell, n_trials=100)
- fitted_x0 = hp2state(study.best_params)
-
- else:
-
- LM = False
-
- method = 'lm' if LM else 'dogbox'
- if LSQ:
- bounds = (-np.inf, np.inf) if LM else (-z_window, z_window)
- else:
- bounds = [(-zi, zi) for zi in z_window]
- # fitted_x0 = least_squares(eval_ic, np.zeros((12,)), bounds=bounds, jac='3-point', verbose=1, method=method)
- fitted_x0 = minimize(eval_ic,
- np.zeros((12,)),
- method='Nelder-Mead',
- bounds=bounds).x
- # pdb.set_trace()
-
- start = self.space.shift_state(x0[0, :].unsqueeze(0),
- torchify32(fitted_x0).unsqueeze(0))
- # TODO
- start_traj, carrytraj = self.integrator.simulate(
- start, self.carry_callback(), T - 1)
- # pdb.set_trace()
- print('done')
- return self.space.shift_state(start_traj[-1, :].unsqueeze(0),
- torchify32(fitted_x0).unsqueeze(0))
-
- def preprocess_initial_condition(self, x_0: Tensor,
- carry_0: Tensor) -> Tuple[Tensor, Tensor]:
- # pdb.set_trace()
- estimate = self.ukf_bias_estimate(x_0) if BIAS else self.ukf_estimate(
- x_0)
- return super().preprocess_initial_condition(estimate, carry_0)
-
-
-if __name__ == "__main__":
- '''
- mjcsys = MuJoCoSystem('assets/cube_mujoco.xml', 6.74e-3, 2500., 1.04)
- starting_state = mjcsys.space.zero_state()
- starting_state[6] += 0.07
- mjcsys.set_state_sampler(state_space.ConstantSampler(mjcsys.space, starting_state))
-
- xtraj, carry = mjcsys.sample_trajectory(20)
-
- ukfsys = MuJoCoUKFSystem('assets/cube_mujoco.xml', 6.74e-3, 2500., 1.04)
-
- #pdb.set_trace()
-
- ukfsys.set_initial_condition(xtraj, carry[0])
-
- #learned_system = DeepLearnableSystem(mjcsys, DeepLearnableSystemConfig())
- print(xtraj.shape)
- print(xtraj[-1, :])
- '''
- '''
- import matplotlib.pyplot as plt
- plt.plot(xtraj[:, 6])
- plt.plot(xtraj_chain[:, 6].detach())
- plt.legend(['drake', 'todorov'])
- plt.show()
- '''
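
A short sketch of the default noise mapping above (``BIAS = False``, ``SENSE_VELOCITY = True``) with hypothetical standard deviations::

    import torch

    dt = 6.74e-3
    static_stds = torch.full((12,), 1e-3)   # per-coordinate static (measurement) stds
    dynamic_stds = torch.full((12,), 1e-4)  # per-coordinate dynamic (process) stds
    P0_diag, R_diag = MuJoCoUKFSystem.noise_stds_to_P0_R_stds(
        static_stds, dynamic_stds, dt)
    # First 6 entries: sqrt(static**2 + dynamic**2) for the configuration;
    # last 6 entries: dynamic * sqrt(2 / dt) for the velocity.
    assert torch.equal(R_diag, P0_diag)     # velocities are sensed, so R matches P0
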
diff --git a/dair_pll_old/dair_pll/multibody_learnable_system.py b/dair_pll_old/dair_pll/multibody_learnable_system.py
deleted file mode 100644
index 21d2d40..0000000
--- a/dair_pll_old/dair_pll/multibody_learnable_system.py
+++ /dev/null
@@ -1,693 +0,0 @@
-"""Construction and analysis of learnable multibody systems.
-
-Similar to Drake, multibody systems are instantiated as a child class of
-:py:class:`System`: :py:class:`MultibodyLearnableSystem`. This object is a thin
-wrapper for a :py:class:`MultibodyTerms` member variable, which manages
-computation of lumped terms necessary for simulation and evaluation.
-
-Simulation is implemented via Anitescu's [1] convex method.
-
-An interface for the ContactNets [2] loss is also defined as an alternative
-to prediction loss.
-
-A large portion of the internal implementation of :py:class:`DrakeSystem` is
-implemented in :py:class:`MultibodyPlantDiagram`.
-
-[1] M. Anitescu, “Optimization-based simulation of nonsmooth rigid
-multibody dynamics,” Mathematical Programming, 2006,
-https://doi.org/10.1007/s10107-005-0590-7
-
-[2] S. Pfrommer*, M. Halm*, and M. Posa. "ContactNets: Learning Discontinuous
-Contact Dynamics with Smooth, Implicit Representations," Conference on
-Robotic Learning, 2020, https://proceedings.mlr.press/v155/pfrommer21a.html
-"""
-from multiprocessing import pool
-from os import path
-from typing import List, Tuple, Optional, Dict, cast
-
-import numpy as np
-import torch
-import pdb
-import time
-# from sappy import SAPSolver # type: ignore
-from torch import Tensor
-from torch.nn import Module
-import torch.nn as nn
-
-from dair_pll import urdf_utils, tensor_utils, file_utils
-from dair_pll.drake_system import DrakeSystem
-from dair_pll.integrator import VelocityIntegrator
-from dair_pll.multibody_terms import MultibodyTerms
-from dair_pll.quaternion import quaternion_to_rotmat_vec
-from dair_pll.solvers import DynamicCvxpyLCQPLayer
-from dair_pll.state_space import FloatingBaseSpace
-from dair_pll.system import System, SystemSummary
-from dair_pll.tensor_utils import pbmm, broadcast_lorentz, \
- one_vector_block_diagonal, project_lorentz, reflect_lorentz
-
-
-# Loss variations options
-LOSS_PLL_ORIGINAL = 'loss_pll_original'
-LOSS_POWER = 'loss_power'
-LOSS_INERTIA_AGNOSTIC = 'loss_inertia_agnostic'
-LOSS_BALANCED = 'loss_balanced'
-LOSS_CONTACT_VELOCITY = 'loss_contact_velocity'
-LOSS_VARIATIONS = [LOSS_PLL_ORIGINAL, LOSS_POWER, LOSS_INERTIA_AGNOSTIC,
- LOSS_BALANCED, LOSS_CONTACT_VELOCITY]
-LOSS_VARIATION_NUMBERS = [str(LOSS_VARIATIONS.index(loss_variation)) \
- for loss_variation in LOSS_VARIATIONS]
-
-# Scaling factors to equalize translation and rotation errors.
-# For rotation versus linear scaling: penalize 0.1 meters same as 90 degrees.
-ROTATION_SCALING = 0.2/torch.pi
-# For articulation versus linear/rotation scaling: penalize the scenario where
-# one elbow link is in the right place and the other is flipped 180 degrees
-# equally, regardless of whether it is link 1 or link 2 that is in place.
-ELBOW_COM_TO_AXIS_DISTANCE = 0.035
-JOINT_SCALING = 2*ELBOW_COM_TO_AXIS_DISTANCE/torch.pi + ROTATION_SCALING
-
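A quick numeric check of the equivalences these scaling constants encode (values copied from the definitions above):

    import math

    ROTATION_SCALING = 0.2 / math.pi
    ELBOW_COM_TO_AXIS_DISTANCE = 0.035
    JOINT_SCALING = 2 * ELBOW_COM_TO_AXIS_DISTANCE / math.pi + ROTATION_SCALING

    # A 90 degree rotation error is penalized like a 0.1 m translation error.
    assert abs(math.pi / 2 * ROTATION_SCALING - 0.1) < 1e-9
    # A 180 degree joint flip costs the same as flipping the base 180 degrees
    # (0.2) plus displacing the elbow CoM by 2 * 0.035 m (0.07).
    assert abs(math.pi * JOINT_SCALING - 0.27) < 1e-9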
-
-class MultibodyLearnableSystem(System):
- """:py:class:`System` interface for dynamics associated with
- :py:class:`MultibodyTerms`."""
- multibody_terms: MultibodyTerms
- init_urdfs: Dict[str, str]
- output_urdfs_dir: Optional[str] = None
- visualization_system: Optional[DrakeSystem]
- solver: DynamicCvxpyLCQPLayer
- dt: float
- loss_variation_txt: str
-
- def __init__(self,
- init_urdfs: Dict[str, str],
- dt: float,
- inertia_mode: int,
- loss_variation: int,
- w_pred: float,
- w_comp: float,
- w_diss: float,
- w_pen: float,
- w_res: float,
- w_res_w: float,
- do_residual: bool = False,
- output_urdfs_dir: Optional[str] = None,
- network_width: int = 128,
- network_depth: int = 2,
- represent_geometry_as: str = 'box',
- randomize_initialization: bool = False,
- g_frac: float = 1.0) -> None:
- """Inits :py:class:`MultibodyLearnableSystem` with provided model URDFs.
-
- Implementation is primarily based on Drake. Bodies are modeled via
- :py:class:`MultibodyTerms`, which uses Drake symbolics to generate
- dynamics terms, and the system can be exported back to a
- Drake-interpretable representation as a set of URDFs.
-
- Args:
- init_urdfs: Names and corresponding URDFs to model with
- :py:class:`MultibodyTerms`.
- dt: Time step of system in seconds.
- inertia_mode: An integer 0, 1, 2, 3, or 4 representing the
- inertial parameters the model can learn. The higher the number
- the more inertial parameters are free to be learned, and 0
- corresponds to learning no inertial parameters.
- loss_variation: An integer 0, 1, 2, 3, or 4 representing the loss
- variation to use. 0 indicates the original PLL loss, 1 power loss,
- 2 inertia-agnostic, 3 balanced inertia-agnostic, and 4 contact
- velocity inertia-agnostic.
- output_urdfs_dir: Optionally, a directory that learned URDFs can be
- written to.
- randomize_initialization: Whether to randomize and export the
- initialization or not.
- """
- assert str(loss_variation) in LOSS_VARIATION_NUMBERS
-
- multibody_terms = MultibodyTerms(init_urdfs, inertia_mode,
- represent_geometry_as,
- randomize_initialization,
- g_frac=g_frac)
-
- space = multibody_terms.plant_diagram.space
- integrator = VelocityIntegrator(space, self.sim_step, dt)
- super().__init__(space, integrator)
-
- self.output_urdfs_dir = output_urdfs_dir
- self.multibody_terms = multibody_terms
- self.init_urdfs = init_urdfs
-
- if randomize_initialization:
- # Add noise and export.
- print(f'Randomizing initialization.')
- multibody_terms.randomize_multibody_terms(inertia_mode)
- self.multibody_terms = multibody_terms
- self.generate_updated_urdfs('init')
-
- self.loss_variation_txt = LOSS_VARIATIONS[loss_variation]
- self.visualization_system = None
- self.solver = DynamicCvxpyLCQPLayer(self.space.n_v)
- self.dt = dt
- self.set_carry_sampler(lambda: Tensor([False]))
- self.max_batch_dim = 1
- self.w_pred = w_pred
- self.w_comp = w_comp
- self.w_diss = w_diss
- self.w_pen = w_pen
- self.w_res = w_res
- self.w_res_w = w_res_w
-
- self.residual_net = None
-
- if do_residual:
- # This system type is only well defined for systems containing a
- # fixed ground and one floating base system.
- assert len(self.space.spaces) == 2
- self.object_space_idx = None
- for idx in range(len(self.space.spaces)):
- if type(self.space.spaces[idx]) == FloatingBaseSpace:
- self.object_space_idx = idx
- assert self.object_space_idx != None
-
- self.init_residual_network(network_width, network_depth)
-
-    def generate_updated_urdfs(self,
-                               suffix: Optional[str] = None) -> Dict[str, str]:
-        """Exports the current parameterization as a set of URDFs.
-
-        Returns:
-            Dictionary of new URDF file paths, keyed identically to
-            ``init_urdfs``.
-        """
- assert self.output_urdfs_dir is not None
- old_urdfs = self.init_urdfs
- new_urdf_strings = urdf_utils.represent_multibody_terms_as_urdfs(
- self.multibody_terms, self.output_urdfs_dir)
- new_urdfs = {}
-
-        # Save new URDFs with their original file basenames (plus an optional
-        # suffix) in the output folder.
- for urdf_name, new_urdf_string in new_urdf_strings.items():
- new_urdf_filename = path.basename(old_urdfs[urdf_name])
- if suffix != None:
- new_urdf_filename = new_urdf_filename.split('.')[0] + '_' + \
- suffix + '.urdf'
-
- new_urdf_path = path.join(self.output_urdfs_dir, new_urdf_filename)
- file_utils.save_string(new_urdf_path, new_urdf_string)
- new_urdfs[urdf_name] = new_urdf_path
-
- return new_urdfs
-
- def contactnets_loss(self,
- x: Tensor,
- u: Tensor,
- x_plus: Tensor,
- loss_pool: Optional[pool.Pool] = None) -> Tensor:
- r"""Calculate ContactNets [1] loss for state transition.
-
-        This loss is scaled to be per kilogram, which helps prevent mass
-        quantities from being driven to zero in multibody learning scenarios.
-
- References:
- [1] S. Pfrommer*, M. Halm*, and M. Posa. "ContactNets: Learning
- Discontinuous Contact Dynamics with Smooth, Implicit
- Representations," Conference on Robotic Learning, 2020,
- https://proceedings.mlr.press/v155/pfrommer21a.html
-
- Args:
- x: (\*, space.n_x) current state batch.
- u: (\*, ?) input batch.
-            x_plus: (\*, space.n_x) next state batch.
- loss_pool: optional processing pool to enable multithreaded solves.
-
- Returns:
- (\*,) loss batch.
- """
- loss_pred, loss_comp, loss_pen, loss_diss = \
- self.calculate_contactnets_loss_terms(x, u, x_plus)
-
- regularizers = self.get_regularization_terms(x, u, x_plus)
-
- # For now the regularization terms are: 0) residual norm, 1) residual
- # weights, 2) inertia matrix condition number. Will need to be updated
- # later if more are added.
- reg_norm = regularizers[0]
- reg_weight = regularizers[1]
- reg_inertia_cond = regularizers[2]
-
- loss = (self.w_res * reg_norm) + (self.w_res_w * reg_weight) + \
- (self.w_pred * loss_pred) + (self.w_comp * loss_comp) + \
- (self.w_pen * loss_pen) + (self.w_diss * loss_diss) + \
-               (1e-5 * reg_inertia_cond)
-
- return loss
-
- def get_regularization_terms(self, x: Tensor, u: Tensor,
- x_plus: Tensor) -> List[Tensor]:
- """Calculate some regularization terms."""
-
- regularizers = []
-
- # Residual size regularization.
- if self.residual_net != None:
- # Penalize the size of the residual. Good with w_res = 0.01.
- residual = self.residual_net(x_plus)
- residual_norm = torch.linalg.norm(residual, dim=1) ** 2
- regularizers.append(residual_norm)
-
- # Additionally penalize the residual network weights. This will get
- # scaled down to approximately the same size as the residual norm.
- l2_penalty = torch.zeros((x.shape[0],))
- for layer in self.residual_net:
- if isinstance(layer, nn.Linear):
-                    l2_penalty += (layer.weight ** 2).sum()
- # l2_penalty *= 1e-3
-
- regularizers.append(l2_penalty)
-
- else:
- # Otherwise, append 0 twice for the residual norm and weights.
- regularizers.append(torch.zeros((x.shape[-2],)))
- regularizers.append(torch.zeros((x.shape[-2],)))
-
- # Penalize the condition number of the mass matrix.
- q_plus, v_plus = self.space.q_v(x_plus)
- _, M, _, _, _ = self.get_multibody_terms(q_plus, v_plus, u)
- I_BBcm_B = M[..., :3, :3]
- regularizers.append(torch.linalg.cond(I_BBcm_B))
-
- # TODO: Use the believed geometry to help supervise the learned CoM.
- # if (self.multibody_terms.inertia_mode_txt != 'none') and \
- # (self.multibody_terms.inertia_mode_txt != 'masses'):
- # # This means the CoM locations are getting learned.
- # pass
-
- return regularizers
-
- def calculate_contactnets_loss_terms(self,
- x: Tensor,
- u: Tensor,
- x_plus: Tensor) -> \
-        Tuple[Tensor, Tensor, Tensor, Tensor]:
- """Helper function for
- :py:meth:`MultibodyLearnableSystem.contactnets_loss` that returns the
- individual pre-weighted loss contributions:
-
- * Prediction
- * Complementarity
- * Penetration
- * Dissipation
-
- Args:
- x: (*, space.n_x) current state batch.
- u: (*, ?) input batch.
-            x_plus: (*, space.n_x) next state batch.
-
- Returns:
- (*,) prediction error loss.
- (*,) complementarity violation loss.
- (*,) penetration loss.
- (*,) dissipation violation loss.
- """
- # pylint: disable-msg=too-many-locals
- v = self.space.v(x)
- q_plus, v_plus = self.space.q_v(x_plus)
- dt = self.dt
- eps = 0 #1e-4
- solver_eps = 1e-4
-
- # Begin loss calculation.
- delassus, M, J, phi, non_contact_acceleration = \
- self.get_multibody_terms(q_plus, v_plus, u)
-
- try:
- M_inv = torch.inverse((M))
- except:
- print(f'M: {M}')
- pdb.set_trace()
-
- # Construct a reordering matrix s.t. lambda_CN = reorder_mat @ f_sappy.
- n_contacts = phi.shape[-1]
- reorder_mat = tensor_utils.sappy_reorder_mat(n_contacts)
- reorder_mat = reorder_mat.reshape((1,) * (delassus.dim() - 2) +
- reorder_mat.shape).expand(
- delassus.shape)
- J_t = J[..., n_contacts:, :]
-
- # Construct a diagonal scaling matrix (3*n_contacts, 3*n_contacts) S
- # s.t. S @ lambda_CN = scaled lambdas in units [m/s] instead of [N s].
- delassus_diag_vec = torch.diagonal(delassus, dim1=-2, dim2=-1)
- contact_weights = pbmm(one_vector_block_diagonal(n_contacts, 3).t(),
- pbmm(reorder_mat.transpose(-1, -2),
- delassus_diag_vec.unsqueeze(-1)))
- contact_weights = broadcast_lorentz(contact_weights.squeeze(-1))
- S = torch.diag_embed(contact_weights)
-
- # Construct a diagonal scaling matrix (n_velocity, n_velocity) P s.t.
- # velocity errors are scaled to relate translation and rotation errors
-        # in a thoughtful way.
- P_diag = torch.ones_like(v)
- P_diag[..., :3] *= ROTATION_SCALING
- P_diag[..., 6:] *= JOINT_SCALING
- P = torch.diag_embed(P_diag)
-
- # pylint: disable=E1103
- double_zero_vector = torch.zeros(phi.shape[:-1] + (2 * n_contacts,))
- phi_then_zero = torch.cat((phi, double_zero_vector), dim=-1)
-
- # pylint: disable=E1103
- sliding_velocities = pbmm(J_t, v_plus.unsqueeze(-1))
- sliding_speeds = sliding_velocities.reshape(phi.shape[:-1] +
- (n_contacts, 2)).norm(
- dim=-1, keepdim=True)
-
- # Calculate "half delassus" based on loss formulation mode.
- if self.loss_variation_txt == LOSS_PLL_ORIGINAL:
- L = torch.linalg.cholesky(M_inv)
- half_delassus = pbmm(J, L)
- elif self.loss_variation_txt == LOSS_POWER:
- L = torch.linalg.cholesky(M_inv)
- half_delassus = pbmm(J, L)
- elif self.loss_variation_txt == LOSS_INERTIA_AGNOSTIC:
- half_delassus = pbmm(J, M_inv)
- elif self.loss_variation_txt == LOSS_BALANCED:
- half_delassus = pbmm(pbmm(J, M_inv), P)
- elif self.loss_variation_txt == LOSS_CONTACT_VELOCITY:
- half_delassus = delassus
-
- Q = pbmm(half_delassus, half_delassus.transpose(-1, -2)) + \
- eps * torch.eye(3 * n_contacts)
-
- J_M = pbmm(reorder_mat.transpose(-1,-2), half_delassus)
-
- dv = (v_plus - (v + non_contact_acceleration * dt)).unsqueeze(-2)
-
- # Calculate q vectors based on loss formulation mode.
- if self.loss_variation_txt == LOSS_PLL_ORIGINAL:
- q_pred = -pbmm(J, dv.transpose(-1, -2))
- q_comp = torch.abs(phi_then_zero).unsqueeze(-1)
- q_diss = dt*torch.cat((sliding_speeds, sliding_velocities), dim=-2)
- elif self.loss_variation_txt == LOSS_POWER:
- q_pred = -pbmm(J, dv.transpose(-1, -2))
- q_comp = (1/dt) * torch.abs(phi_then_zero).unsqueeze(-1)
- q_diss = torch.cat((sliding_speeds, sliding_velocities), dim=-2)
- elif self.loss_variation_txt == LOSS_INERTIA_AGNOSTIC:
- q_pred = -pbmm(J, pbmm(M_inv, dv.transpose(-1, -2)))
- # q_comp = (1/dt) * torch.abs(phi_then_zero).unsqueeze(-1)
- # q_diss = torch.cat((sliding_speeds, sliding_velocities), dim=-2)
- q_comp = (1/dt) * pbmm(S, torch.abs(phi_then_zero).unsqueeze(-1))
- q_diss = pbmm(S, torch.cat((sliding_speeds, sliding_velocities),
- dim=-2))
- elif self.loss_variation_txt == LOSS_BALANCED:
- q_pred = -pbmm(J, pbmm(M_inv, pbmm(pbmm(P, P),
- dv.transpose(-1, -2))))
- q_comp = (1/dt) * pbmm(S, torch.abs(phi_then_zero).unsqueeze(-1))
- q_diss = pbmm(S, torch.cat((sliding_speeds, sliding_velocities),
- dim=-2))
- elif self.loss_variation_txt == LOSS_CONTACT_VELOCITY:
- q_pred = -pbmm(delassus, pbmm(J, dv.transpose(-1, -2)))
- q_comp = (1/dt) * pbmm(S, torch.abs(phi_then_zero).unsqueeze(-1))
- q_diss = pbmm(S, torch.cat((sliding_speeds, sliding_velocities),
- dim=-2))
-
- q = q_pred + (self.w_comp/self.w_pred)*q_comp + \
- (self.w_diss/self.w_pred)*q_diss
-
- constant_pen = (torch.maximum(
- -phi, torch.zeros_like(phi))**2).sum(dim=-1)
- constant_pen = constant_pen.reshape(constant_pen.shape + (1,1))
-
- # Calculate the prediction constant based on loss formulation mode.
- if self.loss_variation_txt == LOSS_PLL_ORIGINAL:
- constant_pred = 0.5 * pbmm(dv, pbmm(M, dv.transpose(-1, -2)))
- elif self.loss_variation_txt == LOSS_POWER:
- constant_pred = 0.5 * pbmm(dv, pbmm(M, dv.transpose(-1, -2)))
- elif self.loss_variation_txt == LOSS_INERTIA_AGNOSTIC:
- constant_pred = 0.5 * pbmm(dv, dv.transpose(-1, -2))
- elif self.loss_variation_txt == LOSS_BALANCED:
- balanced_dv = pbmm(dv, P)
- constant_pred = 0.5 * pbmm(balanced_dv,
- balanced_dv.transpose(-1, -2))
- elif self.loss_variation_txt == LOSS_CONTACT_VELOCITY:
- contact_dv = pbmm(dv, J.transpose(-1, -2))
- constant_pred = 0.5 * pbmm(contact_dv, contact_dv.transpose(-1, -2))
-
- # Envelope theorem guarantees that gradient of loss w.r.t. parameters
- # can ignore the gradient of the force w.r.t. the QCQP parameters.
- # Therefore, we can detach ``force`` from pytorch's computation graph
- # without causing error in the overall loss gradient.
- # pylint: disable=E1103
- try:
- force = pbmm(
- reorder_mat,
- self.solver( #.apply(
- J_M,
- pbmm(reorder_mat.transpose(-1, -2),
- q).squeeze(-1)).detach().unsqueeze(-1))
- #pbmm(reorder_mat.transpose(-1, -2), q).squeeze(-1),
- #solver_eps).detach().unsqueeze(-1))
- except:
- print(f'J_M: {J_M}')
- print(f'reordered q: {pbmm(reorder_mat.transpose(-1, -2), q)}')
- pdb.set_trace()
-
- # Hack: remove elements of ``force`` where solver likely failed.
- invalid = torch.any((force.abs() > 1e3) | force.isnan() | force.isinf(),
- dim=-2,
- keepdim=True)
-
- constant_pen[invalid] *= 0.
- constant_pred[invalid] *= 0.
- force[invalid.expand(force.shape)] = 0.
-
- loss_pred = 0.5 * pbmm(force.transpose(-1, -2), pbmm(Q, force)) \
- + pbmm(force.transpose(-1, -2), q_pred) + constant_pred
- loss_comp = pbmm(force.transpose(-1, -2), q_comp)
- loss_pen = constant_pen
- loss_diss = pbmm(force.transpose(-1, -2), q_diss)
-
- return loss_pred.reshape(-1), loss_comp.reshape(-1), \
- loss_pen.reshape(-1), loss_diss.reshape(-1)
-
- def get_multibody_terms(self, q: Tensor, v: Tensor,
- u: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
- """Get multibody terms of the system. Without a residual, this is a
-        straightforward pass-through to the system's :py:class:`MultibodyTerms`.
- With a residual, the residual augments the continuous dynamics."""
-
- delassus, M, J, phi, non_contact_acceleration = self.multibody_terms(
- q, v, u)
-
- if self.residual_net != None:
- # Get the residual network's contribution.
- x = torch.cat((q, v), dim=1)
- residual = self.residual_net(x)/self.dt
- amended_acceleration = non_contact_acceleration + residual
-
- else:
- amended_acceleration = non_contact_acceleration
-
- return delassus, M, J, phi, amended_acceleration
-
- def init_residual_network(self, network_width: int, network_depth: int
- ) -> None:
- """Create and store a neural network architecture that has the multibody
- system state as input and outputs the size of the multibody system's
- velocity space."""
-
- def make_small_linear_layer(input_size, output_size):
- layer_with_small_init = nn.Linear(input_size, output_size)
- layer_with_small_init.weight.data *= 1e-2
- layer_with_small_init.bias.data *= 1e-2
- return layer_with_small_init
-
- layers: List[Module] = []
-
- layers.append(DeepStateAugment3D())
-
- n_augmented_state = self.space.n_x - 4 + 9
- layers.append(make_small_linear_layer(n_augmented_state, network_width))
- layers.append(nn.ReLU())
-
- for _ in range(network_depth - 1):
- layers.append(make_small_linear_layer(network_width, network_width))
- layers.append(nn.ReLU())
-
- layers.append(make_small_linear_layer(network_width, self.space.n_v))
-
- self.residual_net = nn.Sequential(*layers)
-
- def forward_dynamics(self,
- q: Tensor,
- v: Tensor,
- u: Tensor,
- dynamics_pool: Optional[pool.Pool] = None) -> Tensor:
- r"""Calculates delta velocity from current state and input.
-
- Implements Anitescu's [1] convex formulation in dual form, derived
- similarly to Tedrake [2] and described here.
-
- Let v_minus be the contact-free next velocity, i.e.::
-
- v + dt * non_contact_acceleration.
-
- Let FC be the combined friction cone::
-
- FC = {[beta_n beta_t]: beta_n_i >= ||beta_t_i||}.
-
- The primal version of Anitescu's formulation is as follows::
-
- min_{v_plus,s} (v_plus - v_minus)^T M(q)(v_plus - v_minus)/2
- s.t. s = [I; 0]phi(q)/dt + J(q)v_plus,
- s \\in FC.
-
- The KKT conditions are the mixed cone complementarity
- problem [3, Theorem 2]::
-
- s = [I; 0]phi(q)/dt + J(q)v_plus,
- M(q)(v_plus - v_minus) = J(q)^T f,
- FC \\ni s \\perp f \\in FC.
-
- As M(q) is positive definite, we can solve for v_plus in terms of
- lambda, and thus these conditions can be simplified to::
-
- FC \\ni D(q)f + J(q)v_minus + [I;0]phi(q)/dt \\perp f \\in FC.
-
- which in turn are the KKT conditions for the dual QCQP we solve::
-
- min_{f} f^T D(q) f/2 + f^T(J(q)v_minus + [I;0]phi(q)/dt)
- s.t. f \\in FC.
-
- References:
- [1] M. Anitescu, “Optimization-based simulation of nonsmooth rigid
- multibody dynamics,” Mathematical Programming, 2006,
- https://doi.org/10.1007/s10107-005-0590-7
-
- [2] R. Tedrake. Underactuated Robotics: Algorithms for Walking,
- Running, Swimming, Flying, and Manipulation (Course Notes for MIT
- 6.832), https://underactuated.mit.edu
-
- [3] S. Z. N'emeth, G. Zhang, "Conic optimization and
- complementarity problems," arXiv,
- https://doi.org/10.48550/arXiv.1607.05161
-
-        Args:
- q: (\*, space.n_q) current configuration batch.
- v: (\*, space.n_v) current velocity batch.
- u: (\*, ?) current input batch.
- dynamics_pool: optional processing pool to enable multithreaded
- solves.
-
- Returns:
- (\*, space.n_v) delta velocity batch.
- """
- # pylint: disable=too-many-locals
- dt = self.dt
- eps = 1e6
- solver_eps = 1e-4
- delassus, M, J, phi, non_contact_acceleration = \
- self.get_multibody_terms(q, v, u)
- n_contacts = phi.shape[-1]
- contact_filter = (broadcast_lorentz(phi) <= eps).unsqueeze(-1)
- contact_matrix_filter = pbmm(contact_filter.int(),
- contact_filter.transpose(-1,
- -2).int()).bool()
-
- reorder_mat = tensor_utils.sappy_reorder_mat(n_contacts)
- reorder_mat = reorder_mat.reshape((1,) * (delassus.dim() - 2) +
- reorder_mat.shape).expand(
- delassus.shape)
-
- try:
- M_inv = torch.inverse((M))
- except:
- print(f'M: {M}')
- pdb.set_trace()
-
- try:
- L = torch.linalg.cholesky(M_inv)
- except:
- print(f'\nCannot calculate Cholesky of M_inv (M={M})')
- pdb.set_trace()
-
- J_M = pbmm(reorder_mat.transpose(-1, -2), pbmm(J, L))
-
- # pylint: disable=E1103
- double_zero_vector = torch.zeros(phi.shape[:-1] + (2 * n_contacts,))
- phi_then_zero = torch.cat((phi, double_zero_vector),
- dim=-1).unsqueeze(-1)
- # pylint: disable=E1103
- Q_full = delassus + torch.eye(3 * n_contacts) * 1e-4
-
- v_minus = v + dt * non_contact_acceleration
- q_full = pbmm(J, v_minus.unsqueeze(-1)) + (1 / dt) * phi_then_zero
-
- Q = torch.zeros_like(Q_full)
- q = torch.zeros_like(q_full)
- Q[contact_matrix_filter] += Q_full[contact_matrix_filter]
- q[contact_filter] += q_full[contact_filter]
-
- try:
- impulse_full = pbmm(
- reorder_mat,
- self.solver( #.apply(
- J_M,
- pbmm(reorder_mat.transpose(-1, -2),
- q).squeeze(-1)).detach().unsqueeze(-1))
- #pbmm(reorder_mat.transpose(-1, -2), q).squeeze(-1),
- #solver_eps).detach().unsqueeze(-1))
- except:
- print(f'J_M: {J_M}')
- print(f'reordered q: {pbmm(reorder_mat.transpose(-1, -2), q)}')
- pdb.set_trace()
-
-
- impulse = torch.zeros_like(impulse_full)
- impulse[contact_filter] += impulse_full[contact_filter]
- return v_minus + torch.linalg.solve(M.float(), pbmm(J.transpose(-1, -2),
- impulse)).squeeze(-1)
-
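For intuition, a minimal sketch of the dual QCQP described in the ``forward_dynamics`` docstring above, for a single contact, written with cvxpy and made-up numbers (the class itself solves this through ``DynamicCvxpyLCQPLayer``; this standalone version only illustrates the optimization problem being solved):

    import cvxpy as cp
    import numpy as np

    n_contacts = 1
    D = np.eye(3 * n_contacts)           # stand-in for the Delassus operator D(q)
    q_vec = np.array([-0.2, 0.05, 0.0])  # stand-in for J(q) v_minus + [I;0] phi(q)/dt

    f = cp.Variable(3 * n_contacts)      # impulse, ordered [f_n, f_t1, f_t2]
    objective = cp.Minimize(0.5 * cp.quad_form(f, D) + q_vec @ f)
    constraints = [cp.SOC(f[0], f[1:3])]  # friction cone: f_n >= ||f_t||
    cp.Problem(objective, constraints).solve()
    print(f.value)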
- def sim_step(self, x: Tensor, carry: Tensor) -> Tuple[Tensor, Tensor]:
- """``Integrator.partial_step`` wrapper for
- :py:meth:`forward_dynamics`."""
- q, v = self.space.q_v(x)
- # pylint: disable=E1103
- u = torch.zeros(q.shape[:-1] + (0,))
- v_plus = self.forward_dynamics(q, v, u)
- return v_plus, carry
-
- def summary(self, statistics: Dict) -> SystemSummary:
- """Generates summary statistics for multibody system.
-
- The scalars returned are simply the scalar description of the
- system's :py:class:`MultibodyTerms`.
-
- Meshes are generated for learned
- :py:class:`~dair_pll.geometry.DeepSupportConvex` es.
-
- Args:
- statistics: Updated evaluation statistics for the model.
-
- Returns:
- Scalars and meshes packaged into a ``SystemSummary``.
- """
- scalars, meshes = self.multibody_terms.scalars_and_meshes()
- videos = cast(Dict[str, Tuple[np.ndarray, int]], {})
-
- return SystemSummary(scalars=scalars, videos=videos, meshes=meshes)
-
-
-
-class DeepStateAugment3D(Module):
- """To assist with the learning process, replace the quaternion angular
- representation with the rotation matrix vector."""
-
- def __init__(self) -> None:
- super().__init__()
-
- def forward(self, x: Tensor) -> Tensor:
- # Note: The below lines only work because the fixed ground does not
- # contribute to the state of the overall object-ground system.
- quat = x[..., :4]
- rotmat_vec = quaternion_to_rotmat_vec(quat)
-
- return torch.cat((rotmat_vec, x[..., 4:]), dim=1)
-
- # TODO: write compute_jacobian function
diff --git a/dair_pll_old/dair_pll/multibody_terms.py b/dair_pll_old/dair_pll/multibody_terms.py
deleted file mode 100644
index 47aeaf8..0000000
--- a/dair_pll_old/dair_pll/multibody_terms.py
+++ /dev/null
@@ -1,865 +0,0 @@
-"""Mathematical implementation of multibody dynamics terms calculations.
-
-This file implements the :py:class:`MultibodyTerms` type, which interprets a
-list of urdfs as a learnable Lagrangian system with contact, taking the state
-space from the corresponding :py:class:`MultibodyPlantDiagram` as a given, and
-interpreting the various inertial and geometric terms stored within it as
-initial conditions of learnable parameters.
-
-Multibody dynamics can be derived from four functions of state [q,v]:
-
- * M(q), the generalized mass-matrix
- * F(q), the non-contact/Lagrangian force terms.
- * phi(q), the signed distance between collision candidates.
- * J(q), the contact-frame velocity Jacobian between collision candidates.
-
-The first two terms depend solely on state and inertial properties,
-and parameterize the contact-free Lagrangian dynamics as::
-
- dv/dt = (M(q) ** (-1)) * F(q)
-
-These terms are accordingly encapsulated in a :py:class:`LagrangianTerms`
-instance.
-
-The latter two terms depend solely on the geometry of bodies coming into
-contact, and are encapsulated in a :py:class:`ContactTerms` instance.
-
-For both sets of terms, we derive their functional form either directly or in
-part through symbolic analysis of the :py:class:`MultibodyPlant` of the
-associated :py:class:`MultibodyPlantDiagram`. The :py:class:`MultibodyTerms`
-object manages the symbolic calculation and has corresponding
-:py:class:`LagrangianTerms` and :py:class:`ContactTerms` members.
-"""
-from typing import List, Tuple, Callable, Dict, cast, Optional
-
-import drake_pytorch # type: ignore
-import numpy as np
-import torch
-import pdb
-
-from pydrake.geometry import SceneGraphInspector, GeometryId # type: ignore
-from pydrake.multibody.plant import MultibodyPlant_ # type: ignore
-from pydrake.multibody.tree import JacobianWrtVariable # type: ignore
-from pydrake.multibody.tree import ModelInstanceIndex # type: ignore
-from pydrake.multibody.tree import SpatialInertia_, UnitInertia_, \
- RotationalInertia_ # type: ignore
-from pydrake.symbolic import Expression, Variable # type: ignore
-from pydrake.symbolic import MakeVectorVariable, Jacobian # type: ignore
-from pydrake.systems.framework import Context # type: ignore
-from scipy.spatial.transform import Rotation
-from torch import Tensor
-from torch.nn import Module, ModuleList, Parameter
-
-from dair_pll import drake_utils
-from dair_pll.deep_support_function import extract_mesh_from_support_function, \
- get_mesh_summary_from_polygon
-from dair_pll.drake_state_converter import DrakeStateConverter
-from dair_pll.drake_utils import MultibodyPlantDiagram
-from dair_pll.geometry import GeometryCollider, \
- PydrakeToCollisionGeometryFactory, \
- CollisionGeometry, DeepSupportConvex, Polygon, Box, \
- Plane, _NOMINAL_HALF_LENGTH
-from dair_pll.inertia import InertialParameterConverter
-from dair_pll.system import MeshSummary
-from dair_pll.tensor_utils import (pbmm, deal, spatial_to_point_jacobian)
-
-ConfigurationInertialCallback = Callable[[Tensor, Tensor], Tensor]
-StateInputInertialCallback = Callable[[Tensor, Tensor, Tensor, Tensor], Tensor]
-
-CENTER_OF_MASS_DOF = 3
-INERTIA_TENSOR_DOF = 6
-DEFAULT_SIMPLIFIER = drake_pytorch.Simplifier.QUICKTRIG
-
-INERTIA_PARAM_OPTIONS = ['none', 'masses', 'CoMs', 'CoMs and masses', 'all']
-
-
-# noinspection PyUnresolvedReferences
-def init_symbolic_plant_context_and_state(
- plant_diagram: MultibodyPlantDiagram
-) -> Tuple[MultibodyPlant_[Expression], Context, np.ndarray, np.ndarray]:
- """Generates a symbolic interface for a :py:class:`MultibodyPlantDiagram`.
-
- Generates a new Drake ``Expression`` data type state in
- :py:class:`StateSpace` format, and sets this state inside a new context for
- a symbolic version of the diagram's :py:class:`MultibodyPlant`.
-
- Args:
- plant_diagram: Drake MultibodyPlant diagram to convert to symbolic.
-
- Returns:
- New symbolic plant.
- New plant's context, with symbolic states set.
- (n_q,) symbolic :py:class:`StateSpace` configuration.
- (n_v,) symbolic :py:class:`StateSpace` velocity.
- """
- plant = plant_diagram.plant.ToSymbolic()
- space = plant_diagram.space
- context = plant.CreateDefaultContext()
-
- # :py:class:`StateSpace` representation of Plant's state.
- q = MakeVectorVariable(plant.num_positions(), 'q', Variable.Type.CONTINUOUS)
- v = MakeVectorVariable(plant.num_velocities(), 'v',
- Variable.Type.CONTINUOUS)
- x = np.concatenate([q, v], axis=-1)
-
- # Set :py:class:`StateSpace` symbolic state inside
- DrakeStateConverter.state_to_context(plant, context, x,
- plant_diagram.model_ids, space)
- return plant, context, q, v
-
-
-class LagrangianTerms(Module):
- """Container class for non-contact/Lagrangian dynamics terms.
-
- Accepts batched pytorch callback functions for M(q) and F(q) and related
- contact terms in ``theta`` format (see ``inertia.py``).
- """
- mass_matrix: Optional[ConfigurationInertialCallback]
- lagrangian_forces: Optional[StateInputInertialCallback]
- inertial_parameters: Parameter
- inertia_mode_txt: str
-
- def __init__(self, plant_diagram: MultibodyPlantDiagram,
- inertia_mode: int) -> None:
- """Inits :py:class:`LagrangianTerms` with prescribed parameters and
- functional forms.
-
- Args:
- plant_diagram: Drake MultibodyPlant diagram to extract terms from.
- inertia_mode: An integer 0, 1, 2, 3, or 4 representing the
- inertial parameters the model can learn. The higher the number
- the more inertial parameters are free to be learned, and 0
- corresponds to learning no inertial parameters.
- """
- super().__init__()
-
- plant, context, q, v = init_symbolic_plant_context_and_state(
- plant_diagram)
- gamma = Jacobian(plant.GetVelocities(context), v)
-
- body_parameters, body_variables = \
- LagrangianTerms.extract_body_parameters_and_variables(
- plant, plant_diagram.model_ids, context)
-
- mass_matrix_expression = \
- gamma.T @ plant.CalcMassMatrixViaInverseDynamics(context) @ gamma
-
- self.mass_matrix, _ = drake_pytorch.sym_to_pytorch(
- mass_matrix_expression,
- q,
- body_variables,
- simplify_computation=DEFAULT_SIMPLIFIER)
-
- u = MakeVectorVariable(plant.num_actuated_dofs(), 'u',
- Variable.Type.CONTINUOUS)
- drake_forces_expression = -plant.CalcBiasTerm(
- context) + plant.MakeActuationMatrix(
- ) @ u + plant.CalcGravityGeneralizedForces(context)
-
- lagrangian_forces_expression = gamma.T @ drake_forces_expression
- self.lagrangian_forces, _ = drake_pytorch.sym_to_pytorch(
- lagrangian_forces_expression,
- q,
- v,
- u,
- body_variables,
- simplify_computation=DEFAULT_SIMPLIFIER)
-
- # pylint: disable=E1103
- self.inertial_parameters = Parameter(body_parameters,
- requires_grad=True)
-
- # store the original inertial parameters since not all will be learned
- self.original_pi_cm_params = InertialParameterConverter.theta_to_pi_cm(
- self.inertial_parameters.detach())
-
- # keep track of what inertial parameters will or will not be learned
- self.inertia_mode_txt = INERTIA_PARAM_OPTIONS[inertia_mode]
-
- def inertial_params(self):
- # Make a method here instead of accessing the parameters directly so we
- # can overwrite any parameters that will not be learned.
-
- # First, convert the current inertial parameters to pi_cm format.
- curr_pi_cm = InertialParameterConverter.theta_to_pi_cm(
- self.inertial_parameters)
-
- # Reminder, pi_cm format is:
- # [m, m * p_x, m * p_y, m * p_z, I_xx, I_yy, I_zz, I_xy, I_xz, I_yz]
-
- # Overwrite any inertial parameters that should not be learned. In all
- # cases, overwrite the mass of the first object to handle scale
- # invariance.
- orig = self.original_pi_cm_params
- orig_m_total = orig[:, 0].sum()
-
- # Since the overall mass is unobservable, the only learnable parameters
- # here should be the relative distribution of mass among all links.
- curr_m_total = curr_pi_cm[:, 0].sum()
- curr_pi_cm[:, 0] *= orig_m_total/curr_m_total
-
- # Overwrite more parameters based on the inertia mode.
- mode = self.inertia_mode_txt
- if mode != 'all':
- # Overwrite the moments of inertia unless learning all remaining
- # parameters. Use direct assignment to zero the parameter gradient.
- curr_pi_cm[:, 4:] = orig[:, 4:]
-
- # Overwrite the masses unless learning those.
- if (mode == 'none') or ('masses' not in mode):
- # Overwrite the masses. Use direct assignment to zero the
- # parameter gradients.
- curr_pi_cm[:, 0] = orig[:, 0]
-
- # Overwrite the center of masses unless learning those.
- if (mode == 'none') or ('CoMs' not in mode):
- # pi format has the center of masses multiplied by the mass, so
- # ensure physical feasibility by scaling the parameters with the
- # current mass belief and not the original mass (which may
- # differ). Use direct assignment to zero the parameter
- # gradient.
- curr_pi_cm[:, 1:4] = (orig[:, 1:4].T * \
- curr_pi_cm[:, 0].detach() / orig[:, 0]).T
-
- # convert back to inertial parameters
- return InertialParameterConverter.pi_cm_to_theta(curr_pi_cm)
-
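A small illustration of the ``pi_cm`` layout referenced above and of the total-mass normalization performed in ``inertial_params`` (made-up numbers):

    import torch

    # pi_cm layout: [m, m*p_x, m*p_y, m*p_z, I_xx, I_yy, I_zz, I_xy, I_xz, I_yz]
    # e.g. a 1 kg body with its CoM at (0.01, 0.0, 0.02) m:
    pi_cm = torch.tensor([[1.0, 0.01, 0.0, 0.02,
                           1e-3, 1e-3, 1e-3, 0.0, 0.0, 0.0]])

    # Scale invariance: rescale the masses so their total matches the original.
    orig_m_total = torch.tensor(1.0)
    pi_cm[:, 0] *= orig_m_total / pi_cm[:, 0].sum()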
- # noinspection PyUnresolvedReferences
- @staticmethod
- def extract_body_parameters_and_variables(
- plant: MultibodyPlant_[Expression],
- model_ids: List[ModelInstanceIndex],
- context: Context) -> Tuple[Tensor, np.ndarray]:
- """Generates parameterization and symbolic variables for all bodies.
-
- For a multibody plant, finds all bodies that should have inertial
- properties; extracts the current values as an initial condition for
- ``theta``-format learnable parameters, and sets new symbolic versions of
- these variables.
-
- Args:
- plant: Symbolic plant from which to extract parameterization.
- model_ids: List of models in plant.
- context: Plant's symbolic context.
-
- Returns:
- (n_bodies, 10) ``theta`` parameters initial conditions.
- (n_bodies, 10) symbolic inertial variables.
- """
- all_bodies, all_body_ids = drake_utils.get_all_inertial_bodies(
- plant, model_ids)
-
- body_parameter_list = []
- body_variable_list = []
- for body, body_id in zip(all_bodies, all_body_ids):
- mass = Variable(f'{body_id}_m', Variable.Type.CONTINUOUS)
- p_BoBcm_B = MakeVectorVariable(CENTER_OF_MASS_DOF, f'{body_id}_com',
- Variable.Type.CONTINUOUS)
- I_BBcm_B = MakeVectorVariable(INERTIA_TENSOR_DOF, f'{body_id}_I',
- Variable.Type.CONTINUOUS)
-
- # get original values
- body_parameter_list.append(
- InertialParameterConverter.drake_to_theta(
- body.CalcSpatialInertiaInBodyFrame(context)))
-
- body_spatial_inertia = \
- SpatialInertia_[Expression].MakeFromCentralInertia(
- mass=mass, p_PScm_E=p_BoBcm_B,
- I_SScm_E=RotationalInertia_[Expression](*I_BBcm_B))
-
- body.SetMass(context, mass)
- body.SetSpatialInertiaInBodyFrame(context, body_spatial_inertia)
- body_variable_list.append(np.hstack((mass, p_BoBcm_B, I_BBcm_B)))
- # pylint: disable=E1103
- return torch.stack(body_parameter_list), np.vstack(body_variable_list)
-
- def pi_cm(self) -> Tensor:
- """Returns inertial parameters in human-understandable ``pi_cm``
- -format"""
- return InertialParameterConverter.theta_to_pi_cm(self.inertial_params())
-
- def forward(self, q: Tensor, v: Tensor, u: Tensor) -> Tuple[Tensor, Tensor]:
- """Evaluates Lagrangian dynamics terms at given state and input.
-
- Args:
- q: (\*, n_q) configuration batch.
- v: (\*, n_v) velocity batch.
- u: (\*, n_u) input batch.
-
- Returns:
- (\*, n_v, n_v) mass matrix batch M(q)
- (\*, n_v) Lagrangian contact-free acceleration inv(M(q)) F(q)
- """
- # Pylint bug: cannot recognize instance attributes as Callable.
- # pylint: disable=not-callable
- assert self.mass_matrix is not None
- assert self.lagrangian_forces is not None
- inertia = \
- InertialParameterConverter.pi_cm_to_drake_spatial_inertia_vector(
- self.pi_cm())
- inertia = inertia.expand(q.shape[:-1] + inertia.shape)
-
- M = self.mass_matrix(q, inertia)
- non_contact_acceleration = torch.linalg.solve(
- M, self.lagrangian_forces(q, v, u, inertia))
- return M, non_contact_acceleration
-
-
-ConfigurationCallback = Callable[[Tensor], Tensor]
-
-
-def make_configuration_callback(expression: np.ndarray, q: np.ndarray) -> \
- Callable[[Tensor], Tensor]:
- """Converts drake symbolic expression to pytorch function via
- ``drake_pytorch``."""
- return cast(
- Callable[[Tensor], Tensor],
- drake_pytorch.sym_to_pytorch(
- expression, q, simplify_computation=DEFAULT_SIMPLIFIER)[0])
-
-
-class ContactTerms(Module):
- """Container class for contact-related dynamics terms.
-
- Derives batched pytorch callback functions for collision geometry
- position and velocity kinematics from a
- :class:`~dair_pll.drake_utils.MultibodyPlantDiagram`.
- """
- geometry_rotations: Optional[ConfigurationCallback]
- geometry_translations: Optional[ConfigurationCallback]
- geometry_spatial_jacobians: Optional[ConfigurationCallback]
- geometries: ModuleList
- geometry_local_poses: Parameter
- friction_params: Parameter
- collision_candidates: Tensor
-
- def __init__(self, plant_diagram: MultibodyPlantDiagram,
- represent_geometry_as: str = 'box') -> None:
- """Inits :py:class:`ContactTerms` with prescribed kinematics and
- geometries.
-
- phi(q) and J(q) are calculated implicitly from kinematics and ``n_g ==
- len(geometries)`` collision geometries C.
-
- Args:
- plant_diagram: Drake MultibodyPlant diagram to extract terms from.
- represent_geometry_as: How to represent the geometry of any
- learnable bodies (box/mesh/polygon). By default, any ``Plane``
- objects are not considered learnable -- only boxes or meshes.
- """
- # pylint: disable=too-many-locals
- super().__init__()
- plant, context, q, v = init_symbolic_plant_context_and_state(
- plant_diagram)
- inspector = plant_diagram.scene_graph.model_inspector()
-
- collision_geometry_set = plant_diagram.collision_geometry_set
- geometry_ids = collision_geometry_set.ids
- coulomb_frictions = collision_geometry_set.frictions
- collision_candidates = collision_geometry_set.collision_candidates
-
- # sweep over collision elements
- geometries, rotations, translations, drake_spatial_jacobians = \
- ContactTerms.extract_geometries_and_kinematics(
- plant, inspector, geometry_ids, context, represent_geometry_as)
-
- for geometry_index, geometry_pair in enumerate(collision_candidates):
- if geometries[geometry_pair[0]] > geometries[geometry_pair[1]]:
- collision_candidates[geometry_index] = (geometry_pair[1],
- geometry_pair[0])
-
- self.geometry_rotations = make_configuration_callback(
- np.stack(rotations), q)
-
- self.geometry_translations = make_configuration_callback(
- np.stack(translations), q)
-
- drake_velocity_jacobian = Jacobian(plant.GetVelocities(context), v)
- self.geometry_spatial_jacobians = make_configuration_callback(
- np.stack([
- jacobian @ drake_velocity_jacobian
- for jacobian in drake_spatial_jacobians
- ]), q)
-
- self.geometries = ModuleList(geometries)
-
- mu_static = Tensor(
- [friction.static_friction() for friction in coulomb_frictions])
-
- self.friction_params = Parameter(mu_static, requires_grad=True)
-
- self.collision_candidates = Tensor(collision_candidates).t().long()
-
- def get_friction_coefficients(self) -> Tensor:
- """From the stored :py:attr:`friction_params`, compute the friction
- coefficient as its absolute value."""
- positive_friction_params = torch.abs(self.friction_params)
-
- # Overwrite the friction parameter associated with the ground.
- # HACK: TODO BIBIT this hard codes the ground as [1] in self.geometries
- positive_friction_params[1] = 1.0
-
- return positive_friction_params
-
- # noinspection PyUnresolvedReferences
- @staticmethod
- def extract_geometries_and_kinematics(
- plant: MultibodyPlant_[Expression], inspector: SceneGraphInspector,
- geometry_ids: List[GeometryId], context: Context,
- represent_geometry_as: str
- ) -> Tuple[List[CollisionGeometry], List[np.ndarray], List[np.ndarray],
- List[np.ndarray]]:
- """Extracts modules and kinematics of list of geometries G.
-
- Args:
- plant: Multibody plant from which terms are extracted.
- inspector: Scene graph inspector associated with plant.
- geometry_ids: List of geometries to model.
- context: Plant's context with symbolic state.
- represent_geometry_as: How to represent learnable geometries.
-
- Returns:
- List of :py:class:`CollisionGeometry` models with one-to-one
- correspondence with provided geometries.
- List[(3,3)] of corresponding rotation matrices R_WG
- List[(3,)] of corresponding geometry frame origins p_WoGo_W
- List[(6,n_v)] of geometry spatial jacobians w.r.t. drake velocity
- coordinates, J(v_drake)_V_WG_W
- """
- world_frame = plant.world_frame()
- geometries = []
- rotations = []
- translations = []
- drake_spatial_jacobians = []
-
- for geometry_id in geometry_ids:
- geometry_pose = inspector.GetPoseInFrame(
- geometry_id).cast[Expression]()
-
- geometry_frame = plant.GetBodyFromFrameId(
- inspector.GetFrameId(geometry_id)).body_frame()
-
- geometry_transform = geometry_frame.CalcPoseInWorld(
- context) @ geometry_pose
-
- rotations.append(geometry_transform.rotation().matrix())
-
- translations.append(geometry_transform.translation())
-
- drake_spatial_jacobian = plant.CalcJacobianSpatialVelocity(
- context=context,
- with_respect_to=JacobianWrtVariable.kV,
- frame_B=geometry_frame,
- p_BoBp_B=geometry_pose.translation().reshape(3, 1),
- frame_A=world_frame,
- frame_E=world_frame)
- drake_spatial_jacobians.append(drake_spatial_jacobian)
-
- geometries.append(
- PydrakeToCollisionGeometryFactory.convert(
- inspector.GetShape(geometry_id), represent_geometry_as))
-
- return geometries, rotations, translations, drake_spatial_jacobians
-
- @staticmethod
- def assemble_velocity_jacobian(R_CW, Jv_V_WC_W, p_CoCc_C):
- """Helper method to generate velocity jacobian from contact information.
-
- Args:
- R_CW: (\*, n_c, 3, 3) Rotation of world frame w.r.t. geometry frame.
- Jv_V_WC_W: (\*, 1, 6, n_v) Geometry spatial velocity Jacobian.
- p_CoCc_C: (\*, n_c, 3) Geometry-frame contact points.
-
- Returns:
- (\*, n_c, 3, n_v) World-frame contact point translational velocity
- Jacobian.
- """
- p_CoCc_W = pbmm(p_CoCc_C.unsqueeze(-2), R_CW).squeeze(-2)
- Jv_v_WCc_W = pbmm(spatial_to_point_jacobian(p_CoCc_W), Jv_V_WC_W)
- return Jv_v_WCc_W
-
- @staticmethod
- def relative_velocity_to_contact_jacobian(Jv_v_W_BcAc_F: Tensor,
- mu: Tensor) -> Tensor:
- """Helper method to reorder contact Jacobian columns.
-
- Args:
- Jv_v_W_BcAc_F: (\*, n_collisions, 3, n_v) collection of
- contact-frame relative velocity Jacobians.
-            mu: (n_collisions,) friction coefficients, one per collision pair.
-
- Returns:
- (\*, 3 * n_collisions, n_v) contact jacobian J(q) in [J_n; mu * J_t]
- ordering.
- """
- # Tuple of (*, n_collisions, n_v)
- J_x, J_y, J_z = deal(Jv_v_W_BcAc_F, -2)
-
- J_n = J_z
-
- # Reshape (*, n_collisions, 2 * n_v) -> (*, 2 * n_collisions, n_v)
- # pylint: disable=E1103
- mu_shape = torch.Size((1,) * (J_x.dim() - 2) + mu.shape + (1,))
- friction_jacobian_shape = J_x.shape[:-2] + (-1, J_x.shape[-1])
- J_t = (mu.reshape(mu_shape) * torch.cat((J_x, J_y), dim=-1)) \
- .reshape(friction_jacobian_shape)
- return torch.cat((J_n, J_t), dim=-2)
-
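A shape and ordering sketch for the helper above, called from outside the class, with two collisions and n_v = 6 (random, illustrative values); rows of the result are ordered [J_n1; J_n2; mu_1*J_x1; mu_1*J_y1; mu_2*J_x2; mu_2*J_y2]:

    import torch

    Jv_v_W_BcAc_F = torch.randn(1, 2, 3, 6)   # (*, n_collisions, 3, n_v)
    mu = torch.tensor([0.5, 1.0])
    J = ContactTerms.relative_velocity_to_contact_jacobian(Jv_v_W_BcAc_F, mu)
    assert J.shape == (1, 6, 6)               # (*, 3 * n_collisions, n_v)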
- def forward(self, q: Tensor) -> Tuple[Tensor, Tensor]:
- """Evaluates Lagrangian dynamics terms at given state and input.
-
- Uses :py:class:`GeometryCollider` and kinematics to construct signed
- distance phi(q) and the corresponding Jacobian J(q).
-
- phi(q) and J(q) are calculated implicitly from kinematics and collision
- geometries.
-
- Args:
-            q: (\*, n_q) configuration batch.
-
- Returns:
- (\*, n_collisions) signed distance phi(q).
- (\*, 3 * n_collisions, n_v) contact Jacobian J(q).
- """
- # Pylint bug: cannot recognize instance attributes as Callable.
- # pylint: disable=too-many-locals,not-callable
- assert self.geometry_rotations is not None
- assert self.geometry_translations is not None
- assert self.geometry_spatial_jacobians is not None
- R_WC = self.geometry_rotations(q)
- p_WoCo_W = self.geometry_translations(q)
- Jv_V_WC_W = self.geometry_spatial_jacobians(q)
-
- indices_a = self.collision_candidates[0, :]
- indices_b = self.collision_candidates[1, :]
-
- geometries_a = [
- cast(CollisionGeometry, self.geometries[element_index])
- for element_index in indices_a
- ]
- geometries_b = [
- cast(CollisionGeometry, self.geometries[element_index])
- for element_index in indices_b
- ]
-
- friction_coefficients = self.get_friction_coefficients()
- mu_a = friction_coefficients[indices_a]
- mu_b = friction_coefficients[indices_b]
-
- # combine friction coefficients as in Drake.
- mu = (2 * mu_a * mu_b) / (mu_a + mu_b)
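        # e.g. mu_a = 1.0 and mu_b = 0.5 give mu = 2 * 0.5 / 1.5 ~= 0.67 above,
        # i.e. the harmonic mean of the two coefficients.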
-
- R_WA = R_WC[..., indices_a, :, :]
- R_AW = deal(R_WA.transpose(-1, -2), -3)
- R_BW = deal(R_WC[..., indices_b, :, :].transpose(-1, -2), -3)
-
- Jv_V_WA_W = deal(Jv_V_WC_W[..., indices_a, :, :], -3, keep_dim=True)
- Jv_V_WB_W = deal(Jv_V_WC_W[..., indices_b, :, :], -3, keep_dim=True)
-
- # Interbody translation in A frame, shape (*, n_g, 3)
- p_AoBo_W = p_WoCo_W[..., indices_b, :] - p_WoCo_W[..., indices_a, :]
- p_AoBo_A = deal(pbmm(p_AoBo_W.unsqueeze(-2), R_WA).squeeze(-2), -2)
-
- Jv_v_W_BcAc_F = []
- phi_list = []
-
- # bundle all modules and kinematics into a tuple iterator
- a_b = zip(geometries_a, geometries_b, R_AW, R_BW, p_AoBo_A, Jv_V_WA_W,
- Jv_V_WB_W)
-
- # iterate over body pairs (Ai, Bi)
- for geo_a, geo_b, R_AiW, R_BiW, p_AiBi_A, Jv_V_WAi_W, Jv_V_WBi_W in a_b:
- # relative rotation between Ai and Bi, (*, 3, 3)
- R_AiBi = pbmm(R_AiW, R_BiW.transpose(-1, -2))
-
- # collision result,
- # Tuple[(*, n_c), (*, n_c, 3, 3), (*, n_c, 3), (*, n_c, 3)]
- phi_i, R_AiF, p_AiAc_A, p_BiBc_B = GeometryCollider.collide(
- geo_a, geo_b, R_AiBi, p_AiBi_A)
-
- # contact frame rotation, (*, n_c, 3, 3)
- R_FW = pbmm(R_AiF.transpose(-1, -2), R_AiW.unsqueeze(-3))
-
- # contact point velocity jacobians, (*, n_c, 3, n_v)
- Jv_v_WAc_W = ContactTerms.assemble_velocity_jacobian(
- R_AiW.unsqueeze(-3), Jv_V_WAi_W, p_AiAc_A)
- Jv_v_WBc_W = ContactTerms.assemble_velocity_jacobian(
- R_BiW.unsqueeze(-3), Jv_V_WBi_W, p_BiBc_B)
-
- # contact relative velocity, (*, n_c, 3, 3)
- Jv_v_W_BcAc_F.append(pbmm(R_FW, Jv_v_WBc_W - Jv_v_WAc_W))
- phi_list.append(phi_i)
-
- # pylint: disable=E1103
- mu_repeated = torch.cat(
- [mu_i.repeat(phi_i.shape[-1]) for phi_i, mu_i in zip(phi_list, mu)])
- phi = torch.cat(phi_list, dim=-1) # type: Tensor
- J = ContactTerms.relative_velocity_to_contact_jacobian(
- torch.cat(Jv_v_W_BcAc_F, dim=-3), mu_repeated)
-
- return phi, J
-
-
-class MultibodyTerms(Module):
- """Derives and manages computation of terms of multibody dynamics with
- contact.
-
-    Primarily a thin wrapper around :py:class:`LagrangianTerms` and
-    :py:class:`ContactTerms`.
-    """
- lagrangian_terms: LagrangianTerms
- contact_terms: ContactTerms
- geometry_body_assignment: Dict[str, List[int]]
- plant_diagram: MultibodyPlantDiagram
- urdfs: Dict[str, str]
- inertia_mode: int
-
- def scalars_and_meshes(
- self) -> Tuple[Dict[str, float], Dict[str, MeshSummary]]:
- """Generates summary statistics for inertial and geometric quantities."""
- scalars = {}
- meshes = {}
- _, all_body_ids = \
- drake_utils.get_all_inertial_bodies(
- self.plant_diagram.plant,
- self.plant_diagram.model_ids)
-
- friction_coefficients = self.contact_terms.get_friction_coefficients()
-
- for body_pi, body_id in zip(self.lagrangian_terms.pi_cm(), all_body_ids):
- body_scalars = InertialParameterConverter.pi_cm_to_scalars(body_pi)
-
- scalars.update({
- f'{body_id}_{scalar_name}': scalar
- for scalar_name, scalar in body_scalars.items()
- })
-
- for geometry_index in self.geometry_body_assignment[body_id]:
- # include geometry
- geometry = self.contact_terms.geometries[geometry_index]
- geometry_scalars = geometry.scalars()
- scalars.update({
- f'{body_id}_{scalar_name}': scalar
- for scalar_name, scalar in geometry_scalars.items()
- })
-
- # include friction
- scalars[f'{body_id}_mu'] = \
- friction_coefficients[geometry_index].item()
-
- geometry_mesh = None
- if isinstance(geometry, DeepSupportConvex):
- print(">>>>>>>>>> deep support")
- geometry_mesh = extract_mesh_from_support_function(
- geometry.network)
-
- elif isinstance(geometry, Polygon):
- print(">>>>>>>>>>> polygon")
- geometry_mesh = get_mesh_summary_from_polygon(geometry)
-
- if geometry_mesh != None:
- meshes[body_id] = geometry_mesh
- vertices = geometry_mesh.vertices
- diameters = vertices.max(dim=0).values - vertices.min(
- dim=0).values
- center = vertices.min(dim=0).values + diameters / 2
- scalars.update({
- f'{body_id}_diameter_{axis}': value.item()
- for axis, value in zip(['x', 'y', 'z'], diameters)
- })
- scalars.update({
- f'{body_id}_center_{axis}': value.item()
- for axis, value in zip(['x', 'y', 'z'], center)
- })
-
- return scalars, meshes
-
- def forward(self, q: Tensor, v: Tensor,
- u: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
- """Evaluates multibody system dynamics terms at given state and input.
-
- Calculation is performed as a thin wrapper around
- :py:class:`LagrangianTerms` and :py:class:`ContactTerms`. For
- convenience, this function also returns the Delassus operator
-        `D(q) = J(q) inv(M(q)) J(q)^T`.
-
- Args:
- q: (\*, n_q) configuration batch.
- v: (\*, n_v) velocity batch.
- u: (\*, n_u) input batch.
-
- Returns:
- (\*, 3 * n_collisions, 3 * n_collisions) Delassus operator D(q).
- (\*, n_v, n_v) mass matrix batch M(q).
- (\*, 3 * n_collisions, n_v) contact Jacobian J(q).
- (\*, n_collisions) signed distance phi(q).
- (\*, n_v) Contact-free acceleration inv(M(q)) * F(q).
- """
- M, non_contact_acceleration = self.lagrangian_terms(q, v, u)
- phi, J = self.contact_terms(q)
-
- delassus = pbmm(J, torch.linalg.solve(M, J.transpose(-1, -2)))
- return delassus, M, J, phi, non_contact_acceleration
-
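A toy check of the Delassus operator returned by ``forward`` above (random, illustrative values):

    import torch

    M = torch.diag(torch.tensor([2.0, 2.0, 2.0, 0.1, 0.1, 0.1]))   # (n_v, n_v)
    J = torch.randn(3, 6)                                          # one contact
    delassus = J @ torch.linalg.solve(M, J.T)                      # (3, 3), PSD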
- def __init__(self, urdfs: Dict[str, str], inertia_mode: int,
- represent_geometry_as: str = 'box',
- randomize_initialization: bool = False,
- g_frac: float = 1.0) -> None:
- """Inits :py:class:`MultibodyTerms` for system described in URDFs
-
- Interpretation is performed as a thin wrapper around
- :py:class:`LagrangianTerms` and :py:class:`ContactTerms`.
-
- As this module is also responsible for evaluating updated URDF
- representations, the associations between bodies and geometries is
- also tracked to enable URDF rendering in
- ``MultibodyTerms.EvalUrdfRepresentation`` and Tensorboard logging in
- ``MultibodyTerms.scalars``.
-
- Args:
- urdfs: Dictionary of named URDF XML file names, containing
- description of multibody system.
- inertia_mode: An integer 0, 1, 2, 3, or 4 representing the
- inertial parameters the model can learn. The higher the number
- the more inertial parameters are free to be learned, and 0
- corresponds to learning no inertial parameters.
- represent_geometry_as: String box/mesh/polygon to determine how
- the geometry should be represented.
- """
- super().__init__()
-
- plant_diagram = MultibodyPlantDiagram(urdfs, g_frac=g_frac)
- plant = plant_diagram.plant.ToSymbolic()
- inspector = plant_diagram.scene_graph.model_inspector()
-
- _, all_body_ids = drake_utils.get_all_bodies(plant,
- plant_diagram.model_ids)
-
- # sweep over collision elements
- geometry_body_assignment: Dict[str, List[int]] = {
- body_id: [] for body_id in all_body_ids
- }
-
- geometry_ids = plant_diagram.collision_geometry_set.ids
-
- for geometry_index, geometry_id in enumerate(geometry_ids):
- geometry_frame_id = inspector.GetFrameId(geometry_id)
- geometry_body = plant.GetBodyFromFrameId(geometry_frame_id)
- geometry_body_identifier = drake_utils.unique_body_identifier(
- plant, geometry_body)
- geometry_body_assignment[geometry_body_identifier].append(
- geometry_index)
-
- # setup parameterization
- self.lagrangian_terms = LagrangianTerms(plant_diagram, inertia_mode)
- self.contact_terms = ContactTerms(plant_diagram, represent_geometry_as)
- self.geometry_body_assignment = geometry_body_assignment
- self.plant_diagram = plant_diagram
- self.urdfs = urdfs
-
- def randomize_multibody_terms(self, inertia_int) -> None:
- r"""Adds random noise to multibody terms in the following ways:
- - Geometry lengths can be between 0.95 and 1.05 times their original
- length.
-          - Friction coefficients can be between 0.95 and 1.05 times their
-            original values (the ground's friction is reset to 1.0).
- - Total mass does not change.
- - Inertia is determined via:
- - Choose a random set of three length scales between 0.5 and 1.5
- times the true length.
- - Choose a random mass fraction :math:`\nu` between 0.5 and 1.5.
- - Define :math:`I_{xx,princ.axis}` along a principal axis as
- :math:`\frac{\nu m}{12} (l_y^2 + l_z^2)`, and similarly for
- :math:`I_{yy,princ.axis}` and :math:`I_{zz,princ.axis}`.
- - Generate a random rotation in SO(3) and transform the
- principal axis inertia matrix with it.
- - Define all moments and products of inertia from this rotated
- version.
- """
- def scale_factory(x: Tensor) -> Tensor:
- """Return a scaled version of the input such that each element in
- the input individually is randomly scaled between 50% and 150% of
- its original value."""
- # return torch.mul(x, torch.rand_like(x) + 0.5)
- scaling_factor = 0.1 * torch.rand_like(x) + 0.95
- return torch.mul(x, scaling_factor)
-
- # First do friction all at once. Note that
- # self.contact_terms.get_friction_coefficients will ensure the ground's
- # friction gets properly rewritten to 1.0.
- new_friction_params = scale_factory(self.contact_terms.friction_params)
- new_friction_params[1] = 1.0 # hack, ground is always element 1
- self.contact_terms.friction_params = Parameter(
- new_friction_params, requires_grad=True)
-
- # Second, randomize the geometry.
- for geometry in self.contact_terms.geometries:
- # Don't make changes for ground geometry.
- if isinstance(geometry, Plane):
- continue
- elif isinstance(geometry, Box):
- geometry.length_params = \
- Parameter(scale_factory(geometry.length_params),
- requires_grad=True)
- elif isinstance(geometry, Polygon):
- geometry.vertices_parameter = \
- Parameter(scale_factory(geometry.vertices_parameter),
- requires_grad=True)
- else:
- raise NotImplementedError("Can only randomize Box and Polygon "
- "geometries.")
-
- # Third, randomize the inertia. Only randomize the learnable params.
- if INERTIA_PARAM_OPTIONS[inertia_int] != 'none' and \
- INERTIA_PARAM_OPTIONS[inertia_int] != 'masses':
- # Use a while loop to prevent nan situations.
- keep_trying = True
- while keep_trying:
- n_bodies = self.lagrangian_terms.inertial_parameters.shape[0]
- for idx in range(n_bodies):
- pi_cm = self.lagrangian_terms.original_pi_cm_params[idx]
-
- # Let the center of mass be anywhere within the inner half
- # of a nominal geometry.
- mass = pi_cm[0].item()
- pi_cm[1:4] += mass*(torch.rand(3) - 0.5) \
- * _NOMINAL_HALF_LENGTH
-
- if INERTIA_PARAM_OPTIONS[inertia_int] == 'all':
- # Define the moments of inertia assuming a solid block
- # of homogeneous density with random mass and random
- # lengths.
- rand_mass = mass * (torch.rand(1) + 0.5)
- rand_lens = (torch.rand(3) + 0.5) * _NOMINAL_HALF_LENGTH
- scaling = rand_mass/12
- Ixx_pa = scaling * (rand_lens[1]**2 + rand_lens[2]**2)
- Iyy_pa = scaling * (rand_lens[0]**2 + rand_lens[2]**2)
- Izz_pa = scaling * (rand_lens[0]**2 + rand_lens[1]**2)
-
- # Randomly rotate the principal axes.
- rot_mat = Tensor(Rotation.random().as_matrix())
- I_mat_pa = Tensor([[Ixx_pa, 0., 0.],
- [0., Iyy_pa, 0.],
- [0., 0., Izz_pa]])
- I_rand = rot_mat.T @ I_mat_pa @ rot_mat
-
- # Grab the moments and products of inertia from this
- # result.
- Ixx, Iyy, Izz = I_rand[0,0], I_rand[1,1], I_rand[2,2]
-                        Ixy, Ixz, Iyz = I_rand[0,1], I_rand[0,2], I_rand[1,2]
-
- pi_cm[4:7] = Tensor([Ixx, Iyy, Izz])
- pi_cm[7:10] = Tensor([Ixy, Ixz, Iyz])
-
- self.lagrangian_terms.original_pi_cm_params[idx] = pi_cm
-
- new_theta_params = InertialParameterConverter.pi_cm_to_theta(
- self.lagrangian_terms.original_pi_cm_params)
- if not torch.any(torch.isnan(new_theta_params)):
- keep_trying = False
-
- new_theta_params = InertialParameterConverter.pi_cm_to_theta(
- self.lagrangian_terms.original_pi_cm_params)
-
- self.lagrangian_terms.inertial_parameters = Parameter(
- new_theta_params, requires_grad=True)
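A standalone sketch of the randomized homogeneous-box inertia construction described in the ``randomize_multibody_terms`` docstring (illustrative mass and nominal half-length; the real code draws these from the stored ``pi_cm`` parameters and ``_NOMINAL_HALF_LENGTH``):

    import torch
    from scipy.spatial.transform import Rotation

    m, nominal_half_length = 0.37, 0.05          # illustrative values
    nu = float(torch.rand(1)) + 0.5              # mass fraction in [0.5, 1.5)
    lens = (torch.rand(3) + 0.5) * nominal_half_length
    scaling = nu * m / 12
    I_pa = torch.diag(torch.stack([
        scaling * (lens[1]**2 + lens[2]**2),     # I_xx about principal axes
        scaling * (lens[0]**2 + lens[2]**2),     # I_yy
        scaling * (lens[0]**2 + lens[1]**2),     # I_zz
    ]))
    R = torch.tensor(Rotation.random().as_matrix(), dtype=torch.float32)
    I_rand = R.T @ I_pa @ R                      # rotated full inertia matrix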
diff --git a/dair_pll_old/dair_pll/plot_styler.py b/dair_pll_old/dair_pll/plot_styler.py
deleted file mode 100644
index 5924265..0000000
--- a/dair_pll_old/dair_pll/plot_styler.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import numpy as np
-import os
-import matplotlib
-# change matplotlib backend to work via ssh
-from matplotlib.collections import LineCollection
-import matplotlib.lines as mlines
-import matplotlib.pyplot as plt
-from matplotlib.path import Path
-from matplotlib.patches import PathPatch
-
-import pdb
-
-USETEX = False
-class PlotStyler():
-
- def __init__(self):
- # self.cmap = plt.get_cmap('tab10')
- self.cmap = plt.get_cmap('tab20')
- self.blue = '#011F5B'
- self.red = '#990000'
- self.yellow = '#F2C100'
- self.grey = '#909090'
- self.orange = '#FE7F0E'
- self.directory = None
- self.penn_color_wheel = [self.blue, self.red, self.yellow, self.grey, self.orange]
- return
-
- def set_default_styling(self, directory=None, figsize=None):
- self.directory = directory
- matplotlib.rcParams["savefig.directory"] = directory
- matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
- matplotlib.rc('text.latex', preamble=r'\usepackage{underscore}')
- # matplotlib.rcParams['figure.figsize'] = 20, 12
- # matplotlib.rcParams['figure.figsize'] = 20, 6
- # matplotlib.rcParams['figure.figsize'] = 8, 5
- if (figsize == None):
- matplotlib.rcParams['figure.figsize'] = 8, 12
- else:
- matplotlib.rcParams['figure.figsize'] = figsize[0], figsize[1]
- matplotlib.rcParams['figure.autolayout'] = True
- font = {'size': 24, 'family':'serif', 'serif':['Computer Modern']}
-
- matplotlib.rc('text', usetex=False)
- matplotlib.rc('font', **font)
- matplotlib.rcParams['lines.linewidth'] = 4
- matplotlib.rcParams['axes.titlesize'] = 30
- matplotlib.rcParams['xtick.major.size'] = 15
- matplotlib.rcParams['xtick.major.width'] = 1
- matplotlib.rcParams['xtick.minor.size'] = 7
- matplotlib.rcParams['xtick.minor.width'] = 1
- plt.set_cmap('tab20')
- self.directory = directory
-
- def set_figsize(self, size):
- matplotlib.rcParams['figure.figsize'] = size
-
- def plot(self, xdata, ydata, xlim=None, ylim=None, color=None, linestyle=None,
- grid=True, xlabel=None, ylabel=None, title=None, legend=None, data_label=None):
-
- plt.plot(xdata, ydata, color=color, linestyle=linestyle, label=data_label)
- if xlim:
- plt.xlim(xlim)
- if ylim:
- plt.ylim(ylim)
- if xlabel:
- # plt.xlabel(xlabel, fontweight="bold")
- plt.xlabel(xlabel)
- if ylabel:
- # plt.ylabel(ylabel, fontweight="bold")
- plt.ylabel(ylabel)
- if title:
- # plt.title(title, fontweight="bold")
- plt.title(title)
- if legend:
- plt.legend(legend)
-
- plt.grid(grid, which='major')
-
- def step(self, xdata, ydata, xlim=None, ylim=None, color=None,
- grid=True, xlabel=None, ylabel=None, title=None, legend=None, data_label=None):
-
- plt.step(xdata, ydata, color=color, label=data_label)
-
- if xlim:
- plt.xlim(xlim)
- if ylim:
- plt.ylim(ylim)
- if xlabel:
- # plt.xlabel(xlabel, fontweight="bold")
- plt.xlabel(xlabel)
- if ylabel:
- # plt.ylabel(ylabel, fontweight="bold")
- plt.ylabel(ylabel)
- if title:
- # plt.title(title, fontweight="bold")
- plt.title(title)
- if legend:
- plt.legend(legend)
-
- plt.grid(grid, which='major')
-
- def scatter(self, xdata, ydata, xlim=None, ylim=None, color=None,
- grid=True, xlabel=None, ylabel=None, title=None, legend=None, data_label=None):
-
- plt.scatter(xdata, ydata, color=color, label=data_label)
- if xlim:
- plt.xlim(xlim)
- if ylim:
- plt.ylim(ylim)
- if xlabel:
- # plt.xlabel(xlabel, fontweight="bold")
- plt.xlabel(xlabel)
- if ylabel:
- # plt.ylabel(ylabel, fontweight="bold")
- plt.ylabel(ylabel)
- if title:
- # plt.title(title, fontweight="bold")
- plt.title(title)
- if legend:
- plt.legend(legend)
-
- plt.grid(grid, which='major')
-
- def plot_bands(self, x_low, x_high, y_low, y_high, color='C0'):
- #pdb.set_trace()
- vertices = np.block([[x_low, x_high[::-1]],
- [y_low, y_high[::-1]]]).T
- codes = Path.LINETO * np.ones(len(vertices), dtype=Path.code_type)
- codes[0] = Path.MOVETO
- path = Path(vertices, codes)
- patch = PathPatch(path, facecolor=color, edgecolor='none', alpha=0.3)
- ax = plt.gca()
- # ax.plot(xdata, ydata)
- ax.add_patch(patch)
-
- def show_fig(self):
- plt.show()
- return
-
- def save_fig(self, filename):
-
- plt.savefig(os.path.join(self.directory,filename), dpi=200)
- plt.close()
- return
-
- def add_legend(self, legend, loc=0):
- plt.legend(legend, loc=loc)
- return
-
- def annotate(self, text, x, y, x_text, y_text, arrowprops=None):
- ax = plt.gca()
- if not arrowprops:
- arrowprops = dict(facecolor='black') # arrowstyle='->'
- ax.annotate(text, xy=(x, y), xytext=(
- x_text, y_text), arrowprops=arrowprops)
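A brief usage sketch of the band-plotting helper in the deleted PlotStyler above; the data, color, and output directory are made up for illustration, and a non-interactive matplotlib backend may be needed over ssh.

import numpy as np
from dair_pll.plot_styler import PlotStyler  # as it existed before this deletion

x = np.linspace(0.0, 1.0, 50)
mean = np.sin(2.0 * np.pi * x)
low, high = mean - 0.2, mean + 0.2

styler = PlotStyler()
styler.directory = '/tmp'  # set_default_styling() also sets this, plus rcParams
styler.plot(x, mean, color=styler.blue, xlabel='time [s]', ylabel='signal')
styler.plot_bands(x, x, low, high, color=styler.blue)  # shaded confidence band
styler.save_fig('band_example.png')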
diff --git a/dair_pll_old/dair_pll/quaternion.py b/dair_pll_old/dair_pll/quaternion.py
deleted file mode 100644
index c13e426..0000000
--- a/dair_pll_old/dair_pll/quaternion.py
+++ /dev/null
@@ -1,330 +0,0 @@
-r"""Quaternion-based :math:`SO(3)` operations."""
-from typing import TypeVar, cast
-
-import numpy as np
-import torch
-from torch import Tensor
-from typing_extensions import Protocol
-
-#:
-DataType = TypeVar('DataType', Tensor, np.ndarray)
-r"""Static type for both supported types of quaternion/vector
-representations: :class:`~torch.Tensor` and :class:`~numpy.ndarray`\ ."""
-
-
-class TensorCallable(Protocol):
- r"""Static type for callable mapping from list of :class:`~torch.Tensor`\ s
- to :class:`~torch.Tensor`\ ."""
-
- # pylint: disable=too-few-public-methods
- def __call__(self, *args: Tensor) -> Tensor:
- ...
-
-
-class NdarrayCallable(Protocol):
- r"""Static type for callable mapping from list of :class:`~numpy.ndarray`\ s
- to :class:`~numpy.ndarray`\ ."""
-
- # pylint: disable=too-few-public-methods
- def __call__(self, *args: np.ndarray) -> np.ndarray:
- ...
-
-
-def operation_selector(tensor_operation: TensorCallable,
- ndarray_operation: NdarrayCallable,
- *args: DataType) -> DataType:
- r"""Helper function which selects between Pytorch and Numpy
- implementations of a quaternion operation.
-
- Args:
- tensor_operation: :class:`~torch.Tensor`\ -backed implementation.
- ndarray_operation: :class:`~numpy.ndarray`\ -backed implementation.
- *args: Arguments to pass to implementation.
-
- Returns:
- Operation's return value, same type as arguments.
- """
- assert len(args) > 0
- if isinstance(args[0], Tensor):
- return tensor_operation(*args)
-
- assert isinstance(args[0], np.ndarray)
- return ndarray_operation(*args)
-
-
-def inverse_torch(q: Tensor) -> Tensor:
- r""":class:`~torch.Tensor` implementation of :func:`inverse`\ ."""
- assert q.shape[-1] == 4
-
- q_inv = q.clone()
- q_inv[..., 1:] *= -1
- return q_inv
-
-
-def inverse_np(q: np.ndarray) -> np.ndarray:
- r""":class:`~numpy.ndarray` implementation of :func:`inverse`\ ."""
- assert q.shape[-1] == 4
-
- q_inv = np.copy(q)
- q_inv[..., 1:] *= -1
- return q_inv
-
-
-def inverse(q: DataType) -> DataType:
- r"""Quaternion inverse function.
-
- For input quaternion :math:`q = [q_w, q_{xyz}]`\ , returns the inverse
-    quaternion :math:`q^{-1} = [q_w, -q_{xyz}]`\ .
-
- Args:
- q: ``(*, 4)`` quaternion batch to invert.
-
- Returns:
- ``(*, 4)`` inverse of ``q``.
- """
- return operation_selector(cast(TensorCallable, inverse_torch),
- cast(NdarrayCallable, inverse_np), q)
-
-
-def multiply_torch(q: Tensor, r: Tensor) -> Tensor:
- r""":class:`~torch.Tensor` implementation of :func:`multiply`\ ."""
- assert q.shape[-1] == 4
- assert r.shape[-1] == 4
-
- q_w = q[..., :1]
- q_xyz = q[..., 1:]
-
- r_w = r[..., :1]
- r_xyz = r[..., 1:]
-
- # pylint: disable=E1103
- qr_w = q_w * r_w - torch.sum(q_xyz * r_xyz, dim=-1, keepdim=True)
- qr_xyz = q_w * r_xyz + r_w * q_xyz + torch.cross(q_xyz, r_xyz, dim=-1)
-
- return torch.cat((qr_w, qr_xyz), dim=-1)
-
-
-def multiply_np(q: np.ndarray, r: np.ndarray) -> np.ndarray:
- r""":class:`~numpy.ndarray` implementation of :func:`multiply`\ ."""
- assert q.shape[-1] == 4
- assert r.shape[-1] == 4
-
- q_w = q[..., :1]
- q_xyz = q[..., 1:]
-
- r_w = r[..., :1]
- r_xyz = r[..., 1:]
-
- qr_w = q_w * r_w - np.sum(q_xyz * r_xyz, axis=-1, keepdims=True)
- qr_xyz = q_w * r_xyz + r_w * q_xyz + np.cross(q_xyz, r_xyz)
-
- return np.concatenate([qr_w, qr_xyz], axis=-1)
-
-
-def multiply(q: DataType, r: DataType) -> DataType:
- r"""Quaternion multiplication.
-
- Given 2 quaternions :math:`q = [q_w, q_{xyz}]` and :math:`r = [r_w,
- r_{xyz}]`\ , performs the quaternion multiplication via the formula
-
- .. math::
-
- q \times r = \begin{bmatrix} q_w r_w - q_{xyz} \cdot r_{xyz} \\
- q_w r_{xyz} + r_w q_{xyz} + q_{xyz} \times r_{xyz}
- \end{bmatrix}
-
- This formula was taken from the following address:
- https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Quaternion-derived_rotation_matrix
-
- Args:
- q: ``(*, 4)`` left quaternion factor .
- r: ``(*, 4)`` right quaternion factor.
-
- Returns:
- ``(*, 4)`` Product quaternion ``q * r``.
- """
- return operation_selector(cast(TensorCallable, multiply_torch),
- cast(NdarrayCallable, multiply_np), q, r)
-
-
-def rotate_torch(q: Tensor, p: Tensor) -> Tensor:
- r""":class:`~torch.Tensor` implementation of :func:`rotate`\ ."""
- assert q.shape[-1] == 4
- assert p.shape[-1] == 3
-
- q_w = q[..., :1]
- q_xyz = q[..., 1:]
-
- # pylint: disable=E1103
- q_xyz_cross_p = torch.cross(q_xyz, p, dim=-1)
- q_xyz_cross_q_xyz_cross_p = torch.cross(q_xyz, q_xyz_cross_p, dim=-1)
- q_xyz_dot_p = torch.sum(q_xyz * p, dim=-1, keepdim=True)
-
- return q_xyz * (q_xyz_dot_p) + q_w * (2 * q_xyz_cross_p + q_w * p) + \
- q_xyz_cross_q_xyz_cross_p
-
-
-def rotate_np(q: np.ndarray, p: np.ndarray) -> np.ndarray:
- r""":class:`~numpy.ndarray` implementation of :func:`rotate`\ ."""
- assert q.shape[-1] == 4
- assert p.shape[-1] == 3
-
- q_w = q[..., :1]
- q_xyz = q[..., 1:]
-
- q_xyz_cross_p = np.cross(q_xyz, p)
- q_xyz_cross_q_xyz_cross_p = np.cross(q_xyz, q_xyz_cross_p)
- q_xyz_dot_p = np.sum(q_xyz * p, axis=-1, keepdims=True)
-
- return q_xyz * (q_xyz_dot_p) + q_w * (2 * q_xyz_cross_p + q_w * p) + \
- q_xyz_cross_q_xyz_cross_p
-
-
-def rotate(q: DataType, p: DataType) -> DataType:
- r"""Quaternion rotation.
-
- Given a quaternion :math:`q = [q_w, q_{xyz}]` and vector :math:`p`\ ,
- produces the :math:`q`\ -rotated vector :math:`p'` via the formula
-
- .. math::
-
- p' = (q_{xyz} \cdot p) q_{xyz} + 2 q_w (q_{xyz} \times p) +
- q_w^2 p + q_{xyz} \times (q_{xyz} \times p)
-
- This formula was taken from the following address:
- https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Quaternion-derived_rotation_matrix
-
- Args:
- q: ``(*, 4)`` quaternion batch.
- p: ``(*, 3)`` vector batch.
-
- Returns:
- ``(*, 3)`` rotated vector batch.
- """
- return operation_selector(cast(TensorCallable, rotate_torch),
- cast(NdarrayCallable, rotate_np), q, p)
-
-
-def sinc(x: Tensor) -> Tensor:
- r"""Elementwise :math:`\mathrm{sinc}` function.
-
- Given a tensor :math:`x`, applies the elementwise-mapping
-
- .. math::
-
- x \to \begin{cases}1 & x =0 \\ \frac{\sin(x)}{x} & x \neq 0\end{cases}.
-
- Args:
- x: ``(*,)`` :math:`\mathrm{sinc}` input values.
-
- Returns:
- ``(*,)`` :math:`\mathrm{sinc}` function evaluated at ``x``.
- """
- # pylint: disable=E1103
- notnull = torch.abs(x) > 0
- null = torch.logical_not(notnull)
- sinc_x = torch.zeros_like(x)
- sinc_x[null] += 1.
- sinc_x[notnull] += torch.sin(x[notnull]) / (x[notnull])
- return sinc_x
-
-
-def log(q: Tensor) -> Tensor:
- r"""Transforms quaternion into logarithmic coordinates.
-
- Given a quaternion
-
- .. math::
-
- q = [\cos(\theta/2), \hat u \sin(\theta/2)] = [q_w, q_{xyz}],
-
- returns the corresponding logarithmic coordinates (rotation vector)
- :math:`r = \theta\hat u`\ .
-
-    This computation is evaluated via the operations
-
- .. math::
- \begin{align}
- \theta(q) &= 2\mathrm{atan2}(||q_{xyz}||_2, q_w), \\
- r &= \begin{cases} \frac{\theta(q)}{\sin(\theta(q)/2)} q_{xyz} & \sin(
- \theta(q)/2) \neq 0,\\
- 0 & \sin(\theta(q)/2) = 0.\end{cases}
- \end{align}
-
- This function inverts :func:`exp`.
-
- Args:
- q: ``(*, 4)`` quaternion batch.
-
- Returns:
- ``(*, 3)`` rotation vector batch :math:`r`\ .
- """
- assert q.shape[-1] == 4
- cos_half_theta = q[..., 0:1]
- q_xyz = q[..., 1:]
- sin_half_theta = torch.norm(q_xyz, dim=-1, keepdim=True)
-
- # pylint: disable=E1103
- theta = torch.atan2(sin_half_theta, cos_half_theta) * 2
- mul = torch.zeros_like(sin_half_theta)
- not_null = torch.abs(sin_half_theta) > 0
- mul[not_null] = theta[not_null] / sin_half_theta[not_null]
-
- return q_xyz * mul
-
-
-def exp(r: Tensor) -> Tensor:
- r"""Transforms logarithmic coordinates into quaternion.
-
- Given logarithmic coordinates representation (rotation vector)
- :math:`r = \theta\hat u`\, returns the corresponding quaternion
-
- .. math::
-
- q = [\cos(\theta/2), \hat u \sin(\theta/2)] = [q_w, q_{xyz}].
-
-
- This computation is evaluated via the operations
-
- .. math::
- \begin{align}
- \theta(r) &= ||r||_2, \\
- q &= \begin{bmatrix}\cos(\theta(r)/2) \\
- \frac{1}{2} r \mathrm{sinc}(\theta(r)/2)
- \end{bmatrix}.
- \end{align}
-
- This function inverts :func:`log`.
-
- Args:
- r: ``(*, 3)`` rotation vector batch.
-
- Returns:
- ``(*, 4)`` quaternion batch :math:`q`\ .
- """
- assert r.shape[-1] == 3
-
- # pylint: disable=E1103
- angle = torch.norm(r, dim=-1, keepdim=True)
- return torch.cat((torch.cos(angle / 2), r * sinc(angle / 2) / 2), dim=-1)
-
-
-def quaternion_to_rotmat_vec(q: Tensor) -> Tensor:
- """Converts batched quaternions of shape (*, 4) to vectorized rotation
- matrices of shape (*, 9)."""
-
- qr = q[..., 0:1]
- qi = q[..., 1:2]
- qj = q[..., 2:3]
- qk = q[..., 3:4]
- r1 = torch.cat((1. - 2*(qj ** 2 + qk ** 2),
- 2*(qi*qj - qk*qr),
- 2*(qi*qk + qj*qr)), dim=-1)
- r2 = torch.cat((2*(qi*qj + qk*qr),
- 1. - 2*(qi ** 2 + qk ** 2),
- 2*(qj*qk - qi*qr)), dim=-1)
- r3 = torch.cat((2*(qi*qk - qj*qr),
- 2*(qj*qk + qi*qr),
- 1. - 2*(qi ** 2 + qj ** 2)), dim=-1)
-
- return torch.cat((r1, r2, r3), dim=-1)
\ No newline at end of file
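A short sanity-check sketch for the operations defined in the deleted quaternion module above: exp/log and rotate/inverse are mutual inverses for unit quaternions. It assumes the module is importable as dair_pll.quaternion, as it was before this deletion; the numeric values are arbitrary.

import torch
from dair_pll import quaternion

r = torch.tensor([[0.1, -0.2, 0.3]])        # (1, 3) rotation-vector batch
q = quaternion.exp(r)                       # (1, 4) unit quaternion batch
assert torch.allclose(quaternion.log(q), r, atol=1e-5)   # log inverts exp

p = torch.tensor([[1.0, 0.0, 0.0]])         # (1, 3) vector batch
p_rot = quaternion.rotate(q, p)
p_back = quaternion.rotate(quaternion.inverse(q), p_rot)
assert torch.allclose(p_back, p, atol=1e-5)               # conjugate undoes rotation

q_identity = quaternion.multiply(q, quaternion.inverse(q))
assert torch.allclose(q_identity, torch.tensor([[1.0, 0.0, 0.0, 0.0]]), atol=1e-5)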
diff --git a/dair_pll_old/dair_pll/solvers.py b/dair_pll_old/dair_pll/solvers.py
deleted file mode 100644
index 05ca424..0000000
--- a/dair_pll_old/dair_pll/solvers.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""Convex optimization solver interfaces.
-Current supported problem/solver types:
- * Lorentz cone constrained quadratic program (LCQP) solved with CVXPY.
-"""
-from typing import Dict, List, cast
-
-import cvxpy as cp
-from cvxpylayers.torch import CvxpyLayer
-from torch import Tensor
-
-_CVXPY_LCQP_EPS = 0. #1e-7
-#_CVXPY_SOLVER_ARGS = {"solve_method": "SCS", "eps": 1e-10, "use_indirect":
-# True}
-_CVXPY_SOLVER_ARGS = {"solve_method": "ECOS", "max_iters": 300,
- "abstol": 1e-10, "reltol": 1e-10, "feastol": 1e-10}
-
-def construct_cvxpy_lcqp_layer(num_contacts: int,
- num_velocities: int) -> CvxpyLayer:
- """Constructs a CvxpyLayer for solving a Lorentz cone constrained quadratic
- program.
- Args:
- num_contacts: number of contacts to be considered in the LCQP.
- num_velocities: number of generalized velocities.
- Returns:
- CvxpyLayer for solving a LCQP.
- """
- num_variables = 3 * num_contacts
-
- variables = cp.Variable(num_variables)
- objective_matrix = cp.Parameter((num_variables, num_velocities))
- objective_vector = cp.Parameter(num_variables)
-
- objective = 0.5 * cp.sum_squares(objective_matrix.T @ variables)
- objective += objective_vector.T @ variables
- if _CVXPY_LCQP_EPS > 0.:
- objective += 0.5 * _CVXPY_LCQP_EPS * cp.sum_squares(variables)
- constraints = [
- cp.SOC(variables[3 * i + 2], variables[(3 * i):(3 * i + 2)])
- for i in range(num_contacts)
- ]
-
- problem = cp.Problem(cp.Minimize(objective),
- cast(List[cp.Constraint], constraints))
- return CvxpyLayer(problem,
- parameters=[objective_matrix, objective_vector],
- variables=[variables])
-
-
-class DynamicCvxpyLCQPLayer:
- """Solves a LCQP with dynamic sizing by maintaining a family of
- constant-size ``CvxpyLayer`` s."""
- num_velocities: int
- _cvxpy_layers: Dict[int, CvxpyLayer]
-
- def __init__(self, num_velocities: int):
- """
- Args:
- num_velocities: number of generalized velocities.
- """
- self.num_velocities = num_velocities
- self._cvxpy_layers = {}
-
- def get_sized_layer(self, num_contacts: int) -> CvxpyLayer:
- """Returns a ``CvxpyLayer`` for solving a LCQP with ``num_contacts``
- contacts.
- Args:
- num_contacts: number of contacts to be considered in the LCQP.
- Returns:
- CvxpyLayer for solving a LCQP.
- """
- if num_contacts not in self._cvxpy_layers:
- self._cvxpy_layers[num_contacts] = construct_cvxpy_lcqp_layer(
- num_contacts, self.num_velocities)
- return self._cvxpy_layers[num_contacts]
-
- def __call__(self, J: Tensor, q: Tensor) -> Tensor:
- """Solve an LCQP.
- Args:
- J: (*, 3 * num_contacts, num_velocities) Cost matrices.
- q: (*, 3 * num_contacts) Cost vectors.
- Returns:
- LCQP solution impulses.
- """
- assert J.shape[-1] == self.num_velocities
- assert q.shape[-1] == J.shape[-2]
- assert J.shape[-2] % 3 == 0
-
- layer = self.get_sized_layer(J.shape[-2] // 3)
- #pdb.set_trace()
- return layer(J, q, solver_args=_CVXPY_SOLVER_ARGS)[0]
-
\ No newline at end of file
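A minimal usage sketch for the deleted DynamicCvxpyLCQPLayer above; it assumes cvxpy and cvxpylayers are installed, and the problem sizes and random cost data are illustrative only.

import torch
from dair_pll.solvers import DynamicCvxpyLCQPLayer  # as it existed before this deletion

num_velocities, num_contacts = 6, 2
layer = DynamicCvxpyLCQPLayer(num_velocities)

# Cost data for min 0.5 * ||J^T f||^2 + q^T f, with each 3-vector contact
# impulse constrained to a Lorentz (friction) cone.
J = torch.randn(3 * num_contacts, num_velocities, dtype=torch.float64)
q = torch.randn(3 * num_contacts, dtype=torch.float64)

impulses = layer(J, q)   # differentiable solve; shape (3 * num_contacts,)
print(impulses)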
diff --git a/dair_pll_old/dair_pll/state_space.py b/dair_pll_old/dair_pll/state_space.py
deleted file mode 100644
index 1e94dfd..0000000
--- a/dair_pll_old/dair_pll/state_space.py
+++ /dev/null
@@ -1,955 +0,0 @@
-r"""Classes and utilities for operation on Lie group/algebra state spaces.
-
-This module implements a :py:class:`StateSpace` abstract type which defines
-fundamental operations on states in spaces that are not necessarily
-Euclidean. In general, we assume configurations spaces are Lie groups,
-which together with their associated Lie algebra form a state space. We
-implement associated operations on these spaces.
-
-Of note is the :py:class:`FloatingBaseSpace`, with configurations in the
-Cartesian product of :math:`SE(3)` and :math:`\mathbb{R}^m`. This space receives
-the particular implementation of representing the :math:`SO(3)`
-configuration as a quaternion and the Lie algebra velocity/tangent space as
-the body-axes angular velocity / rotation vector.
-
-This is also the place where batching dimensions are defined for states. By
-convention, the state element index is always the last dimension of the
-tensor, and when states are batched in time, time is the second-to-last index.
-"""
-from abc import ABC, abstractmethod
-from typing import List, Tuple, Callable, Dict, cast
-
-import torch
-from torch import Tensor
-
-from dair_pll import quaternion
-
-N_QUAT = 4
-N_ANG_VEL = 3
-N_COM = 3
-
-ComparisonCallable = Callable[[Tensor, Tensor], Tensor]
-ComparisonDict = Dict[str, ComparisonCallable]
-# pylint: disable=E1103
-Size = torch.Size
-
-
-def partial_sum_batch(summands: Tensor, keep_batch: bool = False) -> Tensor:
- """Sums over a batch, possibly keeping the first batching dimension.
-
- Args:
- summands: ``(b_1, ..., b_j, n_1, ..., n_k)`` tensor batch of tensors
- keep_batch: whether to keep the first batching dimension
-
- Returns:
- sum of x as scalar tensor, or ``(b_1,)`` tensor if
- ``keep_batch == True``.
- """
- if keep_batch:
- while len(summands.shape) > 1:
- summands = summands.sum(dim=-1)
- return summands
- return summands.sum()
-
-
-class StateSpace(ABC):
- r"""Mathematical model of a state space.
-
- Each state space is modeled as the Cartesian product of a connected Lie
- group :math:`G` and its associated Lie algebra :math:`\mathfrak g` (
- equivalently up to diffeomorphism, the tangent bundle :math:`TG`).
- The Lie group element may be given non-minimal coordinates,
- e.g. representing SO(3) with quaternions. As :math:`\mathfrak g`
- is a vector space, :math:`G \times \mathfrak g` itself is also a Lie group.
-
- The following assumptions about the group :math:`G` and algebra g are made:
-
- * The Lie group exponential map :math:`\exp: \mathfrak g \to G`
- is surjective/onto, such that a left inverse
- :math:`\log: G \to \mathfrak g` can be defined, i.e.
- :math:`\exp(\log(g)) = g`.
- * The Lie group exponential map coincides with the underlying
- manifold's Riemannian geometric exponential map, such that the
-      geodesic distance from :math:`g_1` to :math:`g_2` is
- :math:`|\log(g_2 \cdot g_1^{-1})|`
-
- These conditions are met if and only if :math:`G` is the Cartesian
- product of a compact group and an Abelian group :cite:p:`Milnor1976` -- For
- example, :math:`SO(3)\times\mathbb{R}^n`.
-
-    For each concrete class inheriting from :py:class:`StateSpace`,
- a few fundamental mathematical operators associated with Lie groups must
-    be defined on these coordinates. :py:class:`StateSpace` defines several
- other group operations from these units.
- """
- n_q: int
- n_v: int
- n_x: int
- comparisons: ComparisonDict
-
- def __init__(self, n_q: int, n_v: int) -> None:
- """
- Args:
- n_q: number of Lie group (configuration) coordinates
- (:math:`>= 0`)
- n_v: number of Lie algebra (velocity) coordinates (:math:`>= 0`)
- """
- assert n_q >= 0
- assert n_v >= 0
- super().__init__()
- self.n_q = n_q
- self.n_v = n_v
- self.n_x = n_q + n_v
- self.comparisons = {}
-
- @abstractmethod
- def configuration_difference(self, q_1: Tensor, q_2: Tensor) -> Tensor:
- r"""Returns the relative transformation between ``q_1`` and ``q_2``.
-
- Specifically, as :math:`G` is a Lie group, it has a well-defined inverse
-        operator. This function returns :math:`dq = \log(q_1^{-1} \cdot q_2)`,
- i.e. the Lie algebra element such that :math:`q_1 \exp(dq) = q_2`.
-
- This method has a corresponding "inverse" function
- :py:meth:`exponential`.
-
- Args:
- q_1: ``(*, n_q)`` "starting" configuration, element(s) of Lie
- group :math:`G`.
- q_2: ``(*, n_q)`` "ending" configuration, element(s) of Lie group
- :math:`G`.
-
- Returns:
- ``(*, n_v)`` element of Lie algebra g defining the transformation
- from ``q_1`` to ``q_2``
- """
-
- @abstractmethod
- def exponential(self, q: Tensor, dq: Tensor) -> Tensor:
- """Evolves ``q`` along the Lie group G in the direction ``dq``.
-
- This function implements the inverse of
- :py:meth:`configuration_difference` by returning q * exp(dq).
-
- Args:
- q: ``(*, n_q)`` "starting" configuration, element(s) of Lie group G
- dq: ``(*, n_v)`` perturbation, element(s) of Lie algebra g
- Returns:
- ``(*, n_q)`` group product of q and exp(dq)
- """
-
- @abstractmethod
- def project_configuration(self, q: Tensor) -> Tensor:
- """Projects a tensor of size ``(*, n_q)`` onto the Lie group G.
-
- This function is used, mostly for numerical stability, to ensure a
- ``(*, n_q)`` tensor corresponds to Lie group elements. While not
- necessarily a Euclidean projection, this function should be:
-
- * The identity on G, i.e. ``q = projection_configuration(q)``
- * Continuous
- * (Piecewise) differentiable near G
-
- Args:
- q: ``(*, n_q)`` vectors to project onto G.
-
- Returns:
- ``(*, n_q)`` projection of ``q`` onto G.
- """
-
- @abstractmethod
- def zero_state(self) -> Tensor:
- """Identity element of the Lie group G.
-
- Entitled "zero state" as the group operation is typically thought of
- as addition.
-
- Returns:
- ``(n_x,)`` tensor group identity
- """
-
- def q(self, x: Tensor) -> Tensor:
- """Selects configuration indices from state(s) ``x``"""
- assert x.shape[-1] == self.n_x
- return x[..., :self.n_q]
-
- def v(self, x: Tensor) -> Tensor:
- """Selects velocity indices from state(s) ``x``"""
- assert x.shape[-1] == self.n_x
- return x[..., self.n_q:]
-
- def q_v(self, x: Tensor) -> Tuple[Tensor, Tensor]:
- """Separates state(s) ``x`` into configuration and velocity"""
- assert x.shape[-1] == self.n_x
- return self.q(x), self.v(x)
-
- def x(self, q: Tensor, v: Tensor) -> Tensor:
- """Concatenates configuration ``q`` and velocity ``v`` into a state"""
- assert q.shape[-1] == self.n_q
- assert v.shape[-1] == self.n_v
-
- # pylint: disable=E1103
- return torch.cat((q, v), dim=-1)
-
- def config_square_error(self,
- q_1: Tensor,
- q_2: Tensor,
- keep_batch: bool = False) -> Tensor:
- r"""Returns squared distance between two Lie group
- elements/configurations.
-
- Interprets an :math:`l_2`-like error between two configurations as the
- square of the geodesic distance between them. This is simply equal to
- :math:`|\log(q_2 \mathrm{inv}(q_1))|^2` under the assumptions about G.
-
- Args:
- q_1: ``(b_1, ..., b_k, n_q)`` "starting" configuration
- q_2: ``(b_1, ..., b_k, n_q)`` "ending" configuration
- keep_batch: whether to keep the outermost batch
-
- Returns:
- ``(b_1,)`` or scalar tensor of squared geodesic distances
- """
- assert q_1.shape[-1] == self.n_q
- assert q_2.shape[-1] == self.n_q
- return partial_sum_batch(
- self.configuration_difference(q_1, q_2)**2, keep_batch)
-
- def velocity_square_error(self,
- v_1: Tensor,
- v_2: Tensor,
- keep_batch: bool = False) -> Tensor:
- """Returns squared distance between two Lie algebra
- elements/velocities.
-
- As the Lie algebra is a vector space, the squared error is
-        interpreted as the squared geodesic/Euclidean distance.
-
- Args:
- v_1: ``(b_1, ..., b_k, n_v)`` "starting" velocity
- v_2: ``(b_1, ..., b_k, n_v)`` "ending" velocity
- keep_batch: whether to keep the outermost batch
-
- Returns:
- ``(b_1,)`` or scalar tensor of squared geodesic distances.
- """
- assert v_1.shape[-1] == self.n_v
- assert v_2.shape[-1] == self.n_v
- return partial_sum_batch((v_2 - v_1)**2, keep_batch)
-
- def state_square_error(self,
- x_1: Tensor,
- x_2: Tensor,
- keep_batch: bool = False) -> Tensor:
- """Returns squared distance between two states, which are in the
- cartesian product G x g.
-
- As g is a vector space, it is Abelian, and thus G x g is the product
- of a compact group and an Abelian group. We can then define the
- geodesic distance as::
-
- dist(x_1, x_2)^2 == dist(q(x_1), q(x_2))^2 + dist(v(x_1), v(x_2))^2
-
- Args:
- x_1: ``(b_1, ..., b_k, n_x)`` "starting" state
- x_2: ``(b_1, ..., b_k, n_x)`` "ending" state
- keep_batch: whether to keep the outermost batch
-
- Returns:
- ``(b_1,)`` or scalar tensor of squared geodesic distances
- """
- assert x_1.shape[-1] == self.n_x
- assert x_2.shape[-1] == self.n_x
- q_1, v_1 = self.q_v(x_1)
- q_2, v_2 = self.q_v(x_2)
- return self.config_square_error(
- q_1, q_2, keep_batch) + self.velocity_square_error(
- v_1, v_2, keep_batch)
-
- def auxiliary_comparisons(
- self) -> Dict[str, Callable[[Tensor, Tensor], Tensor]]:
- """Any additional useful comparisons between pairs of states"""
- return self.comparisons
-
- def finite_difference(self, q: Tensor, q_plus: Tensor, dt: float) -> Tensor:
- """Rate of change of configuration
-
- Interprets the rate of change of ``q`` as an element of the Lie
- algebra ``v``, such that q_plus == q * exp(v * dt).
-
- :py:meth:`finite_difference` has a corresponding "inverse" function
- :py:meth:`euler_step`.
-
- Args:
- q: ``(*, n_q)`` "starting" configuration, element(s) of Lie group G
- q_plus: ``(*, n_q)`` "ending" configuration, element(s) of Lie group G
- dt: time difference in [s]
-
- Returns:
- ``(*, n_v)`` finite-difference velocity, element(s) of Lie algebra g
- """
- assert q.shape[-1] == self.n_q
- assert q_plus.shape[-1] == self.n_q
- return self.configuration_difference(q, q_plus) / dt
-
- def euler_step(self, q: Tensor, v: Tensor, dt: float) -> Tensor:
- """Integrates ``q`` forward in time given derivative ``v``.
-
- Implements the inverse of :py:meth:`finite_difference` by returning
- q * exp(v * dt), a geodesic forward Euler step.
-
- Args:
- q: ``(*, n_q)`` "starting" configuration, element(s) of Lie group G
- v: ``(*, n_v)`` "starting" velocity, element(s) of Lie algebra g
- dt: time difference in [s]
-
- Returns:
- ``(*, n_q)`` configuration after Euler step.
- """
- assert q.shape[-1] == self.n_q
- assert v.shape[-1] == self.n_v
- return self.exponential(q, v * dt)
-
- def state_difference(self, x_1: Tensor, x_2: Tensor) -> Tensor:
- """Returns the relative transformation between ``x_1`` and ``x_2``
-
- As G x g is a Lie group, we can interpret the difference between two
- states via its corresponding Lie algebra, just as in
- :py:meth:`configuration_difference`, as log(x_2 / x_1).
-
- :py:meth:`state_difference` has a corresponding "inverse" function
- :py:meth:`shift_state`.
-
- Args:
- x_1: ``(*, n_x)`` "starting" state, element(s) of Lie group G x g
- x_2: ``(*, n_x)`` "ending" state, element(s) of Lie group G x g
-
- Returns:
- ``(*, n_x)`` element of Lie algebra g x R^n_v defining the
- transformation from ``x_1`` to ``x_2``
- """
- assert x_1.shape[-1] == self.n_x
- assert x_2.shape[-1] == self.n_x
- q_1, v_1 = self.q_v(x_1)
- q_2, v_2 = self.q_v(x_2)
- dq = self.configuration_difference(q_1, q_2)
- dv = v_2 - v_1
-
- # pylint: disable=E1103
- return torch.cat((dq, dv), dim=-1)
-
- def shift_state(self, x: Tensor, dx: Tensor) -> Tensor:
- """Evolves ``x`` along the Lie group G in the direction ``dx``.
-
- This function implements the inverse of :py:meth:`state_difference`
-        by returning x * exp(dx).
-
- Args:
- x: ``(*, n_x)`` "starting" state, element(s) of Lie group G x g
- dx: ``(*, 2 * n_v)`` perturbation, element(s) of Lie algebra g x R^n_v
- Returns:
-            ``(*, n_x)`` group product of x and exp(dx).
- """
- assert x.shape[-1] == self.n_x
- assert dx.shape[-1] == (2 * self.n_v)
- q, v = self.q_v(x)
-
- dq = dx[..., :self.n_v]
- dv = dx[..., self.n_v:]
-
- q_new = self.exponential(q, dq)
- v_new = v + dv
- return self.x(q_new, v_new)
-
- def project_state(self, x: Tensor) -> Tensor:
-        """Projects a tensor of size ``(*, n_x)`` onto the state space G x g.
-
- This function has the same basic requirements as
- :py:meth:`project_configuration` translated to the lie group G x g.
-
- Args:
- x: ``(*, n_x)`` vectors to project onto G x g.
-
- Returns:
- ``(*, n_x)`` tensor, projection of ``x`` onto G x g.
- """
- assert x.shape[-1] == self.n_x
- return self.x(self.project_configuration(self.q(x)), self.v(x))
-
- def project_derivative(self, x: Tensor, dt: float) -> Tensor:
- """Changes velocity sequence in ``x`` to a finite difference.
-
- Extracts configurations q_t from trajectory(ies) and replaces
- velocities v_i with ``finite_difference(q_{i-1}, q_i, dt)``.
-
- Args:
- x: ``(*, T, n_x)`` trajectories
- dt: time-step
-
- Returns:
- ``(*, T, n_x)`` trajectories with finite-difference velocities.
- """
- assert x.shape[-1] == self.n_x
- assert x.dim() >= 2 # must have time indexing
- assert x.shape[-2] > 1 # must have multiple time steps
- q = self.q(x)
- q_pre = q[..., :(-1), :]
- q_plus = q[..., 1:, :]
- v_plus = self.finite_difference(q_pre, q_plus, dt)
- return self.x(q_plus, v_plus)
-
-
-class FloatingBaseSpace(StateSpace):
- """State space with configurations in SE(3) x R^n_joints.
-
- Called :py:class:`FloatingBaseSpace` as it is the state space of an open
- kinematic chain with a free-floating base body.
-
- Coordinates for SO(3) are unit quaternions, with remaining states
- represented as R^{3 + n_joints}.
- """
-
- def __init__(self, n_joints: int) -> None:
-        """Inits :py:class:`FloatingBaseSpace` of prescribed size.
-
- The floating base has configurations in SE(3), with 4 quaternion + 3
- world-axes position configuration coordinates and 3 body-axes angular
- velocity + 3 world-axes linear velocity. Each joint is represented as a
- single real number.
-
- Args:
- n_joints: number of joints in chain (>= 0)
- """
- assert n_joints >= 0
- super().__init__(7 + n_joints, 6 + n_joints)
- self.comparisons.update({
- 'rot_err': self.quaternion_error,
- 'pos_err': self.base_error,
- })
-
- def quat(self, q_or_x: Tensor) -> Tensor:
- """select quaternion elements from configuration/state"""
- assert q_or_x.shape[-1] == self.n_q or q_or_x.shape[-1] == self.n_x
- return q_or_x[..., :N_QUAT]
-
- def base(self, q_or_x: Tensor) -> Tensor:
- """select floating base position elements from configuration/state"""
- assert q_or_x.shape[-1] == self.n_q or q_or_x.shape[-1] == self.n_x
- return q_or_x[..., N_QUAT:(N_QUAT + N_COM)]
-
- def configuration_difference(self, q_1: Tensor, q_2: Tensor) -> Tensor:
- """Implements configuration offset for a floating-base rigid chain.
-
-        The domain of the exp() map of SO(3) is the space of rotation
-        vectors, or equivalently the Lie algebra so(3); therefore, the first 3
-        elements of the return value are body-axes rotation vectors.
-
- Args:
- q_1: ``(*, n_q)`` "starting" configuration in SE(3) x R^n_joints
- q_2: ``(*, n_q)`` "ending" configuration, SE(3) x R^n_joints
-
- Returns:
- ``(*, n_v)`` body-axes rotation vector, world-axes linear
- displacement, and joint offsets.
- """
- assert q_1.shape[-1] == self.n_q
- assert q_2.shape[-1] == self.n_q
- quat1 = self.quat(q_1)
- quat2 = self.quat(q_2)
- linear_shift = q_2[..., N_QUAT:] - q_1[..., N_QUAT:]
- quat_shift = quaternion.multiply(quaternion.inverse(quat1), quat2)
- rot = quaternion.log(quat_shift)
-
- # pylint: disable=E1103
- return torch.cat((rot, linear_shift), dim=-1)
-
- def exponential(self, q: Tensor, dq: Tensor) -> Tensor:
- """Implements exponential perturbation for a floating-base rigid chain.
-
- This function implements the inverse of :py:meth:`configuration_difference`
- by rotating ``quat(q)`` around the body-axis rotation vector in
- ``dq``, and adding a linear offset to the remaining coordinates.
-
- Args:
- q: ``(*, n_q)`` "starting" configuration in SE(3) x R^n_joints
- dq: ``(*, n_v)`` perturbation in se(3) x R^n_joints
- Returns:
- ``(*, n_q)`` perturbed quaternion, world-axes floating base origin
- """
- assert q.shape[-1] == self.n_q
- assert dq.shape[-1] == self.n_v
- linear_plus = q[..., N_QUAT:] + dq[..., N_ANG_VEL:]
- delta_quat = quaternion.exp(dq[..., :N_ANG_VEL])
- quat_plus = quaternion.multiply(q[..., :N_QUAT], delta_quat)
-
- # pylint: disable=E1103
- return torch.cat((quat_plus, linear_plus), dim=-1)
-
- def project_configuration(self, q: Tensor) -> Tensor:
- """Implements projection onto the floating-base rigid chain
- configuration space.
-
- This function projects a ``(*, n_q)`` tensor onto SE(3) x R^n_joints by
- simply normalizing the quaternion elements.
-
- Args:
- q: ``(*, n_q)`` vectors to project onto SE(3) x R^n_joints.
-
- Returns:
- ``(*, n_q)`` tensor, projection of ``q`` onto SE(3) x R^n_joints.
- """
- assert q.shape[-1] == self.n_q
-        quats = q[..., :N_QUAT] / torch.linalg.norm(
-            q[..., :N_QUAT], dim=-1, keepdim=True)
-
- # pylint: disable=E1103
- return torch.cat((quats, q[..., N_QUAT:]), dim=-1)
-
- def zero_state(self) -> Tensor:
- """Identity element of SE(3) x R^n_joints.
-
- Returns:
- Concatenation of identity quaternion [1, 0, 0, 0] with
- ``(n_joints + 3)`` zeros.
- """
- # pylint: disable=E1103
- zero = torch.zeros((self.n_x,))
- zero[0] = 1.
- return zero
-
- def quaternion_error(self, x_1: Tensor, x_2: Tensor) -> Tensor:
- """Auxiliary comparison that returns floating base orientation geodesic
- distance.
-
- Returns a scalar comparison of two floating base rigid chain states
- by the angle of rotation between their base orientations.
-
- Args:
- x_1: ``(*, n_x)`` "starting" state
- x_2: ``(*, n_x)`` "ending" state
-
- Returns:
- scalar tensor, average angle of rotation in batch.
-
- Todo:
- Properly handle multiple batching dimensions.
- """
- assert x_1.shape[-1] == self.n_x
- assert x_2.shape[-1] == self.n_x
- assert len(x_1.shape) == 2 # hack for now
- quat1 = self.quat(x_1)
- quat2 = self.quat(x_2)
- quat_shift = quaternion.multiply(quaternion.inverse(quat1), quat2)
- rot = quaternion.log(quat_shift)
-
- # pylint: disable=E1103
- return torch.sqrt((rot**2).sum(dim=-1)).sum() / x_1.shape[0]
-
- def base_error(self, x_1: Tensor, x_2: Tensor) -> Tensor:
- """Auxiliary comparison that returns floating base translation geodesic
- distance.
-
- Returns a scalar comparison of two floating base rigid chain states
-        by the Euclidean distance between their bases.
-
- Args:
- x_1: ``(*, n_x)`` "starting" state
- x_2: ``(*, n_x)`` "ending" state
-
- Returns:
- scalar tensor, average translation in batch.
-
- Todo:
- Properly handle multiple batching dimensions.
- """
- assert x_1.shape[-1] == self.n_x
- assert x_2.shape[-1] == self.n_x
- assert len(x_1.shape) == 2
- base1 = self.base(x_1)
- base2 = self.base(x_2)
- pos = base1 - base2
-
- # pylint: disable=E1103
- return torch.sqrt((pos**2).sum(dim=-1)).sum() / x_1.shape[0]
-
-
-class FixedBaseSpace(StateSpace):
- """State space with configurations in R^n_joints.
-
- Called :py:class:`FixedBaseSpace` as it is the state space of an open
- kinematic chain with fixed base body.
-
- As the Lie group R^n_joints is equivalent to its own algebra, the state
- space is simply R^{2 * n_joints}, and the group operation coincides with
- addition on this vector space. Thus::
-
- n_q == n_v == n_x/2
- """
-
- def __init__(self, n_joints: int) -> None:
- """Inits :py:class:`FixedBaseSpace` of prescribed size.
-
- Args:
- n_joints: number of joints in chain (>= 0)
- """
- assert n_joints >= 0
- super().__init__(n_joints, n_joints)
-
- def configuration_difference(self, q_1: Tensor, q_2: Tensor) -> Tensor:
- """Implements configuration offset for a fixed-base rigid chain.
-
- In R^n_joints, this is simply vector subtraction.
-
- Args:
- q_1: ``(*, n_q)`` "starting" configuration in R^n_joints
- q_2: ``(*, n_q)`` "ending" configuration in R^n_joints
-
- Returns:
-            ``(*, n_v)`` difference of configurations
- """
- assert q_1.shape[-1] == self.n_q
- assert q_2.shape[-1] == self.n_q
- return q_2 - q_1
-
- def exponential(self, q: Tensor, dq: Tensor) -> Tensor:
- """Implements exponential perturbation for a fixed-base rigid chain.
-
- In R^n_joints, this is simply vector addition.
-
- Args:
- q: ``(*, n_q)`` "starting" configuration in R^n_joints
- dq: ``(*, n_v)`` perturbation in R^n_joints
- Returns:
- ``(*, n_q)`` perturbed configuration
- """
- assert q.shape[-1] == self.n_q
- assert dq.shape[-1] == self.n_v
- return q + dq
-
- def project_configuration(self, q: Tensor) -> Tensor:
- """Implements projection onto the fixed-base rigid chain
- configuration space.
-
- In R^n_joints, this is simply the identity function.
-
- Args:
- q: ``(*, n_q)`` vectors in R^n_joints.
-
- Returns:
- ``(*, n_q)`` tensor, ``q``.
- """
- assert q.shape[-1] == self.n_q
- return q
-
- def zero_state(self) -> Tensor:
- """Zero element of R^n_x"""
-
- # pylint: disable=E1103
- return torch.zeros((self.n_x,))
-
-
-class ProductSpace(StateSpace):
- """State space constructed as the Cartesian product of subspaces.
-
- The product space conforms with the required properties of our Lie group
- as long as each constituent subspace does as well."""
-
- def __init__(self, spaces: List[StateSpace]) -> None:
- """Inits :py:class:`ProductSpace` from given factors.
-
- The coordinates of each space in ``spaces`` are concatenated to
- construct the product space's coordinates, and similar for
- the velocities."""
- n_qs = [space.n_q for space in spaces]
- n_vs = [space.n_v for space in spaces]
- n_xs = [space.n_x for space in spaces]
- n_q = sum(n_qs)
- n_v = sum(n_vs)
-
- super().__init__(n_q, n_v)
- # pylint: disable=E1103
- self.q_splits = torch.cumsum(torch.tensor(n_qs), 0)[:-1]
- self.v_splits = torch.cumsum(torch.tensor(n_vs), 0)[:-1]
- self.x_splits = torch.cumsum(torch.tensor(n_xs), 0)[:-1]
- self.spaces = spaces
-
- def q_split(self, q: Tensor) -> List[Tensor]:
- """Splits configuration into list of subspace configurations."""
- assert q.shape[-1] == self.n_q
-
- # pylint: disable=E1103
- return cast(list, torch.tensor_split(q, self.q_splits, -1))
-
- def v_split(self, v: Tensor) -> List[Tensor]:
- """Splits velocity into list of subspace velocities."""
- assert v.shape[-1] == self.n_v
-
- # pylint: disable=E1103
- return cast(list, torch.tensor_split(v, self.v_splits, -1))
-
- def x_split(self, x: Tensor) -> List[Tensor]:
- """Splits state into list of subspace states."""
- assert x.shape[-1] == self.n_x
-
- # pylint: disable=E1103
- return cast(list, torch.tensor_split(x, self.x_splits, -1))
-
- def configuration_difference(self, q_1: Tensor, q_2: Tensor) -> Tensor:
- """Constructs configuration difference as concatenation of subspace
- configuration differences."""
- assert q_1.shape[-1] == self.n_q
- assert q_2.shape[-1] == self.n_q
- diffs = [
- space.configuration_difference(q_1i, q_2i)
- for space, q_1i, q_2i in zip(self.spaces, self.q_split(q_1),
- self.q_split(q_2))
- ]
- # pylint: disable=E1103
- return torch.cat(diffs, dim=-1)
-
- def exponential(self, q: Tensor, dq: Tensor) -> Tensor:
- """Constructs perturbed configuration as concatenation of perturbed
- subspace configurations"""
- assert q.shape[-1] == self.n_q
- assert dq.shape[-1] == self.n_v
- exps = [
- space.exponential(qi, dqi) for space, qi, dqi in zip(
- self.spaces, self.q_split(q), self.v_split(dq))
- ]
- # pylint: disable=E1103
- return torch.cat(exps, dim=-1)
-
- def project_configuration(self, q: Tensor) -> Tensor:
- """Projects configuration onto Lie group by projecting each subspace's
- configuration onto its respective subgroup."""
- assert q.shape[-1] == self.n_q
- projections = [
- space.project_configuration(qi)
- for space, qi in zip(self.spaces, self.q_split(q))
- ]
- # pylint: disable=E1103
- return torch.cat(projections, dim=-1)
-
- def zero_state(self) -> Tensor:
- """Constructs zero state as concatenation of subspace zeros"""
- zeros = [space.zero_state() for space in self.spaces]
-
- # pylint: disable=E1103
- q = torch.cat(
- [space.q(zero) for space, zero in zip(self.spaces, zeros)], dim=-1)
- v = torch.cat(
- [space.v(zero) for space, zero in zip(self.spaces, zeros)], dim=-1)
- return torch.cat((q, v), dim=-1)
-
-
-def centered_uniform(size: Size) -> Tensor:
- """Uniform distribution on zero-centered box [-1, 1]^size"""
- # pylint: disable=E1103
- return 2. * torch.rand(size) - 1.
-
-
-class WhiteNoiser:
- r"""Helper class for adding artificial noise to state batches.
-
- Defines an interface for noise distortion of a batch of states. Noise is
- modeled as a zero-mean distribution on the Lie algebra of the state space,
- :math:`\mathbb{R}^{2 n_v}`. Note that this means that velocities receive
- noise independent to the configuration, and thus may break the
-    noise independent of the configuration, and thus may break the
- space: StateSpace
- ranges: Tensor
- variance_factor: float
-
- def __init__(self,
- space: StateSpace,
- unit_noise: Callable[[Size], Tensor],
- variance_factor: float = 1) -> None:
- """Inits a :py:class:`WhiteNoiser` of specified distribution.
-
- Args:
-            space: State space upon which the noise acts.
- unit_noise: Callback, returns coordinate-independent noise of
- nominal unit size.
- variance_factor: Variance of a single coordinate's unit-scale noise.
- """
- super().__init__()
- self.space = space
- self.unit_noise = unit_noise
- self.variance_factor = variance_factor
-
- def noise(self,
- x: Tensor,
- ranges: Tensor,
- independent: bool = True) -> Tensor:
- """Adds noise to a given batch of states.
-
-        Uses ``unit_noise()`` to generate the distortion, applied either
-        identically to each state in the batch or i.i.d. per state.
-
- Args:
- x: ``(*, space.n_x)`` batch of states to distort with noise.
- ranges: ``(2 * space.n_v,)`` multiplicative scale of noise.
- independent: whether to independently distort each state.
-
- Returns:
- ``(*, space.n_x)`` distorted batch of states.
- """
- dx_shape = x.shape[:-1] + (2 * self.space.n_v,)
- if independent:
- noise_shape = torch.Size(dx_shape)
- else:
- noise_shape = torch.Size((2 * self.space.n_v,))
-
- noise = torch.zeros(dx_shape)
- noise += self.unit_noise(noise_shape) * ranges
- return self.space.shift_state(x, noise)
-
- def covariance(self, ranges: Tensor) -> Tensor:
- """State covariance matrix associated with noise scale.
-
- Args:
- ranges: ``(2 * space.n_v,)`` multiplicative scale of noise.
-
- Returns:
- ``(2 * space.n_v, 2 * space.n_v)`` covariance matrix on state space
- Lie algebra.
- """
- return torch.diag(self.variance_factor * (ranges**2))
-
-
-class UniformWhiteNoiser(WhiteNoiser):
- """Convenience :py:class:`WhiteNoiser` class for uniform noise."""
-
- def __init__(self, space: StateSpace) -> None:
- super().__init__(space, centered_uniform, 1. / 3.)
-
-
-class GaussianWhiteNoiser(WhiteNoiser):
- """Convenience :py:class:`WhiteNoiser` class for Gaussian noise."""
-
- def __init__(self, space: StateSpace) -> None:
- super().__init__(space, torch.randn)
-
-
-class StateSpaceSampler(ABC):
- """Abstract utility class for sampling on a state space."""
- space: StateSpace
-
- def __init__(self, space: StateSpace) -> None:
- """Inits :py:class:`StateSpaceSampler` on prescribed space.
-
- Args:
- space: State space of sampler.
- """
- super().__init__()
- self.space = space
-
- @abstractmethod
- def get_sample(self) -> Tensor:
- """Get sample from state distribution.
-
- Returns:
- (space.n_x,) state sample.
- """
-
- @abstractmethod
- def covariance(self) -> Tensor:
- r"""Returns covariance of state space distribution.
-
- Interprets the distribution in logarithmic coordinates (the Lie
- algebra of the state space), and returns a covariance matrix in
- :math:`\mathbb{R}^{2 n_v \times 2 n_v}`.
-
- Returns:
- (2 * space.n_v, 2 * space.n_v) distribution covariance.
- """
-
-
-class ConstantSampler(StateSpaceSampler):
- """Convenience :py:class:`StateSpaceSampler` for returning constant
- state."""
- space: StateSpace
- x_0: Tensor
-
- def __init__(self, space: StateSpace, x_0: Tensor) -> None:
- """Inits :py:class:`ConstantSampler` with specified constant state.
-
- Args:
- space: Sampler's state space.
- x_0: ``(space.n_x,)`` singleton support of underlying probability
- distribution.
- """
- super().__init__(space)
- self.x_0 = x_0
-
- def get_sample(self) -> Tensor:
- """Returns copy of constant ``x_0``."""
- return self.x_0.clone()
-
- def covariance(self) -> Tensor:
- """Returns zero covariance."""
- return torch.zeros((2 * self.space.n_v, 2 * self.space.n_v))
-
-
-class ZeroSampler(ConstantSampler):
- """Convenience :py:class:`ConstantSampler` for returning zero state."""
-
- def __init__(self, space: StateSpace) -> None:
- super().__init__(space, space.zero_state())
-
-
-class CenteredSampler(StateSpaceSampler):
- """State space sampling distribution centered around specified state.
-
- Implemented by sampling the state, and perturbing it with specified white
- noise.
- """
- ranges: Tensor
- x_0: Tensor
- variance_factor: float
-
- def __init__(self,
- space: StateSpace,
- ranges: Tensor,
- unit_noise: Callable[[Size], Tensor] = torch.randn,
- x_0: Tensor = None) -> None:
- """Inits :py:class:`CenteredSampler` with specified distribution
-
- Args:
- space: Sampler's state space.
- ranges: ``(2 * space.n_v,)`` multiplicative scale on noise
- distribution standard deviation.
- unit_noise: Callback, returns coordinate-independent noise of
- nominal unit size.
- x_0: ``(space.n_x,)`` center of distribution, around which Lie
- algebra perturbation is applied by underlying
- :py:class:`WhiteNoiser`.
- """
- super().__init__(space)
- if x_0 is None:
- x_0 = space.zero_state()
- self.x_0 = space.project_state(x_0)
-
- self.noiser = WhiteNoiser(space, unit_noise)
- self.ranges = ranges
-
- def get_sample(self) -> Tensor:
- """Returns ``x_0`` distorted by white noise."""
- return self.noiser.noise(self.x_0, self.ranges)
-
- def covariance(self) -> Tensor:
- """Returns covariance of underlying noiser."""
- return self.noiser.covariance(self.ranges)
-
-
-class UniformSampler(CenteredSampler):
- """Convenience :py:class:`CenteredSampler` for uniform noise."""
-
- def __init__(self, space: StateSpace, ranges: Tensor, x_0: Tensor = None):
- super().__init__(space, ranges, centered_uniform, x_0)
-
-
-class GaussianSampler(CenteredSampler):
- """Convenience :py:class:`CenteredSampler` for Gaussian noise."""
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
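A small round-trip sketch on the deleted FloatingBaseSpace above, showing that euler_step() and finite_difference() invert each other as documented. The import path assumes the module as it existed before this deletion; sizes and velocity values are illustrative.

import torch
from dair_pll.state_space import FloatingBaseSpace  # as it existed before this deletion

space = FloatingBaseSpace(n_joints=1)   # n_q = 8, n_v = 7
dt = 0.01

x0 = space.zero_state()                 # identity quaternion, zeros elsewhere
q0 = space.q(x0)

v = torch.tensor([0.1, 0.0, 0.0,        # body-axes angular velocity
                  0.0, 0.2, 0.0,        # world-axes linear velocity
                  0.3])                 # joint velocity
q1 = space.euler_step(q0, v, dt)        # geodesic forward Euler step
v_fd = space.finite_difference(q0, q1, dt)
assert torch.allclose(v_fd, v, atol=1e-5)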
diff --git a/dair_pll_old/dair_pll/study.py b/dair_pll_old/dair_pll/study.py
deleted file mode 100644
index e1f36c2..0000000
--- a/dair_pll_old/dair_pll/study.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import copy
-import logging
-import os.path
-import sys
-from dataclasses import dataclass
-from typing import Tuple, Type, Dict, Any
-
-import optuna
-import optuna.logging
-from torch import Tensor
-from torch.nn import Module
-
-from dair_pll import file_utils, hyperparameter
-from dair_pll.dataset_management import DataConfig
-from dair_pll.drake_experiment import DrakeSystemConfig, \
- MultibodyLearnableSystemConfig, DrakeMultibodyLearnableExperiment
-from dair_pll.experiment import SupervisedLearningExperiment
-from dair_pll.experiment_config import SupervisedLearningExperimentConfig
-from dair_pll.system import System
-
-OPTUNA_ENVIRONMENT_VARIABLE = 'OPTUNA_SERVER'
-
-OPTUNA_TRIAL_FINISHED_STATES = [optuna.trial.TrialState.COMPLETE,
- optuna.trial.TrialState.PRUNED]
-
-
-@dataclass
-class StudyConfig:
- n_trials: int = 100
- min_resource: int = 5
- n_sweep_runs: int = 5
- log_data_size_range: Tuple[int, int] = (3, 12)
- use_remote_storage: bool = True
- study_name: str = ''
- experiment_type: Type[
- SupervisedLearningExperiment] = SupervisedLearningExperiment
- default_experiment_config: SupervisedLearningExperimentConfig = SupervisedLearningExperimentConfig(
- )
-
-
-class Study:
- best_params: Dict = {}
-
- def __init__(self, config: StudyConfig) -> None:
- self.config = config
-
- def optimize(self, trial: optuna.trial.Trial) -> float:
-
- def epoch_callback(epoch: int, _system: System, _train_loss: Tensor,
- best_valid_loss: Tensor) -> None:
- trial.report(best_valid_loss.item(), step=epoch)
- if trial.should_prune():
- raise optuna.TrialPruned()
-
- config = self.config
- experiment_suggestion = hyperparameter.generate_suggestion(
- config.default_experiment_config, trial)
-
- trial_experiment_config = copy.deepcopy(
- config.default_experiment_config)
-
- hyperparameter.load_suggestion(trial_experiment_config,
- experiment_suggestion)
-
- run_name = file_utils.hyperparameter_opt_run_name(
- config.study_name, trial.number)
-
- trial_experiment_config.run_name = run_name
-
- experiment = config.experiment_type(trial_experiment_config)
- _, best_valid_loss, _ = experiment.train(epoch_callback)
- return best_valid_loss.item()
-
- def study(self) -> None:
- config = self.config
- log_data_size_range = config.log_data_size_range
- # N_train = config.default_experiment_config.data_config.N_train
-
- hps = file_utils.load_hyperparameters(
- self.config.default_experiment_config.storage,
- self.config.study_name)
-
- data_min = log_data_size_range[0]
- data_max = log_data_size_range[1] + 1
- for sweep_run in range(config.n_sweep_runs):
- for log_N_train in range(data_min, data_max):
- N_train = 2**log_N_train
- print(f"running sweep example for N_train = {N_train}")
- sys.stdout.flush()
- self.run_datasweep_sample(hps, sweep_run, N_train)
- print("done!")
- sys.stdout.flush()
-
- def run_datasweep_sample(self, hps: hyperparameter.ValueDict,
- sweep_run: int, N_train: int) -> None:
- sample_experiment_config = copy.deepcopy(
- self.config.default_experiment_config)
- hyperparameter.load_suggestion(sample_experiment_config, hps)
- # TODO: reengineer training fractions to have concrete values as
- # options.
- sample_experiment_config.data_config.n_train = N_train
-
- sample_experiment_config.run_name = file_utils.sweep_run_name(
- self.config.study_name, sweep_run, N_train)
-
- experiment = self.config.experiment_type(sample_experiment_config)
-
- def epoch_cb(epoch: int, model: Module, train_loss: float,
- best_valid_loss: float) -> None:
- pass
-
- experiment.generate_results(epoch_cb)
-
- def is_complete(self, study: optuna.study.Study) -> bool:
- trials = study.trials
- finished = [
- trial for trial in trials
- if trial.state in OPTUNA_TRIAL_FINISHED_STATES
- ]
- return len(finished) >= self.config.n_trials
-
- def stop_if_complete(self, study: optuna.study.Study,
- _: optuna.trial._frozen.FrozenTrial) -> None:
- if self.is_complete(study):
- study.stop()
-
- def optimize_hyperparameters(self) -> Dict[str, Any]:
- config = self.config
- optimizer_config = config.default_experiment_config.optimizer_config
-
- pruner = optuna.pruners.HyperbandPruner(
- min_resource=config.min_resource,
- max_resource=optimizer_config.epochs)
- if config.use_remote_storage:
- if not OPTUNA_ENVIRONMENT_VARIABLE in os.environ:
- raise EnvironmentError('Must set '
- f'{OPTUNA_ENVIRONMENT_VARIABLE} '
- 'to optuna server URI!')
- study = optuna.create_study(
- direction="minimize",
- pruner=pruner,
- study_name=config.study_name,
- storage=os.environ[OPTUNA_ENVIRONMENT_VARIABLE],
- load_if_exists=True)
- else:
- study = optuna.create_study(direction="minimize",
- pruner=pruner,
- study_name=config.study_name)
- if not self.is_complete(study):
- optuna.logging.get_logger("optuna").addHandler(
- logging.StreamHandler(sys.stdout))
- study.optimize(self.optimize,
- n_trials=config.n_trials,
- callbacks=[self.stop_if_complete])
- print("Study completed!")
- print(study.best_value)
- file_utils.save_hyperparameters(
- self.config.default_experiment_config.storage,
- self.config.study_name, study.best_params)
- return study.best_params
-
-
-if __name__ == '__main__':
- optuna.logging.set_verbosity(optuna.logging.DEBUG)
-
- CUBE_DATA_ASSET = 'contactnets_cube'
- BOX_URDF_ASSET = 'contactnets_cube.urdf'
- CUBE_MODEL = 'cube'
- STUDY_NAME = f'{CUBE_DATA_ASSET}_study'
- STORAGE_NAME = os.path.join(os.path.dirname(__file__), 'storage',
- STUDY_NAME)
- os.system(f'rm -r {file_utils.storage_dir(STORAGE_NAME)}')
-
- DT = 1. / 148.
-
- cube_urdf = file_utils.get_asset(BOX_URDF_ASSET)
- urdfs = {CUBE_MODEL: cube_urdf}
- base_config = DrakeSystemConfig(urdfs=urdfs)
- learnable_config = MultibodyLearnableSystemConfig(urdfs=urdfs)
-
- import_directory = file_utils.get_asset(CUBE_DATA_ASSET)
-
- data_config = DataConfig(storage=STORAGE_NAME,
- dt=DT,
- n_train=4,
- n_valid=2,
- n_test=2,
- import_directory=import_directory)
-
- default_experiment_config = SupervisedLearningExperimentConfig(
- base_config=base_config,
- learnable_config=learnable_config,
- data_config=data_config,
- )
-
- study_config = StudyConfig(
- study_name=STUDY_NAME,
- default_experiment_config=default_experiment_config,
- experiment_type=DrakeMultibodyLearnableExperiment,
- use_remote_storage=False)
-
- study = Study(study_config)
- study.study()
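The deleted Study.optimize() above relies on Optuna's report-and-prune pattern inside its epoch callback. A stripped-down sketch of that pattern with a synthetic objective follows; all names and numbers below are illustrative.

import optuna

def objective(trial: optuna.trial.Trial) -> float:
    lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
    loss = 1.0
    for epoch in range(50):                 # stand-in for training epochs
        loss *= 1.0 - min(lr, 0.05)
        trial.report(loss, step=epoch)      # same call the epoch callback makes
        if trial.should_prune():
            raise optuna.TrialPruned()
    return loss

study = optuna.create_study(
    direction='minimize',
    pruner=optuna.pruners.HyperbandPruner(min_resource=5, max_resource=50))
study.optimize(objective, n_trials=20)
print(study.best_params)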
diff --git a/dair_pll_old/dair_pll/sweep_plot.py b/dair_pll_old/dair_pll/sweep_plot.py
deleted file mode 100644
index b67fa0e..0000000
--- a/dair_pll_old/dair_pll/sweep_plot.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import pickle
-from typing import List, Tuple, Dict, Callable, Optional
-
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-
-from dair_pll import file_utils
-from dair_pll.plot_styler import PlotStyler
-
-
-def average(values: List) -> float:
-    return torch.tensor(values).mean().item()
-
-
-tt = lambda x: torch.tensor(x)
-na = lambda x: np.array(x, dtype=np.float32)
-
-
-def load_sweep(study_name: str) -> Dict:
- datasizes = file_utils.sweep_data_sizes(study_name)
- sweep = {}
- for N_train in datasizes:
- N_runs = file_utils.get_sweep_summary_count(study_name, N_train)
- if N_runs > 0:
- sweep[N_train] = []
- for N_run in range(N_runs):
- runfile = file_utils.sweep_summary_file(study_name, N_train,
- N_run)
- with open(runfile, 'rb') as f:
- sweep[N_train].append(pickle.load(f))
- # pdb.set_trace()
- return sweep
-
-
-def log_gaussian_band_values(runs: List[Dict], param: str,
- param2: Optional[str] = None) -> Tuple[
- float, float, float]:
- T = 2.02
- # pdb.set_trace()
- if param2 is None:
- v = torch.tensor([tt(d[param]).mean() for d in runs])
- else:
- v = torch.tensor(
- [tt(d[param]).mean() - tt(d[param2]).mean() for d in runs])
-
- if v.min() <= 0.:
- print("WARNING: fallback to gaussian interval")
- M = v.mean()
- S = v.std()
- return ((M - T * S).item(), M.item(), (M + T * S).item())
- log_v = torch.log(v)
- M = torch.mean(log_v)
- V = torch.var(log_v)
- N = v.nelement()
- l = M + (V / 2)
- # pdb.set_trace()
- r = T * torch.sqrt((V / N) + ((V ** 2) / (2 * (N - 1))))
- return (
- torch.exp(l - r).item(), torch.exp(l).item(), torch.exp(l + r).item())
-
-
-def query_sweep(sweep: Dict[int, List], func: Callable, param: str,
- param2: Optional[str] = None) -> Dict[
- int, Tuple[float, float, float]]:
- return {k: func(v, param, param2) for (k, v) in sweep.items()}
-
-
-def plot_sweep_comparison(plot_name: str, study_names: List[str]) -> None:
- study_bands = []
- metrics = ['loss', 'traj_mse', 'rot_err', 'pos_err']
- distributions = ['train_oracle', 'train_model', 'test_model']
- comps = ['oracle', 'train', 'test', 'suboptimality', 'generalization']
- comparison_chain = []
- comparison_names = []
- for metric in metrics:
- fields = [f'{dist}_{metric}' for dist in distributions]
- metric_names = [f'{metric}_{comp}' for comp in comps]
- comparison_names += metric_names
- comparison_chain += [(f, None) for f in fields]
- comparison_chain += [(fields[1], fields[0]), (fields[2], fields[1])]
-
- # fields = ['train_oracle_loss', 'train_model_loss', 'test_model_loss', 'test_model_traj_mse', 'test_model_rot_err', 'test_model_pos_err']
- # comparison_names = fields + ['model_suboptimality', 'generalization_error']
- # comparison_chain = [(f,None) for f in fields] + [(fields[1], fields[0]), (fields[2], fields[1])]
- colors = ['r', 'g', 'b']
- for study_name in study_names:
- sweep = load_sweep(study_name)
- bands = []
- for i in range(len(comparison_chain)):
- query = query_sweep(sweep, log_gaussian_band_values,
- comparison_chain[i][0], comparison_chain[i][1])
- bands.append(query)
- study_bands.append(bands)
-
- for band in bands:
- print(band)
-
- # pdb.set_trace()
- for i in range(len(comparison_chain)):
- sub_opt_loss = [b[i] for b in study_bands]
- x_lows = [na(list(b.keys())) for b in sub_opt_loss]
- xs = x_lows
- x_highs = x_lows
- y_lows = [na([b[k][0] for k in b.keys()]) for b in sub_opt_loss]
- ys = [na([b[k][1] for k in b.keys()]) for b in sub_opt_loss]
- y_highs = [na([b[k][2] for k in b.keys()]) for b in sub_opt_loss]
- plt.figure()
- ps = PlotStyler()
- j = 0
- for x_low, x, x_high, y_low, y, y_high in zip(x_lows, xs, x_highs,
- y_lows, ys, y_highs):
- ps.plot(x, y, color=colors[j])
- # pdb.set_trace()
- ps.plot_bands(x_low, x_high, y_low, y_high, color=colors[j])
- j += 1
- ps.set_default_styling(directory=file_utils.plots_dir())
- ps.save_fig(f'{plot_name}_{comparison_names[i]}')
- # ps.show_fig()
-
-
-if __name__ == '__main__':
- TEST = False
-
- if TEST:
- plot_name = 'mujoco_cube_test'
- study_names = [f'mujoco_cube_{stiffness}_sweep_test' for stiffness in
- [300]]
- else:
- plot_name = 'mujoco_cube'
- study_names = [f'mujoco_cube_{stiffness}' for stiffness in
- [100, 300, 2500]]
- plot_sweep_comparison(plot_name, study_names)
diff --git a/dair_pll_old/dair_pll/system.py b/dair_pll_old/dair_pll/system.py
deleted file mode 100644
index faa06e2..0000000
--- a/dair_pll_old/dair_pll/system.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""System abstract type definition.
-
-This file contains the fundamental ``System`` interface for dynamical
-systems. Systems are defined by their underlying dynamics; integration
-scheme; and an initial condition sampler for both states and hidden
-states/``carry``.
-
-Unlike ``Integrator``, ``System`` requires a temporal sequence of initial
-condition states; this is done to accommodate systems with hidden states that
-behave differently when "preloaded" with an initialization trajectory,
-such as a UKF estimator or an RNN.
-
-``System`` is used to interface with external simulators, e.g. Drake and MuJoCo.
-"""
-from abc import ABC
-from dataclasses import dataclass, field
-from typing import Tuple, Callable, Optional, Dict, List
-
-import numpy as np
-import torch
-from torch import Tensor
-from torch.nn import Module
-
-from dair_pll import state_space
-from dair_pll.integrator import Integrator
-from dair_pll.state_space import StateSpace, StateSpaceSampler
-
-
-@dataclass
-class MeshSummary:
- r""":py:func:`dataclasses.dataclass` for mesh visualization."""
- vertices: Tensor = Tensor()
- r"""Vertices in mesh, ``(n_vert, 3)``\ ."""
- faces: Tensor = Tensor()
- r"""3-tuple indices of vertices that form faces, ``(n_face, 3)``\ ."""
-
-
-@dataclass
-class SystemSummary:
- """:py:func:`dataclasses.dataclass` for reporting information about the
- progress of a training run."""
- scalars: Dict[str, float] = field(default_factory=dict)
- videos: Dict[str, Tuple[np.ndarray, int]] = field(default_factory=dict)
- meshes: Dict[str, MeshSummary] = field(default_factory=dict)
- overlaid_scalars: Optional[List[Dict[str, float]]] = None
-
-
-class System(ABC, Module):
- """Class for encapsulating a dynamical system.
-
- Primarily implemented as a thin shell of ``Integrator`` with various
- sampling interfaces defined.
-
- A major difference from the ``Integrator`` interface is that ``System``
- accepts a sequence of states, along with a single ``carry``/hidden state, as
- an initial condition to accommodate proper initialization of some types
- of recurrent dynamics.
- """
- space: StateSpace
- integrator: Integrator
- state_sampler: StateSpaceSampler
- carry_callback: Optional[Callable[[], Tensor]]
- max_batch_dim: Optional[int]
-
- def __init__(self,
- space: StateSpace,
- integrator: Integrator,
- max_batch_dim: Optional[int] = None) -> None:
- """Inits ``System`` with prescribed integration properties.
-
- Args:
- space: State space of underlying dynamics
- integrator: Integrator of underlying dynamics
- max_batch_dim: Maximum number of batch dimensions supported by
- ``integrator``.
- """
- super().__init__()
- self.space = space
- self.integrator = integrator
- self.state_sampler = state_space.ZeroSampler(space)
- # pylint: disable=E1103
- self.carry_callback = lambda: torch.zeros((1, 1))
- self.max_batch_dim = max_batch_dim
-
- def sample_trajectory(self, length: int) -> Tuple[Tensor, Tensor]:
- """Sample
-
- Args:
- length: duration of trajectory in number of time steps
-
- Returns:
- (length, space.nx) state trajectory
- (length, ?) carry trajectory
- """
- x_0, carry_0 = self.sample_initial_condition()
- return self.simulate(x_0, carry_0, length)
-
- def simulate(self,
- x_0: Tensor,
- carry_0: Tensor,
- steps: int = 1) -> Tuple[Tensor, Tensor]:
- """Simulate forward in time from initial condition.
-
- Args:
- x_0: ``(*, T_0, space.n_x)`` initial state sequence
- carry_0: ``(*, ?)`` initial hidden state
- steps: number of steps to take beyond initial condition
-
- Returns:
- ``(*, steps + 1, space.n_x)`` state trajectory
- ``(*, steps + 1, ?)`` hidden state trajectory
- """
-
- # If batching is more dimensions than allowed, iterate over outer
- # dimension.
- if self.max_batch_dim is not None and \
- (x_0.dim() - 2) > self.max_batch_dim:
-
- x_carry_list = [
- self.simulate(x0i, c0i) for x0i, c0i in zip(x_0, carry_0)
- ]
- # pylint: disable=E1103
- x_trajectory = torch.stack([x_carry[0] for x_carry in x_carry_list])
- carry_trajectory = torch.stack(
- [x_carry[1] for x_carry in x_carry_list])
- else:
- x, carry = self.preprocess_initial_condition(x_0, carry_0)
- x_trajectory, carry_trajectory = self.integrator.simulate(
- x, carry, steps)
- return x_trajectory, carry_trajectory
-
- def sample_initial_condition(self) -> Tuple[Tensor, Tensor]:
- """Queries state and hidden state samplers for initial condition."""
- assert self.carry_callback is not None
-
- # Reshapes (space.n_x,) sample into duration-1 sequence.
- return self.state_sampler.get_sample().reshape(
- 1, self.space.n_x), self.carry_callback()
-
- def set_state_sampler(self, sampler: StateSpaceSampler) -> None:
- """Setter for state initial condition sampler."""
- self.state_sampler = sampler
-
- def set_carry_sampler(self, callback: Callable[[], Tensor]) -> None:
- """Setter for hidden state initial condition sampler."""
- self.carry_callback = callback
-
- def preprocess_initial_condition(self, x_0: Tensor,
- carry_0: Tensor) -> Tuple[Tensor, Tensor]:
- r"""Preprocesses initial condition state sequence into single state
- initial condition for integration.
-
- For example, an RNN would use the state sequence to "preload" hidden
- states in the RNN, where ``carry_0`` would provide an initial hidden
- state, and the output would be the hidden state after the RNN
- receives the state sequence.
-
- Args:
- x_0: ``(*, T_0, space.n_x)`` initial state sequence.
- carry_0: ``(*, ?)`` initial hidden state.
-
- Returns:
- ``(*, space.n_x)`` processed initial state.
- ``(*, ?)`` processed initial hidden state.
- """
- assert len(x_0.shape) >= 2
- assert len(carry_0.shape) >= 1
- if self.max_batch_dim is not None:
- assert len(x_0.shape) <= 2 + self.max_batch_dim
- assert len(carry_0.shape) <= 1 + self.max_batch_dim
- assert x_0.shape[-1] == self.space.n_x
-
- # Just return most recent state, don't do anything to hidden state
- return x_0[..., -1, :], carry_0
-
- def summary(self, statistics: Dict) -> SystemSummary:
- """Summarizes the current behavior and properties of the system.
-
- Args:
- statistics: dictionary of training statistics
-
- Returns:
- Summary of system.
-
- Todo:
- Update for structured statistics object.
- Fix ``pylint`` warning elegantly.
- """
- # no-ops to prevent pesky pylint errors
- assert statistics is not None
- assert self is not None
- return SystemSummary()
-
- def get_regularization_terms(self, x: Tensor, u: Tensor,
- x_plus: Tensor) -> List[Tensor]:
- """Return a list of possible regularization terms. This template
- returns no regularizers.
- """
- return []
\ No newline at end of file
diff --git a/dair_pll_old/dair_pll/tensor_utils.py b/dair_pll_old/dair_pll/tensor_utils.py
deleted file mode 100644
index b83d228..0000000
--- a/dair_pll_old/dair_pll/tensor_utils.py
+++ /dev/null
@@ -1,495 +0,0 @@
-"""Tensor utility functions.
-
-Contains various utility functions for common tensor operations required
-throughout the package. All such future functions should be placed here,
-with the following exceptions:
-
- * Utilities for operating directly on :math:`SO(3)` should be placed in
- :py:mod:`dair_pll.quaternion`
-"""
-from typing import List, cast
-
-import torch
-from torch import Tensor
-
-
-def tile_dim(tiling_tensor: Tensor, copies: int, dim: int = 0) -> Tensor:
- """Tiles tensor along specified dimension.
-
- Args:
- tiling_tensor: ``(n_0, ..., n_{k-1})`` tensor.
- copies: number of copies, ``copies >= 1``.
- dim: dimension to be tiled, ``-k <= dim <= k - 1``.
-
- Returns:
-        ``(n_0, ..., copies * n_dim, ..., n_{k-1})`` tiled tensor.
-
- Raises:
- ValueError: when ``copies`` is not a strictly-positive integer
-
- """
- if not copies >= 1:
- raise ValueError(
- f'Tiling count should be positive int, got {copies} instead.')
-
- # pylint: disable=E1103
- return torch.cat([tiling_tensor] * copies, dim=dim)
-
-
-def tile_last_dim(tiling_tensor: Tensor, copies: int) -> Tensor:
- """Tile right dimension (``-1``) via :py:func:`tile_dim`"""
- return tile_dim(tiling_tensor, copies, -1)
-
-
-def tile_penultimate_dim(tiling_tensor: Tensor, copies: int) -> Tensor:
- """Tile second-to-last dimension (``-2``) :py:func:`tile_dim`"""
- return tile_dim(tiling_tensor, copies, -2)
-
-
-def pbmm(t_1: Tensor, t_2: Tensor) -> Tensor:
- """Multiplies matrices with optional batching.
-
- Wrapper function that performs a final-axes (``-2,-1``) matrix-matrix,
- vector-matrix, matrix-vector, or vector-vector product depending on the
- shape of ``t_1`` and ``t_2``. The following logic is used:
-
- * do a matrix-matrix multiplication if both factors have dimension at
- least two, and broadcast to the larger (inferred) batch.
- * do a vector-matrix / matrix-vector multiplication if one factor is
- a vector and the other has dimension ``>= 2``
- * do a vector-vector multiplication if both factors are vectors.
-
-
- Args:
- t_1: ``(*, l, m)`` or ``(l, m)`` or ``(m,)`` left tensor factor.
- t_2: ``(*, m, n)`` or ``(m, n)`` or ``(m,)`` right tensor factor.
-
- Returns:
-        ``(*, l, n)`` product tensor if both factors are matrices; ``(*, l)``
-        if ``t_2`` is a vector; ``(*, n)`` if ``t_1`` is a vector; or a scalar
-        if both ``t_1`` and ``t_2`` are vectors.
- """
- t_1_dim = t_1.dim()
- t_2_dim = t_2.dim()
- needs_squeeze = None
- # case 1: single dot product
- if max(t_1_dim, t_2_dim) == 1:
- # dot product
- return (t_1 * t_2).sum()
-
- # temporarily expand dimension for vector-matrix product
- if t_1_dim == 1:
- t_1 = t_1.unsqueeze(0)
- t_1_dim = 2
- needs_squeeze = -2
- elif t_2_dim == 1:
- t_2 = t_2.unsqueeze(1)
- t_2_dim = 2
- needs_squeeze = -1
-
- # cases 2 and 3: matrix product
- if max(t_1_dim, t_2_dim) > 2:
- # match batching
- if t_1_dim < t_2_dim:
- t_1 = t_1.expand(t_2.shape[:-t_1_dim] + t_1.shape)
- elif t_1_dim > t_2_dim:
- t_2 = t_2.expand(t_1.shape[:-t_2_dim] + t_2.shape)
-
- # pylint: disable=E1103
- product = torch.matmul(t_1.float(), t_2.float())
- else:
- product = t_1.mm(t_2)
-
- if needs_squeeze:
- product = product.squeeze(needs_squeeze)
- return product
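A quick shape check of the branching described in the docstring; the import assumes the removed module were still importable as `dair_pll.tensor_utils` and is shown purely for illustration:

```python
import torch
from dair_pll.tensor_utils import pbmm  # hypothetical import of the removed module

A = torch.randn(7, 4, 5)   # batch of matrices
B = torch.randn(5, 6)      # single matrix, broadcast over the batch
v = torch.randn(5)
w = torch.randn(4)

assert pbmm(A, B).shape == (7, 4, 6)              # matrix-matrix with broadcasting
assert pbmm(A, v).shape == (7, 4)                 # matrix-vector
assert pbmm(w, torch.randn(4, 5)).shape == (5,)   # vector-matrix
assert pbmm(v, v).dim() == 0                      # vector-vector dot product
```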
-
-
-def deal(dealing_tensor: Tensor,
- dim: int = 0,
- keep_dim: bool = False) -> List[Tensor]:
- """Converts dim of tensor to list.
-
- Example:
- Let ``t`` be a 3-dimensional tensor of shape ``(3,5,3)`` such that::
-
- t[:, i, :] == torch.eye(3).
-
- Then ``deal(t, dim=1)`` returns a list of 5 ``(3,3)`` identity tensors,
- and ``deal(t, dim=1, keep_dim=True)`` returns a list of ``(3,1,3)``
- tensors.
-
- Args:
- dealing_tensor: ``(n_0, ..., n_dim, ..., n_{k-1})`` shaped tensor.
- dim: tensor dimension to deal, ``-k <= dim <= k-1``.
- keep_dim: whether to squeeze list items along ``dim``.
-
- Returns:
- List of dealt sub-tensors of shape ``(..., n_{dim-1}, {n_dim+1}, ...)``
- or ``(..., n_{dim-1}, 1, {n_dim+1}, ...)``.
- """
- tensor_list = torch.split(dealing_tensor, 1, dim=dim)
- if keep_dim:
-        return list(tensor_list)
- return [tensor_i.squeeze(dim) for tensor_i in tensor_list]
-
-
-def skew_symmetric(vectors: Tensor) -> Tensor:
- r"""Converts vectors in :math:`\mathbb{R}^3` into skew-symmetric form.
-
- Converts vector(s) :math:`v` in ``vectors`` into skew-symmetric matrix:
-
- .. math::
-
- S(v) = -S(v)^T = \begin{bmatrix} 0 & -v_3 & v_2 \\
- v_3 & 0 & -v_1 \\
- -v_2 & v_1 & 0 \end{bmatrix}
-
- Args:
- vectors: ``(*, 3)`` vector(s) to convert to matrices
-
- Returns:
- ``(*, 3, 3)`` skew-symmetric matrices :math:`S(v)`
- """
- # pylint: disable=E1103
- zero = torch.zeros_like(vectors[..., 0])
-
- # pylint: disable=E1103
- row_1 = torch.stack((zero, -vectors[..., 2], vectors[..., 1]), -1)
- row_2 = torch.stack((vectors[..., 2], zero, -vectors[..., 0]), -1)
- row_3 = torch.stack((-vectors[..., 1], vectors[..., 0], zero), -1)
-
- return torch.stack((row_1, row_2, row_3), -2)
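The defining property S(v) w = v × w, together with skew symmetry, can be sanity-checked directly (hypothetical import of the removed module, illustration only):

```python
import torch
from dair_pll.tensor_utils import skew_symmetric  # hypothetical import, illustration only

v = torch.tensor([1., 2., 3.])
w = torch.tensor([-0.5, 4., 0.25])

# S(v) @ w reproduces the cross product v x w.
assert torch.allclose(skew_symmetric(v) @ w, torch.linalg.cross(v, w))
# Skew symmetry: S(v)^T == -S(v).
assert torch.equal(skew_symmetric(v).T, -skew_symmetric(v))
```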
-
-
-def symmetric_offdiagonal(vectors: Tensor) -> Tensor:
- r"""Converts vectors in :math:`\mathbb{R}^3` into symmetric off-diagonal
-    form. This is the same construction as :py:func:`skew_symmetric`, except
-    without the negative signs, so the result is symmetric.
-
- Converts vector(s) :math:`v` in ``vectors`` into symmetric matrix:
-
- .. math::
-
- S(v) = S(v)^T = \begin{bmatrix} 0 & v_3 & v_2 \\
- v_3 & 0 & v_1 \\
- v_2 & v_1 & 0 \end{bmatrix}
-
- Args:
- vectors: ``(*, 3)`` vector(s) to convert to matrices
-
- Returns:
- ``(*, 3, 3)`` symmetric matrices :math:`S(v)`
- """
- # pylint: disable=E1103
- zero = torch.zeros_like(vectors[..., 0])
-
- # pylint: disable=E1103
- row_1 = torch.stack((zero, vectors[..., 2], vectors[..., 1]), -1)
- row_2 = torch.stack((vectors[..., 2], zero, vectors[..., 0]), -1)
- row_3 = torch.stack((vectors[..., 1], vectors[..., 0], zero), -1)
-
- return torch.stack((row_1, row_2, row_3), -2)
-
-
-def one_vector_block_diagonal(num_blocks: int, vector_length: int) -> Tensor:
- """Computes a block diagonal matrix with column vectors of ones as blocks.
-
- Associated with the mathematical symbol :math:`E`.
-
- Example:
- ::
-
- one_vector_block_diagonal(3, 2) == tensor([
- [1., 0., 0.],
- [1., 0., 0.],
- [0., 1., 0.],
- [0., 1., 0.],
- [0., 0., 1.],
- [0., 0., 1.]]).
-
- Args:
- num_blocks: number of columns.
- vector_length: number of ones in each matrix diagonal block.
-
- Returns:
- ``(n * vector_length, n)`` 0-1 tensor.
- """
- # pylint: disable=E1103
- return torch.eye(num_blocks).repeat(1, vector_length).reshape(
- num_blocks * vector_length, num_blocks)
-
-
-def spatial_to_point_jacobian(p_BoP_E: Tensor) -> Tensor:
- r"""Body-fixed translational velocity to spatial velocity Jacobian.
-
- Takes a batch of points :math:`[^{Bo}p^P]_E` fixed to body :math:`B` and
- expressed in some coordinates :math:`E`, and constructs the Jacobian of
- their linear velocity in some other frame :math:`A` w.r.t. the
- :math:`E`-coordinate spatial velocity of :math:`B` relative to :math:`A`.
-
- In detail, let the :math:`i`th element of the batch represent point ``Pi``
- as :math:`[^{Bo}p^{Pi}]_E`, and let Ao be fixed in A. The Jacobian
- calculated is
-
- .. math::
-
- J = \frac{\partial [^Av^{Pi}]_E }{\partial [^AV^B]_E}.
-
- We have that :math:`[^AV^B]_E = [^A\omega^B; ^{A}v^{Bo}]_E`, and from
- kinematics that
-
- .. math::
-
- ^Av^{Pi}= ^{A}v^{Bo} + ^A\omega^B \times ^{Bo}p^{Pi}.
-
- Thus, the Jacobian is of the form
-
- .. math::
-
- J = [-S([^{Bo}p^{Pi}]_E), I_3],
-
- where :math:`S` is calculated via :py:func:`skew_symmetric`.
-
- Args:
- p_BoP_E: ``(*, 3)``, body frame point(s) :math:`P` in coordinates
- :math:`E`
-
- Returns:
- ``(*, 3, 6)`` Jacobian tensor(s) :math:`J`
-
- """
- left = -skew_symmetric(p_BoP_E)
-
- # pylint: disable=E1103
- right = torch.eye(3).expand(p_BoP_E.shape[:-1] + (3, 3))
-
- # pylint: disable=E1103
- return torch.cat((left, right), dim=-1)
-
-
-def trace_identity(matrices: Tensor) -> Tensor:
- r"""Converts batch of matrices :math:`M \in \mathbb{R}^{n \times n}` into a
-    batch of matrices equal to :math:`(\mathrm{trace}\, M) I_n`\ .
-
-    Args:
-        matrices: ``(*, n, n)`` matrices to convert.
-
-    Returns:
-        ``(*, n, n)`` diagonal matrices.
- """
- assert matrices.shape[-2] == matrices.shape[-1]
- expand_shape = matrices.shape[:-1]
- traces = matrices.diagonal(dim1=-1, dim2=-2).sum(-1, keepdim=True)
- return torch.diag_embed(traces.expand(expand_shape))
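For instance, a single 2x2 matrix with trace 5 maps to 5·I₂ (a quick check, again assuming the removed module were importable):

```python
import torch
from dair_pll.tensor_utils import trace_identity  # hypothetical import, illustration only

M = torch.tensor([[1., 2.],
                  [3., 4.]])  # trace(M) == 5
assert torch.allclose(trace_identity(M), 5. * torch.eye(2))
```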
-
-
-def rotation_matrix_from_one_vector(directions: Tensor, axis: int) -> Tensor:
- r"""Converts a batch of directions for specified axis, to a
- batch of rotation matrices.
-
- Specifically, if the ``i``\ th provided direction is ``d_i``, then the
- ``i``\ th returned rotation matrix ``R_i`` obeys::
-
- R_i[:, axis] == d_i.
-
- Reimplements the algorithm from Drake's
- :py:meth:`pydrake.math.RotationMatrix_[float].MakeFromOneVector`. For more
- details,
- see ``rotation_matrix.cc:L13`` at the following address:
-
- https://github.com/RobotLocomotion/drake/blob/d9c453d214ef715c89ab0e8553cae24900b7adde/math/rotation_matrix.cc#L13
-
- Args:
- directions: ``(*, 3)`` x/y/z directions.
- axis: ``0``, ``1``, or ``2`` depending on if ``directions`` are x, y, or
- z.
- Returns:
- ``(*, 3, 3)`` rotation matrix batch.
- """
- assert axis in [0, 1, 2]
- original_shape = directions.shape
- directions = directions.view(-1, 3)
- # pylint: disable=E1103
- batch_range = torch.arange(directions.shape[0])
-
- column_a = directions / directions.norm(dim=-1, keepdim=True)
-
- # pylint: disable=E1103
- min_a = torch.abs(column_a).min(dim=-1)
- min_magnitude_a = directions[batch_range, min_a.indices]
- axis_i = min_a.indices
- axis_j = (axis_i + 1) % 3
- axis_k = (axis_j + 1) % 3
-
- # pylint: disable=E1103
- magnitude_a_u = torch.sqrt(1 - min_magnitude_a * min_magnitude_a)
- axis_c_correction = -min_magnitude_a / magnitude_a_u
-
- # pylint: disable=E1103
- column_b = torch.zeros_like(column_a)
- column_b[batch_range,
- axis_j] += -column_a[batch_range, axis_k] / magnitude_a_u
- column_b[batch_range,
- axis_k] += column_a[batch_range, axis_j] / magnitude_a_u
-
- column_c = torch.zeros_like(column_a)
- column_c[batch_range, axis_i] += magnitude_a_u
- column_c[batch_range,
- axis_j] += axis_c_correction * column_a[batch_range, axis_j]
- column_c[batch_range,
- axis_k] += axis_c_correction * column_a[batch_range, axis_k]
-
- columns = [torch.tensor(0.)] * 3
- columns[axis] = column_a
- columns[(axis + 1) % 3] = column_b
- columns[(axis + 2) % 3] = column_c
-
- return torch.stack(columns, dim=-1).reshape(original_shape + (3,))
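The promised properties, that the requested column equals the normalized input direction and that the result is orthonormal, can be checked on a small batch (hypothetical import; the directions are made up for illustration):

```python
import torch
from dair_pll.tensor_utils import rotation_matrix_from_one_vector  # hypothetical import

directions = torch.tensor([[0., 0., 2.],
                           [1., 1., 0.]])
R = rotation_matrix_from_one_vector(directions, axis=2)

unit = directions / directions.norm(dim=-1, keepdim=True)
assert torch.allclose(R[..., :, 2], unit, atol=1e-6)                 # z-columns match
identity = torch.eye(3).expand(2, 3, 3)
assert torch.allclose(R.transpose(-1, -2) @ R, identity, atol=1e-6)  # orthonormal
```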
-
-
-def broadcast_lorentz(vectors: Tensor) -> Tensor:
- r"""Utility function that broadcasts scalars into Lorentz product cone
- format.
-
- This function maps a given vector :math:`v = [v_1, \dots, v_n]` in given
- batch ``vectors`` to
-
- .. math::
-
- \begin{bmatrix} v & v_1 & v_1 & \cdots & v_n & v_n \end{bmatrix}.
-
- Args:
- vectors: ``(*, n)`` vectors to be broadcasted.
- Returns:
- ``(*, 3 * n)`` broadcasted vectors.
- """
- n_cones = vectors.shape[-1]
- double_vectors_shape = vectors.shape[:-1] + (2 * n_cones,)
- vectors_tiled = vectors.unsqueeze(-1).repeat(
- [1] * len(vectors.shape) + [2]).reshape(double_vectors_shape)
- # pylint: disable=E1103
- return torch.cat((vectors, vectors_tiled), dim=-1)
-
-
-def project_lorentz(vectors: Tensor) -> Tensor:
- r"""Utility function that projects vectors in Lorentz cone product.
-
- This function takes in a batch of vectors
-
- .. math::
-
- \begin{align}
- v &= \begin{bmatrix} v_{n1} & \cdots v_{nk} & v_{t1} & \cdots v_{tk}
- \end{bmatrix},\\
- v_{ni} &\in \mathbb{R},\\
- v_{ti} &\in \mathbb{R}^2,\\
- \end{align}
-
- and projects each :math:`v_i = [v_{ni} v_{ti}]` into the Lorentz cone
- :math:`L = \{ v_{ni} \geq ||v_{ti}||_2\}` via the following piecewise
- formula:
-
- * if :math:`v_i \in L`, it remains the same.
- * if :math:`v_i \in L^{\circ} = \{-v_{ni} \geq ||v_{ti}||_2\}` (the
- polar cone), replace it with :math:`0`.
- * if :math:`v_i \not\in L \cup L^\circ`, replace it with
-
- .. math::
-
- v = \begin{bmatrix} n & \frac{n}{||v_{ti}||_2}v_{ti}
- \end{bmatrix},
-
- where :math:`n = \frac{1}{2}(v_{ni} + ||v_{ti}||_2)`.
-
-
- Args:
- vectors: ``(*, 3 * n)`` vectors to be projected.
- Returns:
- ``(*, 3 * n)`` broadcasted vectors.
- """
- # pylint: disable=too-many-locals
- assert vectors.shape[-1] % 3 == 0
- n_cones = vectors.shape[-1] // 3
-
- normals = vectors[..., :n_cones]
- tangents = vectors[..., n_cones:]
- tangent_vectors_shape = tangents.shape[:-1] + (n_cones, 2)
- tangent_norms = tangents.reshape(tangent_vectors_shape).norm(dim=-1)
-
- not_in_lorentz_cone = tangent_norms > normals
- in_polar_cone: Tensor = cast(Tensor, tangent_norms <= -normals)
- in_neither_cone: Tensor = cast(Tensor,
- (~in_polar_cone) & not_in_lorentz_cone)
-
- in_polar_mask = broadcast_lorentz(in_polar_cone)
- in_neither_mask = broadcast_lorentz(in_neither_cone)
-
- projected_vectors = vectors.clone()
-
- projected_vectors[in_polar_mask] *= 0.
-
- normals_rescaled = (normals + tangent_norms) / 2
- tangent_normalizer = normals_rescaled / tangent_norms
- tangent_rescaled = tangents * tangent_normalizer.unsqueeze(-1).expand(
- tangent_vectors_shape).reshape(tangents.shape)
- # pylint: disable=E1103
- vectors_rescaled = torch.cat((normals_rescaled, tangent_rescaled), dim=-1)
-
- projected_vectors[in_neither_mask] = vectors_rescaled[in_neither_mask]
- return projected_vectors
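The three branches can be exercised with a hand-built batch of three cones, laid out as `[n_1, n_2, n_3, t_1, t_2, t_3]` with each tangent a 2-vector (hypothetical import; values chosen for illustration):

```python
import torch
from dair_pll.tensor_utils import project_lorentz  # hypothetical import, illustration only

# cone 1: inside the cone       (2 >= ||(1, 0)||)      -> unchanged
# cone 2: inside the polar cone (-(-2) >= ||(1, 0)||)  -> projected to zero
# cone 3: in neither cone       (0 <  ||(2, 0)||)      -> projected onto the boundary
v = torch.tensor([2., -2., 0.,   # normal components n_1, n_2, n_3
                  1., 0.,        # tangent of cone 1
                  1., 0.,        # tangent of cone 2
                  2., 0.])       # tangent of cone 3
expected = torch.tensor([2., 0., 1., 1., 0., 0., 0., 1., 0.])
assert torch.allclose(project_lorentz(v), expected)
```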
-
-
-def reflect_lorentz(vectors: Tensor) -> Tensor:
- r"""Utility function that reflects vectors along the Lorentz cone's
- normal axis.
- Args:
- vectors: ``(*, 3 * n)`` vectors to be reflected.
- Returns:
- ``(*, 3 * n)`` reflected vectors.
- """
- # pylint: disable=too-many-locals
- assert vectors.shape[-1] % 3 == 0
- n_cones = vectors.shape[-1] // 3
-
- normals = vectors[..., :n_cones]
- tangents = vectors[..., n_cones:]
- return torch.cat((-normals, tangents), dim=-1)
-
-
-def sappy_reorder_mat(n_cones: int) -> Tensor:
- r"""Generates a 0-1 matrix that reorders force variable indices between
- ``dair_pll`` ordering and ``sappy`` ordering.
-
- ``dair_pll`` orders force variables as
-
- .. math::
-
- \lambda = \begin{bmatrix} \lambda_{n1}; & \cdots \lambda_{nk}; &
- \lambda_{t1}; &
- \cdots \lambda_{tk}; \end{bmatrix}\,,
-
- whereas ``sappy`` accepts decision variables in format
-
- .. math::
-
- \lambda_s = \begin{bmatrix} \lambda_{t1}; & \lambda_{n1}; &
- \cdots & \lambda_{tk}; & \lambda_{nk} \end{bmatrix}\,.
-
-    This function returns the matrix :math:`M` that maps between the two as
-
- .. math::
-
- \lambda = M\lambda_s
-
- Args:
-        n_cones: number of contact cones :math:`k`.
-
- Returns:
- ``(3 * n_cones, 3 * n_cones)`` reordering matrix.
- """
- # pylint: disable=E1103
- matrix = torch.zeros((3 * n_cones, 3 * n_cones))
- for cone in range(n_cones):
- matrix[cone][3 * cone + 2] = 1
- matrix[n_cones + 2 * cone][3 * cone] = 1
- matrix[n_cones + 2 * cone + 1][3 * cone + 1] = 1
- return matrix
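A small check of the reordering for two cones: the sappy-ordered vector interleaves tangent pairs with normals, and `M` gathers the normals to the front (hypothetical import, illustration only):

```python
import torch
from dair_pll.tensor_utils import sappy_reorder_mat  # hypothetical import, illustration only

M = sappy_reorder_mat(2)
lambda_sappy = torch.tensor([1., 2., 3., 4., 5., 6.])  # [t1x, t1y, n1, t2x, t2y, n2]
lambda_dair = M @ lambda_sappy                         # [n1, n2, t1x, t1y, t2x, t2y]
assert torch.allclose(lambda_dair, torch.tensor([3., 6., 1., 2., 4., 5.]))
```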
diff --git a/dair_pll_old/dair_pll/ukf.py b/dair_pll_old/dair_pll/ukf.py
deleted file mode 100644
index 395992f..0000000
--- a/dair_pll_old/dair_pll/ukf.py
+++ /dev/null
@@ -1,591 +0,0 @@
-import numpy as np
-from scipy.linalg import block_diag
-
-import pdb
-
-class UKF:
- """The Unscented Kalman Filter on (parallelizable) Manifolds.
-
-    This filter is the implementation described in :cite:`brossardCode2019`. It
- is well adapted to relatively small systems and for understanding the
- methodology of **UKF-M**, otherwise see :meth:`~ukfm.JUKF`. Noise covariance
- parameters are assumed static for convenience, i.e. :math:`\\mathbf{Q}_n =
- \\mathbf{Q}`, and :math:`\\mathbf{R}_n = \\mathbf{R}`.
-
- :arg f: propagation function :math:`f`.
- :arg h: observation function :math:`h`.
- :arg phi: retraction :math:`\\boldsymbol{\\varphi}`.
- :arg phi_inv: inverse retraction :math:`\\boldsymbol{\\varphi}^{-1}`.
- :ivar Q: propagation noise covariance matrix (static) :math:`\\mathbf{Q}`.
- :ivar R: observation noise covariance matrix (static) :math:`\\mathbf{R}`.
- :arg alpha: sigma point parameters. Must be 1D array with 3 values.
- :ivar state: state :math:`\\boldsymbol{\\hat{\\chi}}_n`, initialized at
- ``state0``.
- :ivar P: state uncertainty covariance :math:`\\mathbf{P}_n`, initialized at
- ``P0``.
- """
-
-    TOL = 1e-9  # tolerance parameter (avoids numerical issues)
-
- def __init__(self, f, h, phi, phi_inv, Q, R, alpha, state0, P0):
- self.f = f
- self.h = h
- self.phi = phi
- self.phi_inv = phi_inv
- self.Q = Q
- self.R = R
- self.state = state0
- self.P = P0
-
- # Cholesky decomposition of Q
- self.cholQ = np.linalg.cholesky(Q).T
-
- # variable dimensions
- self.d = P0.shape[0]
- self.q = Q.shape[0]
- self.l = R.shape[0]
-
- self.Id_d = np.eye(self.d)
-
- # sigma point weights
- self.weights = self.WEIGHTS(P0.shape[0], Q.shape[0], alpha)
-
- class WEIGHTS:
- """Sigma point weights.
-
- Weights are computed as:
-
- .. math::
-
- \\lambda &= (\\alpha^2 - 1) \\mathrm{dim}, \\\\
- w_j &= 1/(2(\\mathrm{dim} + \\lambda)), \\\\
- w_m &= \\lambda/(\\lambda + \\mathrm{dim}), \\\\
- w_0 &= \\lambda/(\\lambda + \\mathrm{dim}) + 3 - \\alpha^2,
-
- where :math:`\\alpha` is a parameter set between :math:`10^{-3}` and
- :math:`1`, and :math:`\\mathrm{dim}` is the dimension of the
- sigma-points (:math:`d` or :math:`q`).
-
- This variable contains sigma point weights for propagation (w.r.t. state
- uncertainty and noise) and for update.
- """
-
- def __init__(self, d, q, alpha):
- # propagation w.r.t. state
- self.d = self.W(d, alpha[0])
- # propagation w.r.t. noise
- self.q = self.W(q, alpha[1])
- # update w.r.t. state
- self.u = self.W(d, alpha[2])
-
- class W:
- def __init__(self, l, alpha):
- m = (alpha**2 - 1) * l
- self.sqrt_d_lambda = np.sqrt(l + m)
- self.wj = 1/(2*(l + m))
- self.wm = m/(m + l)
- self.w0 = m/(m + l) + 3 - alpha**2
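As a numeric sanity check of these formulas (hypothetical numbers, not taken from the original code): with dim = 3 and α = 1, λ = 0, so the sigma-point spread is √3, w_j = 1/6, w_m = 0, and w_0 = 2.

```python
import numpy as np

dim, alpha = 3, 1.0                        # illustrative values only
lam = (alpha ** 2 - 1) * dim               # lambda = 0
sqrt_d_lambda = np.sqrt(dim + lam)         # sigma-point spread: sqrt(3)
w_j = 1 / (2 * (dim + lam))                # 1/6
w_m = lam / (lam + dim)                    # 0
w_0 = lam / (lam + dim) + 3 - alpha ** 2   # 2

assert np.isclose(sqrt_d_lambda, np.sqrt(3))
assert np.isclose(w_j, 1 / 6) and w_m == 0 and w_0 == 2
```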
-
-
- def propagation(self, omega, dt):
- """UKF propagation step.
-
- .. math::
-
- \\boldsymbol{\\hat{\\chi}}_{n} &\\leftarrow
- \\boldsymbol{\\hat{\\chi}}_{n+1} =
- f\\left(\\boldsymbol{\\hat{\\chi}}_{n}, \\boldsymbol{\\omega}_{n},
- \\mathbf{0}\\right) \\\\
- \\mathbf{P}_{n} &\\leftarrow \\mathbf{P}_{n+1} \\\\
-
- Mean state and covariance are propagated.
-
- :var omega: input :math:`\\boldsymbol{\\omega}`.
- :var dt: integration step :math:`dt` (s).
- """
-
- P = self.P + self.TOL*self.Id_d
-
- # update mean
- w = np.zeros(self.q)
- new_state = self.f(self.state, omega, w, dt)
-
- # compute covariance w.r.t. state uncertainty
- w_d = self.weights.d
-
- # set sigma points
- #pdb.set_trace()
- xis = w_d.sqrt_d_lambda * np.linalg.cholesky(P).T
- new_xis = np.zeros((2*self.d, self.d))
-
- # retract sigma points onto manifold
- #pdb.set_trace()
- for j in range(self.d):
- s_j_p = self.phi(self.state, xis[j])
- s_j_m = self.phi(self.state, -xis[j])
- new_s_j_p = self.f(s_j_p, omega, w, dt)
- new_s_j_m = self.f(s_j_m, omega, w, dt)
- new_xis[j] = self.phi_inv(new_state, new_s_j_p)
- new_xis[self.d + j] = self.phi_inv(new_state, new_s_j_m)
-
- # compute covariance
- new_xi = w_d.wj * np.sum(new_xis, 0)
- new_xis = new_xis - new_xi
-
-
-
- new_P = w_d.wj * new_xis.T.dot(new_xis) + \
- w_d.w0*np.outer(new_xi, new_xi)
-
- #pdb.set_trace()
-
- # compute covariance w.r.t. noise
- w_q = self.weights.q
- new_xis = np.zeros((2*self.q, self.d))
-
- # retract sigma points onto manifold
- for j in range(self.q):
- w_p = w_q.sqrt_d_lambda * self.cholQ[j]
- w_m = -w_q.sqrt_d_lambda * self.cholQ[j]
- new_s_j_p = self.f(self.state, omega, w_p, dt)
- new_s_j_m = self.f(self.state, omega, w_m, dt)
- new_xis[j] = self.phi_inv(new_state, new_s_j_p)
- new_xis[self.q + j] = self.phi_inv(new_state, new_s_j_m)
-
- # compute covariance
- new_xi = w_q.wj * np.sum(new_xis, 0)
- new_xis = new_xis - new_xi
-
- #pdb.set_trace()
- Q = w_q.wj * new_xis.T.dot(new_xis) + w_q.w0*np.outer(new_xi, new_xi)
-
- # sum covariances
- self.P = new_P + Q
- self.state = new_state
-
- def update(self, y):
- """UKF update step.
-
- .. math::
-
- \\boldsymbol{\\hat{\\chi}}_{n} &\\leftarrow \\boldsymbol{\\hat{\\chi}}
- _{n}^{+} \\\\
- \\mathbf{P}_{n} &\\leftarrow \\mathbf{P}_{n}^{+} \\\\
-
- :var y: 1D array (vector) measurement :math:`\\mathbf{y}_n`.
- """
- #pdb.set_trace()
- P = self.P + self.TOL*self.Id_d
-
- # set sigma points
- w_d = self.weights.d
- xis = w_d.sqrt_d_lambda * np.linalg.cholesky(P).T
-
- # compute measurement sigma_points
- ys = np.zeros((2*self.d, self.l))
- #pdb.set_trace()
- hat_y = self.h(self.state)
- for j in range(self.d):
- s_j_p = self.phi(self.state, xis[j])
- s_j_m = self.phi(self.state, -xis[j])
- ys[j] = self.h(s_j_p)
- ys[self.d + j] = self.h(s_j_m)
-
- #pdb.set_trace()
-
- # measurement mean
- y_bar = w_d.wm * hat_y + w_d.wj * np.sum(ys, 0)
-
- # prune mean before computing covariance
- ys = ys - y_bar
- hat_y = hat_y - y_bar
-
- # compute covariance and cross covariance matrices
- P_yy = w_d.w0*np.outer(hat_y, hat_y) + w_d.wj*ys.T.dot(ys) + self.R
- P_xiy = w_d.wj*np.hstack([xis.T, -xis.T]).dot(ys)
-
- # Kalman gain
- K = np.linalg.solve(P_yy, P_xiy.T).T
- # update state
- xi_plus = K.dot(y - y_bar)
- self.state = self.phi(self.state, xi_plus)
-
- # update covariance
- self.P = P - K.dot(P_yy).dot(K.T)
- # avoid non-symmetric matrix
- self.P = (self.P + self.P.T)/2
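A minimal usage sketch on a toy one-dimensional system, where the "manifold" is just ℝ and the retraction is addition, so the filter reduces to an ordinary Kalman filter. The noise levels, α values, and measurement are invented, and the import assumes the removed module were still importable:

```python
import numpy as np
from dair_pll.ukf import UKF  # hypothetical import of the removed module


def f(state, omega, w, dt):          # propagation: x' = x + (omega + w) * dt
    return state + (omega + w) * dt

def h(state):                        # observation: measure the state directly
    return state.copy()

def phi(state, xi):                  # retraction: plain addition on R
    return state + xi

def phi_inv(state, state_hat):       # inverse retraction
    return state_hat - state


Q = np.array([[1e-2]])               # process noise covariance
R = np.array([[1e-1]])               # measurement noise covariance
alpha = np.array([1.0, 1.0, 1.0])    # sigma-point spread parameters

ukf = UKF(f, h, phi, phi_inv, Q, R, alpha,
          state0=np.zeros(1), P0=np.eye(1))

ukf.propagation(omega=1.0, dt=0.1)   # predict: mean ~ 0.1, covariance grows by Q
ukf.update(y=np.array([0.12]))       # correct toward the measurement
print(ukf.state, ukf.P)
```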
-
-
-class JUKF:
- """The Unscented Kalman Filter on (parallelizable) Manifolds, that infers
- Jacobian.
-
- This filter is an alternative implementation to the method described in
- :cite:`brossardCode2019`, with exactly the same results. It spares
- computational time for systems when only a part of the state is involved in
- a propagation or update step. It can also be used for state augmentation.
- Only noise covariance parameter for propagation is assumed static for
- convenience, i.e. :math:`\\mathbf{Q}_n = \\mathbf{Q}`.
-
- :arg f: propagation function :math:`f`.
- :arg h: observation function :math:`h`.
- :arg phi: retraction :math:`\\boldsymbol{\\varphi}`.
- :ivar Q: propagation noise covariance matrix (static) :math:`\\mathbf{Q}`.
- :arg alpha: sigma point parameters. Must be 1D array with 5 values.
- :ivar state: state :math:`\\boldsymbol{\\hat{\\chi}}_n`, initialized at
- ``state0``.
- :ivar P: state uncertainty covariance :math:`\\mathbf{P}_n`, initialized at
- ``P0``.
- :arg red_phi: reduced retraction for propagation.
- :arg red_phi_inv: reduced inverse retraction for propagation.
- :arg red_idxs: indices corresponding to the reduced uncertainty.
- :arg up_phi: retraction for update.
- :arg up_idxs: indices corresponding to the state uncertainty for update.
- :arg aug_z: augmentation function :math:`z`. (optional)
- :arg aug_phi: retraction for augmenting state. (optional)
- :arg aug_phi_inv: inverse retraction for augmenting state. (optional)
- :arg aug_idxs: indices corresponding to the state uncertainty for state
- augmentation. (optional)
- :arg aug_q: state uncertainty dimension for augmenting state. (optional)
- """
-
- def __init__(self, f, h, phi, Q, alpha, state0, P0, red_phi,
- red_phi_inv, red_idxs, up_phi, up_idxs,
- aug_z=None, aug_phi=None, aug_phi_inv=None, aug_idxs=np.array([0]),
- aug_q=1):
- self.state = state0
- self.P = P0
- self.f = f
- self.h = h
- self.Q = Q
- self.cholQ = np.linalg.cholesky(Q).T
- self.phi = phi
-
- self.new_state = self.state
- self.F = np.eye(self.P.shape[0])
- self.G = np.zeros((self.P.shape[0], self.Q.shape[0]))
- self.H = np.zeros((0, self.P.shape[0]))
- self.r = np.zeros(0)
- self.R = np.zeros((0, 0))
-
- self.TOL = 1e-9
- self.red_idxs = red_idxs
- self.red_d = red_idxs.shape[0]
- self.up_idxs = up_idxs
- self.up_d = up_idxs.shape[0]
- self.q = Q.shape[0]
-
- # reducing state during propagation
- self.red_phi = red_phi
- self.red_phi_inv = red_phi_inv
- self.red_idxs = red_idxs
-
- # reducing state during update
- self.up_idxs = up_idxs
- self.up_phi = up_phi
-
- # for augmenting state
- self.aug_z = aug_z
- self.aug_d = aug_idxs.shape[0]
- self.aug_idxs = aug_idxs
- self.aug_phi = aug_phi
- self.aug_phi_inv = aug_phi_inv
- self.aug_q = aug_q
-
- self.weights = self.WEIGHTS(self.red_d, Q.shape[0], self.up_d,
- self.aug_d, self.aug_q, alpha)
-
- class WEIGHTS:
- """Sigma point weights.
-
- Weights are computed as:
-
- .. math::
-
- \\lambda &= (\\alpha^2 - 1) \\mathrm{dim}, \\\\
- w_j &= 1/(2(\\mathrm{dim} + \\lambda)), \\\\
- w_m &= \\lambda/(\\lambda + \\mathrm{dim}), \\\\
- w_0 &= \\lambda/(\\lambda + \\mathrm{dim}) + 3 - \\alpha^2,
-
- where :math:`\\alpha` is a parameter set between :math:`10^{-3}` and
- :math:`1`, and :math:`\\mathrm{dim}` the dimension of the sigma-points.
-
- This variable contains sigma point weights for propagation (w.r.t. state
- uncertainty and noise), update and state augmentation.
- """
- def __init__(self, red_d, q, up_d, aug_d, aug_q, alpha):
- self.red_d = self.W(red_d, alpha[0])
- self.q = self.W(q, alpha[1])
- self.up_d = self.W(up_d, alpha[2])
- self.aug_d = self.W(aug_d, alpha[3])
- self.aug_q = self.W(aug_q, alpha[4])
-
- class W:
- def __init__(self, l, alpha):
- m = (alpha**2 - 1) * l
- self.sqrt_d_lambda = np.sqrt(l + m)
- self.wj = 1/(2*(l + m))
- self.wm = m/(m + l)
- self.w0 = m/(m + l) + 3 - alpha**2
-
- def F_num(self, omega, dt):
- """Numerical Jacobian computation of :math:`\mathbf{F}`.
-
- :var omega: input :math:`\\boldsymbol{\\omega}`.
- :var dt: integration step :math:`dt` (s).
- """
- P = self.P[np.ix_(self.red_idxs, self.red_idxs)]
- self.F = np.eye(self.P.shape[0])
- # variable sizes
- d = P.shape[0]
- P = P + self.TOL*np.eye(d)
- w = np.zeros(self.q)
-
- w_d = self.weights.red_d
-
- # set sigma points
- xis = w_d.sqrt_d_lambda * np.linalg.cholesky(P).T
- new_xis = np.zeros((2*d, d))
-
- # retract sigma points onto manifold
- for j in range(d):
- s_j_p = self.red_phi(self.state, xis[j])
- s_j_m = self.red_phi(self.state, -xis[j])
- new_s_j_p = self.f(s_j_p, omega, w, dt)
- new_s_j_m = self.f(s_j_m, omega, w, dt)
- new_xis[j] = self.red_phi_inv(self.new_state, new_s_j_p)
- new_xis[d + j] = self.red_phi_inv(self.new_state, new_s_j_m)
-
- # compute covariance
- new_xi = w_d.wj * np.sum(new_xis, 0)
- new_xis = new_xis - new_xi
-
- Xi = w_d.wj * new_xis.T.dot(np.vstack([xis, -xis]))
- self.F[np.ix_(self.red_idxs, self.red_idxs)] = \
- np.linalg.solve(P, Xi.T).T # Xi*P_red^{-1}
-
- def propagation(self, omega, dt):
- """UKF propagation step.
-
- .. math::
-
- \\boldsymbol{\\hat{\\chi}}_{n} &\\leftarrow \\boldsymbol{\\hat{\\chi}}
- _{n+1} = f\\left(\\boldsymbol{\\hat{\\chi}}_{n},
- \\boldsymbol{\\omega}_{n}, \\mathbf{0}\\right) \\\\
- \\mathbf{P}_{n} &\\leftarrow \\mathbf{P}_{n+1} = \\mathbf{F}
- \\mathbf{P}_{n} \\mathbf{F}^T + \\mathbf{G} \\mathbf{Q}
- \\mathbf{G}^T \\\\
-
- Mean state and covariance are propagated. Covariance is propagated as
- an EKF, where Jacobian :math:`\\mathbf{F}` and :math:`\\mathbf{G}` are
- *numerically* inferred.
-
- :var omega: input :math:`\\boldsymbol{\\omega}`.
- :var dt: integration step :math:`dt` (s).
- """
-
- self.state_propagation(omega, dt)
- self.F_num(omega, dt)
- self.G_num(omega, dt)
- self.cov_propagation()
-
- def state_propagation(self, omega, dt):
- """Propagate mean state.
-
- :var omega: input :math:`\\boldsymbol{\\omega}`.
- :var dt: integration step :math:`dt` (s).
- """
- w = np.zeros(self.q)
- self.new_state = self.f(self.state, omega, w, dt)
-
- def G_num(self, omega, dt):
- """Numerical Jacobian computation of :math:`\mathbf{G}`.
-
- :var omega: input :math:`\\boldsymbol{\\omega}`.
- :var dt: integration step :math:`dt` (s).
- """
- w_q = self.weights.q
- new_xis = np.zeros((2*self.q, self.red_d))
-
- # retract sigma points onto manifold
- for j in range(self.q):
- w_p = w_q.sqrt_d_lambda * self.cholQ[j]
- w_m = -w_q.sqrt_d_lambda * self.cholQ[j]
- new_s_j_p = self.f(self.state, omega, w_p, dt)
- new_s_j_m = self.f(self.state, omega, w_m, dt)
- new_xis[j] = self.red_phi_inv(self.new_state, new_s_j_p)
- new_xis[self.q + j] = self.red_phi_inv(self.new_state, new_s_j_m)
-
- # compute covariance
- new_xi = w_q.wj * np.sum(new_xis, 0)
- new_xis = new_xis - new_xi
- Xi = w_q.wj * new_xis.T.dot(np.vstack([self.cholQ, -self.cholQ])) \
- *w_q.sqrt_d_lambda
- self.G = np.zeros((self.P.shape[0], self.q))
- self.G[self.red_idxs] = np.linalg.solve(self.Q, Xi.T).T # Xi*P_red^{-1}
-
- def cov_propagation(self):
- """Covariance propagation.
-
- :var omega: input :math:`\\boldsymbol{\\omega}`.
- :var dt: integration step :math:`dt` (s).
- """
- P = self.F.dot(self.P).dot(self.F.T) + self.G.dot(self.Q).dot(self.G.T)
- self.P = (P+P.T)/2
- self.state = self.new_state
-
- def update(self, y, R):
- """State update, where Jacobian is computed.
-
- :var y: 1D array (vector) measurement :math:`\\mathbf{y}_n`.
- :var R: measurement covariance :math:`\\mathbf{R}_n`.
- """
- self.H_num(y, self.up_idxs, R)
- self.state_update()
-
- def H_num(self, y, idxs, R):
- """Numerical Jacobian computation of :math:`\mathbf{H}`.
-
- :var y: 1D array (vector) measurement :math:`\\mathbf{y}_n`.
- :var idxs: indices corresponding to the state uncertainty for update.
- :var R: measurement covariance :math:`\\mathbf{R}_n`.
- """
-
- P = self.P[np.ix_(idxs, idxs)]
- # set variable size
- d = P.shape[0]
- l = y.shape[0]
-
- P = P + self.TOL*np.eye(d)
-
- # set sigma points
- w_u = self.weights.up_d
- xis = w_u.sqrt_d_lambda * np.linalg.cholesky(P).T
-
- # compute measurement sigma_points
- y_mat = np.zeros((2*d, l))
- hat_y = self.h(self.state)
- for j in range(d):
- s_j_p = self.up_phi(self.state, xis[j])
- s_j_m = self.up_phi(self.state, -xis[j])
- y_mat[j] = self.h(s_j_p)
- y_mat[d + j] = self.h(s_j_m)
-
- # measurement mean
- y_bar = w_u.wm * hat_y + w_u.wj * np.sum(y_mat, 0)
- # prune mean before computing covariance
- y_mat = y_mat - y_bar
-
- Y = w_u.wj*y_mat.T.dot(np.vstack([xis, -xis]))
- H_idx = np.linalg.solve(P, Y.T).T # Y*P_red^{-1}
-
- H = np.zeros((y.shape[0], self.P.shape[0]))
- H[:, idxs] = H_idx
-
- # compute residual
- r = y - y_bar
-
- self.H = np.vstack((self.H, H))
- self.r = np.hstack((self.r, r))
- self.R = block_diag(self.R, R)
-
- def state_update(self):
- """State update, once Jacobian is computed.
- """
-
- S = self.H.dot(self.P).dot(self.H.T) + self.R
- # gain matrix
- K = np.linalg.solve(S, self.P.dot(self.H.T).T).T
-
- # innovation
- xi = K.dot(self.r)
-
- # update state
- self.state = self.phi(self.state, xi)
-
- # update covariance
- P = (np.eye(self.P.shape[0])-K.dot(self.H)).dot(self.P)
- self.P = (P+P.T)/2
-
- # init for next update
- self.H = np.zeros((0, self.P.shape[0]))
- self.r = np.zeros(0)
- self.R = np.zeros((0, 0))
-
- def aug(self, y, aug_idxs, R):
- """State augmentation.
-
- :var y: 1D array (vector) measurement :math:`\\mathbf{y}_n`.
- :var aug_idxs: indices corresponding to the state augmentation
- uncertainty.
- :var R: measurement covariance :math:`\\mathbf{R}_n`.
- """
-
- P = self.P[np.ix_(aug_idxs, aug_idxs)] + self.TOL*np.eye(self.aug_d)
-
- # augment state mean
- aug_state = self.aug_z(self.state, y)
-
- # compute Jacobian and covariance from state
- # set sigma points w.r.t. state
- w_d = self.weights.aug_d
- xis = w_d.sqrt_d_lambda * np.linalg.cholesky(P).T
-
- # compute measurement sigma_points
- zs = np.zeros((2*self.aug_d, self.aug_q))
- for j in range(self.aug_d):
- s_j_p = self.aug_phi(self.state, xis[j])
- s_j_m = self.aug_phi(self.state, -xis[j])
- z_j_p = self.aug_z(s_j_p, y)
- z_j_m = self.aug_z(s_j_m, y)
- zs[j] = self.aug_phi_inv(aug_state, z_j_p)
- zs[self.aug_d + j] = self.aug_phi_inv(aug_state, z_j_m)
-
- # measurement mean
- z_bar = w_d.wj * np.sum(zs, 0)
-
- # prune mean before computing covariance
- zs = zs - z_bar
- P_ss = w_d.wj * zs.T.dot(zs) + w_d.w0*np.outer(z_bar, z_bar)
-
- Xi = w_d.wj * zs.T.dot(np.vstack([xis, -xis]))
- H = np.zeros((self.aug_q, self.P.shape[0]))
- H[:, aug_idxs] = np.linalg.solve(P, Xi.T).T # Xi*P^{-1}
-
- # compute covariance from measurement
- # set sigma points w.r.t. noise
- w_q = self.weights.aug_q
- y_mat = w_q.sqrt_d_lambda * np.linalg.cholesky(R).T
-
- # compute measurement sigma_points
- zs = np.zeros((2*R.shape[0], self.aug_q))
- for j in range(R.shape[0]):
- y_j_p = y + y_mat[j]
- y_j_m = y - y_mat[j]
- z_j_p = self.aug_z(aug_state, y_j_p)
- z_j_m = self.aug_z(aug_state, y_j_m)
- zs[j] = self.aug_phi_inv(aug_state, z_j_p)
- zs[self.aug_q + j] = self.aug_phi_inv(aug_state, z_j_m)
-
- # measurement mean
- z_bar = w_q.wj * np.sum(zs, 0)
-
- # prune mean before computing covariance
- zs = zs - z_bar
- P_zz = w_q.wj * zs.T.dot(zs) + w_q.w0*np.outer(z_bar, z_bar)
-
- # compute augmented covariance
- P_sz = H.dot(self.P)
- P2 = np.zeros((self.P.shape[0] + 2, self.P.shape[0] + 2))
- P2[:self.P.shape[0], :self.P.shape[0]] = self.P
- P2[:self.P.shape[0], self.P.shape[0]:] = P_sz.T
- P2[self.P.shape[0]:, :self.P.shape[0]] = P_sz
- P2[self.P.shape[0]:, self.P.shape[0]:] = P_ss + P_zz
- self.P = P2
-
- self.state = aug_state
-
- # init for next update
- self.H = np.zeros((0, self.P.shape[0]))
- self.r = np.zeros(0)
- self.R = np.zeros((0, 0))
diff --git a/dair_pll_old/dair_pll/urdf_utils.py b/dair_pll_old/dair_pll/urdf_utils.py
deleted file mode 100644
index 37f71c9..0000000
--- a/dair_pll_old/dair_pll/urdf_utils.py
+++ /dev/null
@@ -1,404 +0,0 @@
-"""Utility functions for generating URDF's for a given multibody system.
-
-The ``URDFFindOrDefault`` class searches for elements in an urdf's xml tree,
-and places a default in the event that the element does not exist. Many
-string literals are instantiated here for convenience.
-
-The ``UrdfGeometryRepresentationFactory`` generates URDF XML representations
-of a ``CollisionGeometry``, and ``fill_link_with_parameterization`` dumps
-these representations into a URDF "link" tag.
-"""
-import os.path
-from typing import Dict, List, Optional, Tuple, cast
-from xml.etree import ElementTree
-from xml.etree.ElementTree import register_namespace
-
-from torch import Tensor
-
-from dair_pll import drake_utils, file_utils
-from dair_pll.deep_support_function import extract_obj_from_support_function, \
- extract_obj_from_mesh_summary, get_mesh_summary_from_polygon
-from dair_pll.geometry import CollisionGeometry, Box, Sphere, Polygon, \
- DeepSupportConvex
-from dair_pll.inertia import InertialParameterConverter
-from dair_pll.multibody_terms import MultibodyTerms
-
-# tags
-_ORIGIN = "origin"
-_MASS = "mass"
-_INERTIA = "inertia"
-_INERTIAL = "inertial"
-_VISUAL = "visual"
-_COLLISION = "collision"
-_GEOMETRY = "geometry"
-_BOX = "box"
-_SPHERE = "sphere"
-_CYLINDER = "cylinder"
-_MESH = "mesh"
-_DRAKE_URL = "https://drake.mit.edu/"
-_PROXIMITY_PROPERTIES = "proximity_properties"
-_DRAKE_PROXIMITY_PROPERTIES = '{' + _DRAKE_URL + '}' + _PROXIMITY_PROPERTIES
-_MU_STATIC = "mu_static"
-_DRAKE_MU_STATIC = '{' + _DRAKE_URL + '}' + _MU_STATIC
-
-# attributes
-_VALUE = "value"
-_SIZE = "size"
-_RADIUS = "radius"
-_LENGTH = "length"
-_FILENAME = "filename"
-_XYZ = "xyz"
-_RPY = "rpy"
-_IXX = "ixx"
-_IYY = "iyy"
-_IZZ = "izz"
-_IXY = "ixy"
-_IXZ = "ixz"
-_IYZ = "iyz"
-_INERTIAL_ATTRIBUTES = [_IXX, _IYY, _IZZ, _IXY, _IXZ, _IYZ]
-
-# values
-_ZERO_FLOAT_3 = "0. 0. 0."
-_ZERO_FLOAT = "0."
-
-_POSE_ATTR = {_XYZ: _ZERO_FLOAT_3, _RPY: _ZERO_FLOAT_3}
-_SCALAR_ATTR = {_VALUE: _ZERO_FLOAT}
-
-_URDF_DEFAULT_TREE: Dict[str, List] = {
- _ORIGIN: [],
- _MASS: [],
- _INERTIA: [],
- _INERTIAL: [_ORIGIN, _MASS, _INERTIA],
- _GEOMETRY: [],
- _DRAKE_MU_STATIC: [],
- _DRAKE_PROXIMITY_PROPERTIES: [_DRAKE_MU_STATIC],
- _VISUAL: [_GEOMETRY, _ORIGIN],
- _COLLISION: [_GEOMETRY, _ORIGIN, _DRAKE_PROXIMITY_PROPERTIES],
- _BOX: [],
- _SPHERE: [],
- _CYLINDER: []
-} # pylint: disable=C0303
-"""Default tree structure for URDF elements.
-
-Example:
-    ``<inertial>`` elements contain ``<origin>``, ``<mass>``, and ``<inertia>``
-    sub-elements, thus::
- _URDF_DEFAULT_TREE[_INERTIAL] == [_ORIGIN, _MASS, _INERTIA]
-"""
-
-_URDF_DEFAULT_ATTRIBUTES: Dict[str, Dict] = {
- _ORIGIN: _POSE_ATTR,
- _MASS: _SCALAR_ATTR,
- _INERTIA: {i: _ZERO_FLOAT for i in _INERTIAL_ATTRIBUTES},
- _INERTIAL: {},
- _BOX: {
- _SIZE: _ZERO_FLOAT_3
- },
- _SPHERE: {
- _RADIUS: _ZERO_FLOAT
- },
- _CYLINDER: {
- _RADIUS: _ZERO_FLOAT,
- _LENGTH: _ZERO_FLOAT
- },
- _GEOMETRY: {},
- _VISUAL: {},
- _COLLISION: {},
- _DRAKE_PROXIMITY_PROPERTIES: {},
- _DRAKE_MU_STATIC: _SCALAR_ATTR
-}
-"""Default element attributes for URDFs.
-
-Example:
-    the ``<sphere>`` tag contains a "radius" parameter with float value, so::
- _URDF_DEFAULT_ATTRIBUTES[_SPHERE] == {_RADIUS: _ZERO_FLOAT}
-"""
-
-
-class UrdfFindOrDefault:
- """URDF XML tool to automatically fill in default element tree structures.
-
- URDF's often represent an identifiable unit (e.g. a body's spatial
-    inertia) as a subtree of XML elements. ``UrdfFindOrDefault`` implements a
- generalization of the ``xml.etree.ElementTree.find()`` method, which fills
- in a default subtree according to the tree structure given in
- ``_URDF_DEFAULT_TREE``, with each element given tags according to
- ``_URDF_DEFAULT_ATTRIBUTES``.
-
- Typical usage example::
-
-        # element is an empty <inertial> element
-        # obtain default mass
-        mass_element = UrdfFindOrDefault.find(element, "mass")
-
-        # element is now <inertial><mass value="0."/></inertial>
-        # mass_element is now the child of element, <mass value="0."/>
-
- """
-
- @staticmethod
- def find(element: ElementTree.Element,
- sub_element_type: str) -> ElementTree.Element:
- """Finds an XML sub-element of specified type, adding a default
- element of that type if necessary.
-
- Args:
- element: Element containing the sub-element.
- sub_element_type: Name of the sub-element type.
-
- Returns:
- An ``ElementTree.Element``, of type ``sub_element_type``, which is a
- child of the argument element that either (a) one which existed
- before the function call or (b) the root of a new, default subtree.
-
- Todo:
- * properly consider case where ``element`` already has multiple
- sub-elements of given type.
- """
- current_sub_element: Optional[ElementTree.Element] = element.find(
- sub_element_type)
- if current_sub_element is None:
- default_sub_element = \
- UrdfFindOrDefault.generate_default_element(sub_element_type)
- element.append(default_sub_element)
- return default_sub_element
- return current_sub_element
-
- @staticmethod
- def generate_default_element(element_type: str) -> ElementTree.Element:
- """Generates a default ``ElementTree.Element`` subtree of given type.
-
- Args:
- element_type: Name of the new default element type.
-
- Returns:
- A default ``ElementTree.Element`` of type ``element_type``.
- """
- default_element = ElementTree.Element(element_type)
- default_element.attrib = _URDF_DEFAULT_ATTRIBUTES[element_type]
- for child_element_type in _URDF_DEFAULT_TREE[element_type]:
- default_element.append(
- UrdfFindOrDefault.generate_default_element(child_element_type))
- return default_element
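A short usage sketch (assuming the removed module were importable as `dair_pll.urdf_utils`): asking an empty link for its inertial and mass elements creates the default subtree on the fly:

```python
from xml.etree import ElementTree

from dair_pll.urdf_utils import UrdfFindOrDefault  # hypothetical import, illustration only

link = ElementTree.Element("link", {"name": "body"})

# No <inertial> child exists yet, so a default one (with <origin>, <mass>, and
# <inertia> children) is created, appended to the link, and returned.
inertial = UrdfFindOrDefault.find(link, "inertial")
mass = UrdfFindOrDefault.find(inertial, "mass")  # already exists; just returned
mass.set("value", "1.5")

print(ElementTree.tostring(link, encoding="unicode"))
# <link name="body"><inertial><origin .../><mass value="1.5"/><inertia .../></inertial></link>
```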
-
-
-class UrdfGeometryRepresentationFactory:
- """Utility class for generating URDF representations of
- ``CollisionGeometry`` instances."""
-
- @staticmethod
- def representation(geometry: CollisionGeometry, output_dir: str,
- link_name: str = None) -> Tuple[str, Dict[str, str]]:
- """Representation of an associated URDF tag that describes the
- properties of this geometry.
-
-        Tags are expected to be put inside a ``<geometry>`` tag in the URDF
- file.
-
- Example:
-            To output ``<sphere radius="5.1"/>`` for a ``Sphere``, return the
-            following::
-
-                ('sphere', {'radius': '5.1'})
-
-        Args:
- geometry: collision geometry to be represented
- output_dir: File directory to store helper files (e.g., meshes).
- link_name: name of link. This only matters for mesh/polygon
- geometries so their externally stored .obj files have unique
- names.
-
- Returns:
- URDF tag and attributes.
- """
- if isinstance(geometry, Polygon):
- return UrdfGeometryRepresentationFactory.polygon_representation(
- geometry, output_dir, link_name)
- if isinstance(geometry, Box):
- return UrdfGeometryRepresentationFactory.box_representation(
- geometry)
- if isinstance(geometry, Sphere):
- return UrdfGeometryRepresentationFactory.sphere_representation(
- geometry)
- if isinstance(geometry, DeepSupportConvex):
- return UrdfGeometryRepresentationFactory.mesh_representation(
- geometry, output_dir, link_name)
- raise TypeError(
- "Unsupported type for CollisionGeometry() to"
- "URDF representation conversion:", type(geometry))
-
- @staticmethod
- def polygon_representation(polygon: Polygon, output_dir: str,
- link_name: str) -> Tuple[str, Dict[str, str]]:
- """Returns URDF representation as ``mesh`` tag with name of saved
- mesh file."""
- mesh_name = f"{link_name}.obj"
- mesh_path = os.path.join(output_dir, mesh_name)
- mesh_summary = get_mesh_summary_from_polygon(polygon)
- file_utils.save_string(
- mesh_path,
- extract_obj_from_mesh_summary(mesh_summary))
-
- return _MESH, {_FILENAME: mesh_name}
-
- @staticmethod
- def box_representation(box: Box) -> Tuple[str, Dict[str, str]]:
- """Returns URDF representation as ``box`` tag with full-length sizes."""
- size = ' '.join([str(2 * i.item()) for i in \
- box.get_half_lengths().view(-1)])
- return _BOX, {_SIZE: size}
-
- @staticmethod
- def sphere_representation(sphere: Sphere) -> Tuple[str, Dict[str, str]]:
- """Returns URDF representation as ``sphere`` tag with radius
- attribute."""
- return _SPHERE, {_RADIUS: str(sphere.get_radius().item())}
-
- @staticmethod
- def mesh_representation(convex: DeepSupportConvex, output_dir: str,
- link_name: str) -> Tuple[str, Dict[str, str]]:
- """Returns URDF representation as ``mesh`` tag with name of saved
- mesh file."""
- mesh_name = f"{link_name}.obj"
- mesh_path = os.path.join(output_dir, mesh_name)
- file_utils.save_string(
- mesh_path,
- extract_obj_from_support_function(convex.network))
-
- return _MESH, {_FILENAME: mesh_name}
-
-
-def fill_link_with_parameterization(element: ElementTree.Element, pi_cm: Tensor,
- geometries: List[CollisionGeometry],
- friction_coeffs: Tensor,
- output_dir: str,
- link_name: str = None) -> None:
- """Convert pytorch inertial and geometric representations to URDF elements.
-
- Args:
- element: XML "link" tag in which representation is stored.
- pi_cm: (10,) inertial representation of link in ``pi_cm``
- parameterization.
- geometries: All geometries attached to body.
- friction_coeffs: All friction coefficients associated with each
- geometry. The ``Tensor`` will be of shape ``(len(geometries),)``.
- output_dir: File directory to store helper files (e.g., meshes).
-
- Warning:
- Does not handle multiple geometries.
- Todo:
- Handle multiple geometries for body.
- Raises:
- NotImplementedError: when multiple geometries are provided.
- """
- # pylint: disable=too-many-locals
- if len(geometries) > 1:
- raise NotImplementedError("generating a URDF with multiple geometries"
- "per body not implemented yet.")
- mass, p_BoBcm_B, I_BBcm_B = \
- InertialParameterConverter.pi_cm_to_urdf(pi_cm)
-
- # This will have to change when function can handle more than one geometry.
- mu = str(friction_coeffs.item())
-
- body_inertial_element = UrdfFindOrDefault.find(element, _INERTIAL)
-
- UrdfFindOrDefault.find(body_inertial_element, _MASS).set(_VALUE, mass)
- UrdfFindOrDefault.find(body_inertial_element, _ORIGIN).set(_XYZ, p_BoBcm_B)
-
- body_inertia_element = UrdfFindOrDefault.find(body_inertial_element,
- _INERTIA)
- body_inertia_element.attrib = dict(zip(_INERTIAL_ATTRIBUTES, I_BBcm_B))
-
- for geometry in geometries:
- collision_element = UrdfFindOrDefault.find(element, _COLLISION)
- visual_element = UrdfFindOrDefault.find(element, _VISUAL)
- geometry_elements = [
- UrdfFindOrDefault.find(collision_element, _GEOMETRY),
- UrdfFindOrDefault.find(visual_element, _GEOMETRY)
- ]
-
- (shape_tag, shape_attributes) = \
- UrdfGeometryRepresentationFactory.representation(geometry,
- output_dir,
- link_name=link_name)
- for geometry_element in geometry_elements:
- shape_element = UrdfFindOrDefault.find(geometry_element, shape_tag)
- shape_element.attrib = shape_attributes
-
- prox_props_element = UrdfFindOrDefault.find(collision_element,
- _DRAKE_PROXIMITY_PROPERTIES)
- UrdfFindOrDefault.find(prox_props_element, _DRAKE_MU_STATIC).set(
- _VALUE, mu)
-
-
-def represent_multibody_terms_as_urdfs(multibody_terms: MultibodyTerms,
- output_dir: str) -> Dict[str, str]:
- """Renders the current parameterization of multibody terms as a
- set of urdfs.
-
- Args:
- multibody_terms: Multibody dynamics representation to convert.
- output_dir: File directory to store helper files (e.g., meshes).
- Returns:
- Dictionary of (urdf name, urdf XML string) pairs.
- Warning:
- For now, assumes that each URDF link element ``e`` gets modeled as a
- corresponding body ``b`` with ``b.name() == e.get("name")``.
- Drake however does not guarantee this relationship. A more stable
- implementation would be to directly edit the MultibodyPlant, but this
- would make the representation less portable.
- """
- # pylint: disable=too-many-locals
- urdf_xml = {}
- _, all_body_ids = \
- drake_utils.get_all_inertial_bodies(
- multibody_terms.plant_diagram.plant,
- multibody_terms.plant_diagram.model_ids)
- pi_cm = multibody_terms.lagrangian_terms.pi_cm()
- friction_coeffs = multibody_terms.contact_terms.get_friction_coefficients()
-
- for urdf_name, urdf in multibody_terms.urdfs.items():
-
- # assumes urdf name mirrors model name
- model_instance_index = \
- multibody_terms.plant_diagram.plant.GetModelInstanceByName(
- urdf_name)
-
- urdf_tree = ElementTree.parse(urdf)
-
- for element in urdf_tree.iter():
- if element.tag == "link":
- link_name = element.get("name")
- assert link_name is not None
-
- body_id = drake_utils.unique_body_identifier(
- multibody_terms.plant_diagram.plant,
- multibody_terms.plant_diagram.plant.GetBodyByName(
- cast(str, element.get("name")),
- model_instance_index))
- if body_id not in all_body_ids:
- # body does not have inertial attributes,
- # for instance, the world body.
- continue
- body_index = all_body_ids.index(body_id)
- body_geometry_indices = \
- multibody_terms.geometry_body_assignment[body_id]
- body_geometries = [
- cast(CollisionGeometry,
- multibody_terms.contact_terms.geometries[index])
- for index in body_geometry_indices
- ]
- body_friction_coeffs = friction_coeffs[body_geometry_indices]
-
- fill_link_with_parameterization(element, pi_cm[body_index, :],
- body_geometries,
- body_friction_coeffs,
- output_dir,
- link_name=link_name)
-
- register_namespace('drake', _DRAKE_URL)
- system_urdf_representation = ElementTree.tostring(
- urdf_tree.getroot(), encoding="utf-8").decode("utf-8")
- urdf_xml[
-            urdf_name] = f'<?xml version="1.0"?>\n{system_urdf_representation}'
- return urdf_xml
diff --git a/dair_pll_old/dair_pll/vector_fields.py b/dair_pll_old/dair_pll/vector_fields.py
deleted file mode 100644
index 6fcb484..0000000
--- a/dair_pll_old/dair_pll/vector_fields.py
+++ /dev/null
@@ -1,133 +0,0 @@
-"""Definitions for randomly generated force vector fields for testing the
-capabilities of residual physics in simulation.
-"""
-
-from abc import ABC, abstractmethod
-import pdb
-from typing import Tuple
-
-import numpy as np
-
-from pydrake.systems.framework import LeafSystem
-
-
-
-ROTATION_PRIMITIVE = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 0]])
-INWARD_PRIMITIVE = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 0]])
-
-
-class ForceVectorField(ABC):
- """Class that keeps track of artificially created force vector fields."""
-
- def __init__(self, n_velocity: int) -> None:
- self.n_velocity = n_velocity
-
- @abstractmethod
- def generalized_force_by_state(self, state: np.ndarray) -> np.ndarray:
- """Given a system state, return a generalized force."""
- pass
-
-
-class VortexForceVectorField(ForceVectorField):
- """Specifically generate a vortex like a toilet bowl."""
-
- def __init__(self, n_velocity: int,
- center_xy: Tuple[float, float] = (0., 0.),
- rotation_scaling: float = 1.,
- inward_scaling: float = 1.,
- height_std_dev: float = 1.) -> None:
- super().__init__(n_velocity)
-
- self.center_x = center_xy[0]
- self.center_y = center_xy[1]
- self.w_rot = rotation_scaling
- self.w_in = inward_scaling
- self.z_std_dev = height_std_dev
-
- def generalized_force_by_state(self, state: np.ndarray) -> np.ndarray:
- """Given a system state, return a generalized force. For this vortex
- force vector field, the generalized force depends only on the system's
- location in space.
-
- TODO:
- - handle batched configurations
- - factor z height into it
- - maybe add a z torque as well: be careful, this would need to be in
- world z coordinates, not body (just need to check if this uses
- world or body coordinates, not sure).
- """
- xyz_loc = state[4:7]
-
- xy_mag = np.linalg.norm(xyz_loc[:2]) + 1e-4
- rotation_mat = self.w_rot * ROTATION_PRIMITIVE / xy_mag
- inward_mat = self.w_in * INWARD_PRIMITIVE / xy_mag
-
- force = (rotation_mat + inward_mat) @ xyz_loc
-
- generalized_force = np.zeros((self.n_velocity))
- generalized_force[3:6] = force
-
- return generalized_force
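A quick numeric check of the field (hypothetical import; the state is made up): a body at (1, 0, 0.2) with rotation scaling 1 and inward scaling 0.5 receives a tangential push in +y and an inward pull in -x.

```python
import numpy as np
from dair_pll.vector_fields import VortexForceVectorField  # hypothetical import

field = VortexForceVectorField(n_velocity=6, rotation_scaling=1.0,
                               inward_scaling=0.5)

# Floating-body state [quaternion(4), xyz(3), velocity(6)].
state = np.concatenate(([1., 0., 0., 0.], [1., 0., 0.2], np.zeros(6)))
gf = field.generalized_force_by_state(state)

print(gf[3:6])  # ~ [-0.5, 1.0, 0.0]: inward pull in -x, tangential push in +y
```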
-
-
-class ViscousDampingVectorField(ForceVectorField):
- """Specifically add viscous damping to linear, angular, and articulation
- velocities."""
-
- def __init__(self, n_velocity: int, w_linear: float = 0.0,
- w_angular: float = 0.0, w_articulation: float = 0.0):
- super().__init__(n_velocity)
-
- self.w_linear = w_linear
- self.w_angular = w_angular
- self.w_articulation = w_articulation
-
- def generalized_force_by_state(self, state: np.ndarray) -> np.ndarray:
- """Given a system state, return a generalized force. For this viscous
- damping vector field, the generalized forces depend only on the
- velocity components of the state.
- """
- vels = state[-self.n_velocity:]
-
- if np.any(np.isnan(vels)):
- pdb.set_trace()
-
- generalized_force = np.concatenate((
- -self.w_linear * vels[:3],
- -self.w_angular * vels[3:6],
- -self.w_articulation * vels[6:]))
-
- return generalized_force
-
-
-
-class ForceVectorFieldInjectorLeafSystem(LeafSystem):
- """Create a Drake ``LeafSystem`` which can inject forces from a force vector
- field into the dynamics of a Multibody Plant.
- """
- def __init__(self, n_state: int, n_velocity: int,
- vector_field: ForceVectorField):
- super().__init__()
-
- # Store the force vector field.
- self.vector_field = vector_field
-
- # Create an input port for the current state of the system.
- self.mbp_state_input_port = self.DeclareVectorInputPort(
- name="mbp_state", size=n_state)
-
- # Create an output port for the generalized forces.
- self.DeclareVectorOutputPort(name="force_vector", size=n_velocity,
- calc=self.CalculateVectorField)
-
- def CalculateVectorField(self, context, output):
- # Evaluate the input ports to obtain the current multibody plant state.
- mbp_state = self.mbp_state_input_port.Eval(context)
-
- # Generate the generalized force from the multibody plant state.
- generalized_force = \
- self.vector_field.generalized_force_by_state(mbp_state)
-
- # Write the output vector.
- output.SetFromVector(generalized_force)
-
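Editor's note: the deleted `ForceVectorFieldInjectorLeafSystem` above only declares its ports; the sketch below shows one hedged way it could be wired into a recent `pydrake` diagram. The port names come from the deleted code, while the builder calls, URDF path, and damping gains are assumptions for illustration.

```python
# Hypothetical wiring sketch (not part of this diff): feed the plant state into
# the injector and apply its output as a generalized external force.
from pydrake.multibody.parsing import Parser
from pydrake.multibody.plant import AddMultibodyPlantSceneGraph
from pydrake.systems.framework import DiagramBuilder

from vector_fields import (ForceVectorFieldInjectorLeafSystem,
                           ViscousDampingVectorField)  # assumed import path

builder = DiagramBuilder()
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=0.0068)
Parser(plant).AddModels("contactnets_cube.urdf")  # placeholder URDF
plant.Finalize()

field = ViscousDampingVectorField(n_velocity=plant.num_velocities(),
                                  w_linear=0.1, w_angular=0.05)
injector = builder.AddSystem(ForceVectorFieldInjectorLeafSystem(
    n_state=plant.num_multibody_states(),
    n_velocity=plant.num_velocities(),
    vector_field=field))

# State out of the plant, generalized force back into the plant.
builder.Connect(plant.get_state_output_port(),
                injector.GetInputPort("mbp_state"))
builder.Connect(injector.GetOutputPort("force_vector"),
                plant.get_applied_generalized_force_input_port())

diagram = builder.Build()
```

The `dair_pll` Drake experiment code presumably did something equivalent when `additional_forces` was requested; this sketch only illustrates the port contract.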
diff --git a/dair_pll_old/dair_pll/vis_utils.py b/dair_pll_old/dair_pll/vis_utils.py
deleted file mode 100644
index 0f79330..0000000
--- a/dair_pll_old/dair_pll/vis_utils.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# """Utility functions for visualizing trajectories.
-
-# Visualization of Drake systems can be done with Drake's VideoWriter. This allows
-# for a relatively thin implementation of visualization for very complex
-# geometries.
-
-# The main contents of this file are as follows:
-
-# * A method to generate a dummy ``DrakeSystem`` which simultaneously
-# visualizes two trajectories of the same system.
-# * A method which takes a ``DrakeSystem`` and corresponding trajectory,
-# captures a visualization video, and outputs it as a numpy ndarray.
-# """
-# from copy import deepcopy
-# from typing import Tuple, Optional
-
-# import numpy as np
-# import torch
-# from torch import Tensor
-# from PIL import Image
-# # pylint: disable-next=import-error
-# from pydrake.geometry import Role, RoleAssign, Rgba # type: ignore
-# from torch import Tensor
-
-# from dair_pll.drake_system import DrakeSystem
-# from dair_pll import file_utils
-
-# RESOLUTION = [640, 480]
-# RED = Rgba(0.6, 0.0, 0.0, 0.5)
-# BLUE = Rgba(0.0, 0.0, 0.6, 0.7)
-# BASE_SYSTEM_DEFAULT_COLOR = RED
-# LEARNED_SYSTEM_DEFAULT_COLOR = BLUE
-# PERCEPTION_COLOR_GROUP = 'phong'
-# PERCEPTION_COLOR_PROPERTY = 'diffuse'
-# LEARNED_TAG = '__learned__'
-# GEOMETRY_INSPECTION_TRAJECTORY_LENGTH = 1000
-# HALF_STEPS = int(GEOMETRY_INSPECTION_TRAJECTORY_LENGTH/2)
-
-# FULL_SPIN_HALF_TIME = torch.stack([
-# Tensor([np.cos(torch.pi*i/HALF_STEPS), 0, 0,
-# np.sin(torch.pi*i/HALF_STEPS)]) \
-# for i in range(HALF_STEPS)
-# ])
-# ARTICULATION_FULL_SPIN_HALF_TIME = torch.stack([
-# Tensor([2*torch.pi*i/HALF_STEPS])
-# for i in range(HALF_STEPS)
-# ])
-# LINEAR_LOCATION_HALF_TIME = Tensor([1.2, 0, 0.15]).repeat(HALF_STEPS, 1)
-
-
-# def get_geometry_inspection_trajectory(learned_system: DrakeSystem) -> Tensor:
-# """Return a trajectory to use to inspect the learned geometry of a system.
-
-#     Notes:
-#         Only works for the cube and elbow experiments, and more generally for
-#         a single floating body with or without one articulation joint.
-
-# Args:
-# learned_system: This system is used as an input only for determining the
-# size of the system's state space.
-
-# Returns:
-# (n_steps, n_x) tensor of a trajectory.
-# """
-# n_q = learned_system.space.n_q
-# n_v = learned_system.space.n_v
-
-# # Velocities don't matter -- set to zero.
-# vels = torch.zeros((HALF_STEPS, n_v))
-
-# # Rotation and articulation depend on if there's articulation or not.
-# if n_q == 7:
-# rotation_piece = torch.cat(
-# (FULL_SPIN_HALF_TIME, LINEAR_LOCATION_HALF_TIME, vels), dim=1)
-
-# trajectory = rotation_piece
-
-# elif n_q == 8:
-# rotation_piece = torch.cat(
-# (FULL_SPIN_HALF_TIME, LINEAR_LOCATION_HALF_TIME,
-# torch.zeros((HALF_STEPS, 1)), vels), dim=1)
-
-# rotate_and_articulate = torch.cat(
-# (FULL_SPIN_HALF_TIME, LINEAR_LOCATION_HALF_TIME,
-# ARTICULATION_FULL_SPIN_HALF_TIME, vels), dim=1)
-
-# trajectory = torch.cat((rotation_piece, rotate_and_articulate), dim=0)
-
-# else:
-#         raise NotImplementedError('Only cube- or elbow-like systems (one '
-#                                   'floating body with an optional single '
-#                                   'articulation joint) are supported.')
-
-# return trajectory
-
-
-# def generate_visualization_system(
-# base_system: DrakeSystem,
-# visualization_file: str,
-# learned_system: Optional[DrakeSystem] = None,
-# base_system_color: Rgba = BASE_SYSTEM_DEFAULT_COLOR,
-# learned_system_color: Rgba = LEARNED_SYSTEM_DEFAULT_COLOR,
-# ) -> DrakeSystem:
-# """Generate a dummy ``DrakeSystem`` for visualizing comparisons between two
-# trajectories of ``base_system``.
-
-# Does so by generating a new ``DrakeSystem`` in which every model in the
-# base system has a copy. Each illustration geometry element in these two
-# copies is uniformly colored to be visually distinguishable.
-
-# The copy of the base system can optionally be rendered in its learned
-# geometry.
-
-# Args:
-# base_system: System to be visualized.
-# visualization_file: Output GIF filename for trajectory video.
-# learned_system: Optionally, the learned system so the predicted
-# trajectory is rendered with the learned geometry.
-#         base_system_color: Color to repaint everything in the base system.
-#         learned_system_color: Color to repaint everything in the duplicated system.
-
-# Returns:
-# New ``DrakeSystem`` with doubled state and repainted elements.
-# """
-# # pylint: disable=too-many-locals
-# # Start with true base system.
-# double_urdfs = deepcopy(base_system.urdfs)
-# double_urdfs.update({
-# k: file_utils.get_geometrically_accurate_urdf(v) for k, v in \
-# double_urdfs.items()
-# })
-
-# # Either copy the base system's geometry or optionally use the learned
-# # geometry.
-# if learned_system is None:
-# double_urdfs.update({
-# (k + LEARNED_TAG): file_utils.get_geometrically_accurate_urdf(v) \
-# for k, v in double_urdfs.items()
-# })
-
-# else:
-# double_urdfs.update({
-# (k + LEARNED_TAG): v for k, v in learned_system.urdfs.items()
-# })
-
-# visualization_system = DrakeSystem(double_urdfs,
-# base_system.dt,
-# visualization_file=visualization_file)
-
-# # Recolors every perception geometry to default colors
-# plant_diagram = visualization_system.plant_diagram
-# plant = plant_diagram.plant
-# scene_graph = plant_diagram.scene_graph
-# scene_graph_context = scene_graph.GetMyContextFromRoot(
-# plant_diagram.sim.get_mutable_context())
-# inspector = scene_graph.model_inspector()
-# for model_id in plant_diagram.model_ids:
-# model_name = plant.GetModelInstanceName(model_id)
-# for body_index in plant.GetBodyIndices(model_id):
-# body_frame = plant.GetBodyFrameIdOrThrow(body_index)
-# for geometry_id in inspector.GetGeometries(body_frame,
-# Role.kPerception):
-# props = inspector.GetPerceptionProperties(geometry_id)
-# # phong.diffuse is the name of property controlling perception
-# # color.
-# if props and \
-# props.HasProperty(PERCEPTION_COLOR_GROUP, \
-# PERCEPTION_COLOR_PROPERTY):
-# # Sets color in properties.
-# props.UpdateProperty(
-# PERCEPTION_COLOR_GROUP, PERCEPTION_COLOR_PROPERTY,
-# learned_system_color
-# if LEARNED_TAG in model_name else base_system_color)
-# # Tells ``scene_graph`` to update the color.
-# plant_source_id = plant.get_source_id()
-# assert plant_source_id is not None
-
-# scene_graph.RemoveRole(scene_graph_context, plant_source_id,
-# geometry_id, Role.kPerception)
-# scene_graph.AssignRole(scene_graph_context, plant_source_id,
-# geometry_id, props, RoleAssign.kNew)
-
-# # Changing perception properties requires the ``Simulator`` to be
-# # re-initialized.
-# plant_diagram.sim.Initialize()
-
-# return visualization_system
-
-
-# def visualize_trajectory(drake_system: DrakeSystem,
-# x_trajectory: Tensor,
-# framerate: int = 30) -> Tuple[np.ndarray, int]:
-# r"""Visualizes trajectory of system.
-
-#     Specifies a ``framerate`` for the output video, though it should be noted
-#     that this framerate is only approximately achieved by homogeneous integer
-#     downsampling of the state trajectory. For example, if ``drake_system.dt
-#     == 1/60`` and ``framerate == 11``, the downsample factor will be
-#     ``max(round(60/11), 1) == 5``, so the true video framerate will be::
-
-#         round(60/5) == 12.
-
-# Args:
-# drake_system: System associated with provided trajectory.
-# x_trajectory: (T, drake_system.space.n_x) state trajectory.
-# framerate: desired frames per second of output video.
-
-# Returns:
-# (1, T, 3, H, W) ndarray video capture of trajectory with resolution
-# H x W, which are set to 480x640 in :py:mod:`dair_pll.drake_utils`.
-# The true framerate, rounded to an integer.
-
-#     Todo:
-#         The only implementation option at the moment is to access various
-#         protected members of :py:class:`pydrake.visualization.VideoWriter`\ .
-#         This function should be updated once `pydrake` exposes this
-#         functionality properly.
-# """
-# assert drake_system.plant_diagram.visualizer is not None
-# assert x_trajectory.dim() == 2
-# # pylint: disable=protected-access
-
-# vis = drake_system.plant_diagram.visualizer
-# sim = drake_system.plant_diagram.sim
-
-# # Downsample trajectory to approximate framerate.
-# temporal_downsample = max(round((1 / drake_system.dt) / framerate), 1)
-# actual_framerate = round((1 / drake_system.dt) / temporal_downsample)
-# x_trajectory = x_trajectory[::temporal_downsample, :]
-
-# # Clear the images before iterating through the trajectory (by default the
-# # video starts with one image of the systems at the origin).
-# vis._pil_images = [] # type: ignore
-
-# # Simulate the system according to the provided data.
-# _, carry = drake_system.sample_initial_condition()
-# for x_current in x_trajectory:
-# drake_system.preprocess_initial_condition(x_current.unsqueeze(0), carry)
-
-# # Force publish video frame.
-# sim_context = sim.get_mutable_context()
-# video_context = vis.GetMyContextFromRoot(sim_context)
-# vis._publish(video_context)
-
-# # Compose a video ndarray of shape (T, H, W, 4[rgba]).
-# video = np.stack([np.asarray(frame) for frame in vis._pil_images
-# ]) # type: ignore
-# vis.Save()
-
-# # Since Drake's VideoWriter defaults to not looping gifs, re-load and re-
-# # save the gif to ensure it loops. This gif is only for debugging purposes,
-# # as the gif gets overwritten with every trajectory. The actual output of
-# # this function is a numpy array.
-#     visualization_image = Image.open(vis._filename)  # type: ignore
-#     new_name = vis._filename.split('.')[0] + '_.gif'  # type: ignore
-#     visualization_image.save(new_name, save_all=True, loop=0)
-#     visualization_image.close()
-
-# # Remove alpha channel and reorder axes to output type.
-# video = np.expand_dims(np.moveaxis(video, 3, 1), 0)
-# video = video[:, :, :3, :, :]
-# return video, actual_framerate
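Editor's note: the framerate handling in the (commented-out) `visualize_trajectory` above boils down to two integer computations; a standalone sketch, with illustrative numbers:

```python
# Standalone sketch of the frame-downsampling arithmetic used above: choose an
# integer stride, then report the framerate actually achieved.
from typing import Tuple


def downsample_framerate(dt: float, requested_fps: int) -> Tuple[int, int]:
    """Return (stride, actual_fps) for data sampled every `dt` seconds."""
    stride = max(round((1.0 / dt) / requested_fps), 1)
    actual_fps = round((1.0 / dt) / stride)
    return stride, actual_fps


assert downsample_framerate(1 / 60, 11) == (5, 12)   # the docstring's example
assert downsample_framerate(0.0068, 30) == (5, 29)   # DT used by the examples
```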
diff --git a/dair_pll_old/dair_pll/wandb_manager.py b/dair_pll_old/dair_pll/wandb_manager.py
deleted file mode 100644
index 7479be2..0000000
--- a/dair_pll_old/dair_pll/wandb_manager.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""Interface for logging training progress to Weights and Biases."""
-import time
-from dataclasses import dataclass
-from typing import Dict, Tuple, Optional, Any
-
-import numpy as np
-import wandb
-
-from dair_pll.hyperparameter import hyperparameter_values
-from dair_pll.system import MeshSummary
-
-WANDB_ALLOW = "allow"
-WANDB_NEVER = "never"
-
-
-def _write_scalars(epoch: int, scalars: Dict[str, float]) -> None:
- """Logs scalars."""
- wandb.log(scalars, step=epoch)
-
-
-def _write_videos(epoch: int, videos: Dict[str, Tuple[np.ndarray,
- int]]) -> None:
- """Logs videos."""
- wandb_videos = {
- video_name: wandb.Video(video_array[0], fps=fps)
- for video_name, (video_array, fps) in videos.items()
- }
- wandb.log(wandb_videos, step=epoch)
-
-
-def _write_meshes(epoch: int, meshes: Dict[str, MeshSummary]) -> None:
- """Logs meshes."""
- wandb_meshes = {
- mesh_name: wandb.Object3D(mesh_summary.vertices.detach().numpy())
- for mesh_name, mesh_summary in meshes.items()
- }
- wandb.log(wandb_meshes, step=epoch)
-
-
-@dataclass
-class WeightsAndBiasesManager:
- """Manages logging of the training process.
-
- Given a set of scalars, videos, and meshes, writes to Weights and Biases
- at https://wandb.ai .
- """
- run_name: str
- """Display name for Weights and Biases experiment run."""
- directory: str
- """Absolute path to store metadata."""
- project_name: str
- """Unique name for W&B project, analogous to a ``dair_pll`` experiment."""
- resume_from_id: Optional[str] = None
- """Allow W&B to resume a unique run ID if provided."""
-
- def _setup_wandb_run_id(self) -> str:
- """Generates unique run ID for Weights and Biases by concatenating
- the run name and a timestamp. If resumption is allowed, returns the
- saved run ID."""
- if self.resume_from_id is not None:
- return self.resume_from_id
-
- timestamp = str(time.time_ns() // 1000)
-
- return f"{self.run_name}_{timestamp}"
-
- def launch(self) -> str:
- r"""Launches experiment run on Weights & Biases.
-
- Returns:
- The run ID of the launched run.
- """
- resuming = self.resume_from_id is not None
- wandb_run_id = self._setup_wandb_run_id()
-
- wandb.init(project=self.project_name,
- dir=self.directory,
- name=self.run_name,
- id=wandb_run_id,
- config={},
- resume=WANDB_ALLOW if resuming else WANDB_NEVER)
-
- return wandb_run_id
-
- @staticmethod
- def log_config(config: Any):
- """Log experiment hyperparameter values."""
- wandb.config.update(hyperparameter_values(config))
- wandb.config.update({"ExperimentConfig": str(config)})
-
- @staticmethod
- def update(epoch: int, scalars: Dict[str, float],
- videos: Dict[str, Tuple[np.ndarray, int]],
- meshes: Dict[str, MeshSummary]) -> None:
- """Write new epoch summary to Weights and Biases.
-
- Args:
- epoch: Current epoch in training process
- scalars: Scalars to log.
- videos: Videos to log.
- meshes: Meshes to log.
- """
-
- _write_scalars(epoch, scalars)
- _write_videos(epoch, videos)
- _write_meshes(epoch, meshes)
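Editor's note: a minimal usage sketch of the deleted `WeightsAndBiasesManager`. It assumes the module is importable as `dair_pll.wandb_manager` and that a W&B login is configured; the run and directory names are placeholders. `log_config` is omitted because it expects a `dair_pll` experiment config object.

```python
# Hypothetical usage of the deleted WeightsAndBiasesManager; names below are
# placeholders, not values taken from this diff.
from dair_pll.wandb_manager import WeightsAndBiasesManager

manager = WeightsAndBiasesManager(
    run_name="elbow_contactnets_demo",   # placeholder
    directory="/tmp/pll_wandb",          # placeholder
    project_name="dair_pll-examples",
    resume_from_id=None)                 # pass a saved run ID to resume

run_id = manager.launch()  # e.g. "elbow_contactnets_demo_<microsecond timestamp>"

# Per-epoch logging; videos and meshes may be empty dictionaries.
for epoch, train_loss in enumerate([0.9, 0.5, 0.3]):
    WeightsAndBiasesManager.update(epoch,
                                   scalars={"train_loss": train_loss},
                                   videos={},
                                   meshes={})
```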
diff --git a/dair_pll_old/docsrc/Makefile b/dair_pll_old/docsrc/Makefile
deleted file mode 100644
index 580f6f1..0000000
--- a/dair_pll_old/docsrc/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line, and also
-# from the environment for the first two.
-SPHINXOPTS ?=
-SPHINXBUILD ?= sphinx-build
-SOURCEDIR = source
-BUILDDIR = docs
-
-# Put it first so that "make" without argument is like "make help".
-help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-github:
- @make html
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/dair_pll_old/docsrc/build_docs.py b/dair_pll_old/docsrc/build_docs.py
deleted file mode 100644
index 7405f74..0000000
--- a/dair_pll_old/docsrc/build_docs.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import json
-import os
-from os import path
-
-import click
-import networkx as nx
-
-MODULE_NAME = 'dair_pll'
-DOCS_DIR = path.dirname(__file__)
-PROJECT_DIR = path.dirname(DOCS_DIR)
-SOURCE_DIR = path.join(DOCS_DIR, 'source')
-BUILD_DIR = path.join(DOCS_DIR, 'docs')
-PUBLISH_DIR = path.join(PROJECT_DIR, 'docs')
-MODULE_DIR = path.join(PROJECT_DIR, MODULE_NAME)
-TEMPLATE_DIR = path.join(DOCS_DIR, 'templates')
-INDEX_RST = path.join(SOURCE_DIR, 'index.rst')
-MODULES_RST = path.join(SOURCE_DIR, 'modules.rst')
-MODULE_RST = path.join(SOURCE_DIR, f'{MODULE_NAME}.rst')
-INDEX_TEXT_RST = path.join(SOURCE_DIR, 'index_text.rst.template')
-DEP_JSON_FILE = path.join(DOCS_DIR, 'graph.json')
-
-
-# regenerate .rst's
-def build(regenerate_deps: bool = False):
- # remove any old documentation
- os.system(f'rm {SOURCE_DIR}/{MODULE_NAME}.*')
-
- # generate new .rst's
- os.system(
- f'sphinx-apidoc -f -o {SOURCE_DIR} {MODULE_DIR} -e --templatedir'
- f'={TEMPLATE_DIR}')
-
- # generate index.rst
- with open(INDEX_TEXT_RST, 'r', encoding='utf-8') as index_text_file:
- index_text = index_text_file.readlines()
-
- toc_setup = [
- '',
- '.. toctree::',
- ' :maxdepth: 8',
- ' :caption: Submodules:',
- ' :titlesonly:',
- '',
- ]
- index_text.extend([f'{line}\n' for line in toc_setup])
-
- # build dependency graph
- if regenerate_deps:
- os.system(
- f'pydeps --only {MODULE_NAME} --show-deps {MODULE_DIR} --no-output'
- ' --debug'
- f' > {DEP_JSON_FILE}')
-
- with open(DEP_JSON_FILE, 'r', encoding='utf-8') as dep_file:
- dep_graph = json.load(dep_file)
- # print(dep_graph)
- graph = nx.DiGraph()
- edges = []
- excludes = ['dair_pll']
- main_document = 'dair_pll.drake_experiment'
- for module, module_details in dep_graph.items():
- if 'imports' not in module_details:
- continue
- if module in excludes:
- continue
- for imported_module in module_details['imports']:
- if imported_module in excludes:
- continue
- edges.append((module, imported_module))
- graph.add_edges_from(edges)
-
- if not nx.is_directed_acyclic_graph(graph):
- raise ValueError('import cycle detected in package!')
-
- mod_list = []
-
- # define tiebreak ordering for topological dependency sort
- def tiebreak(x):
- # Put full module first.
- if not '.' in x:
- return 0
-
- # Next, put utility files.
- if '_utils' in x:
- return -1
-
- # Last, everything else.
- return -2
-
- # sort submodules by top-down dependency
- # pdb.set_trace()
- for module in nx.lexicographical_topological_sort(graph, key=tiebreak):
- mod_list.append(str(module))
-
- # Reverse to bottom-up dependency
- mod_list.reverse()
-
- # sort for nodes relevant to docs
- reachable = nx.single_source_shortest_path(graph, main_document).keys()
- not_reached = [module for module in mod_list if not module in reachable]
- mod_list = [module for module in mod_list if module in reachable]
-
- # add modules to table of contents.
- for module in mod_list:
- index_text.append(f' {module}\n')
-
- index_text.append(f' bibliography\n')
-
- with open(INDEX_RST, 'w', encoding='utf-8') as index_file:
- index_file.write(''.join(index_text))
-
- # remove unused files
- remove_files = [path.join(SOURCE_DIR, f'{file_to_remove}.rst')
- for file_to_remove in set(not_reached + excludes)]
- remove_files += [MODULES_RST]
-
- for filename in remove_files:
- os.system(f'rm {filename}')
-
- # build html
- os.system(f'sphinx-build -b html {SOURCE_DIR} {PUBLISH_DIR}')
-
-
-@click.group()
-def cli():
- pass
-
-
-@cli.command()
-@click.option('--regenerate_deps/--use_stored_deps', default=False,
- help="reorder submodules by dependency")
-def build_command(regenerate_deps: bool):
- build(regenerate_deps)
-
-
-if __name__ == '__main__':
- build_command()
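Editor's note: the submodule ordering in `build_docs.py` above relies on `networkx.lexicographical_topological_sort` with a tiebreak key. A self-contained toy illustration of that pattern (module names made up; same tiebreak values as the deleted script):

```python
# Toy illustration (made-up module names): topologically sort an import graph
# whose edges point from a module to the modules it imports, using the same
# tiebreak values as build_docs.py (package root 0, *_utils -1, others -2),
# then reverse for bottom-up dependency order.
import networkx as nx

graph = nx.DiGraph()
graph.add_edges_from([
    ("pkg.experiment", "pkg.file_utils"),
    ("pkg.experiment", "pkg.system"),
    ("pkg.system", "pkg.state_space"),
    ("pkg.file_utils", "pkg"),
    ("pkg.state_space", "pkg"),
])

def tiebreak(name: str) -> int:
    if '.' not in name:   # full package
        return 0
    if '_utils' in name:  # utility modules
        return -1
    return -2             # everything else

assert nx.is_directed_acyclic_graph(graph)
order = list(nx.lexicographical_topological_sort(graph, key=tiebreak))
order.reverse()  # bottom-up: dependencies before the modules that import them
print(order)
```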
diff --git a/dair_pll_old/docsrc/buildsite.sh b/dair_pll_old/docsrc/buildsite.sh
deleted file mode 100644
index 9365429..0000000
--- a/dair_pll_old/docsrc/buildsite.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-set -x
-
-export GIT_PWD=$(pwd)
-git config --global --add safe.directory ${GIT_PWD}
-export SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
-
-##############
-# BUILD DOCS #
-##############
-
-# build tools
-pip install --upgrade pip setuptools wheel
-
-# git deps
-pip install git+https://github.com/mshalm/sappy.git
-pip install git+https://github.com/DAIRLab/drake-pytorch.git
-# install package to get deps
-pip install -e .
-
-# build docs from source
-python docsrc/build_docs.py --regenerate_deps
-
-#######################
-# Update GitHub Pages #
-#######################
-
-git config --global user.name "${GITHUB_ACTOR}"
-git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com"
-
-docroot=`mktemp -d`
-rsync -av "docs/" "${docroot}/"
-
-pushd "${docroot}"
-
-git init
-git remote add deploy "https://token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git"
-git checkout -b gh-pages
-
-# Add a .nojekyll file to the root to signal to GitHub Pages that Jekyll
-# processing should be skipped, so directories that start with an underscore
-# (_) are still served.
-touch .nojekyll
-
-# Add README
-cat > README.md <NUL 2>NUL
-if errorlevel 9009 (
- echo.
- echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
- echo.installed, then set the SPHINXBUILD environment variable to point
- echo.to the full path of the 'sphinx-build' executable. Alternatively you
- echo.may add the Sphinx directory to PATH.
- echo.
- echo.If you don't have Sphinx installed, grab it from
- echo.https://www.sphinx-doc.org/
- exit /b 1
-)
-
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-goto end
-
-:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-
-:end
-popd
diff --git a/dair_pll_old/docsrc/source/bibliography.rst b/dair_pll_old/docsrc/source/bibliography.rst
deleted file mode 100644
index 94bf651..0000000
--- a/dair_pll_old/docsrc/source/bibliography.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Bibliography
-=================================
-
-.. bibliography::
\ No newline at end of file
diff --git a/dair_pll_old/docsrc/source/conf.py b/dair_pll_old/docsrc/source/conf.py
deleted file mode 100644
index 895350a..0000000
--- a/dair_pll_old/docsrc/source/conf.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Configuration file for the Sphinx documentation builder.
-#
-# This file only contains a selection of the most common options. For a full
-# list see the documentation:
-# https://www.sphinx-doc.org/en/master/usage/configuration.html
-
-# -- Path setup --------------------------------------------------------------
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
-
-
-# -- Project information -----------------------------------------------------
-
-project = 'dair_pll'
-copyright = '2022, Mathew Halm & DAIR Lab'
-author = 'Mathew Halm'
-
-# The full version, including alpha/beta/rc tags
-release = 'v0.0.1'
-
-# -- General configuration ---------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = ['sphinx.ext.napoleon',
- 'sphinx_toolbox.more_autodoc.typehints',
- 'sphinx.ext.viewcode',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.todo',
- 'sphinxcontrib.bibtex'
- ]
-
-bibtex_bibfiles = ['references.bib']
-html_static_path = []
-
-autoclass_content = 'both'
-
-intersphinx_mapping = {
- 'pydrake': ('https://drake.mit.edu/pydrake/', None),
- 'torch': ('https://pytorch.org/docs/stable/', None),
- 'python': ('https://docs.python.org/3', None),
- 'numpy': ('https://numpy.org/doc/1.21/', None),
- 'optuna': ('https://optuna.readthedocs.io/en/stable/', None)
-}
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['index_text']
-
-autodoc_type_aliases = {'DrakeSpatialInertia':
- 'dair_pll.drake_utils.DrakeSpatialInertia',
- 'DrakeBody':
- 'dair_pll.drake_utils.DrakeBody'
- }
-
-# -- Options for HTML output -------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-#
-html_theme = 'sphinx_rtd_theme'
-html_theme_options = {
- 'navigation_depth': 8,
- 'sticky_navigation': True,
- 'collapse_navigation': False
-}
-
-autodoc_member_order = 'bysource'
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
diff --git a/dair_pll_old/docsrc/source/index_text.rst.template b/dair_pll_old/docsrc/source/index_text.rst.template
deleted file mode 100644
index 289cffc..0000000
--- a/dair_pll_old/docsrc/source/index_text.rst.template
+++ /dev/null
@@ -1,2 +0,0 @@
-Physics-based learning tools from DAIR Lab
-==============================================
diff --git a/dair_pll_old/docsrc/source/references.bib b/dair_pll_old/docsrc/source/references.bib
deleted file mode 100644
index d953ea4..0000000
--- a/dair_pll_old/docsrc/source/references.bib
+++ /dev/null
@@ -1,37 +0,0 @@
-@InProceedings{AmosICNN2017,
- title = {Input Convex Neural Networks},
- author = {Brandon Amos and Lei Xu and J. Zico Kolter},
- booktitle = {Proceedings of the 34th International Conference on Machine Learning},
- pages = {146--155},
- year = {2017},
- editor = {Precup, Doina and Teh, Yee Whye},
- volume = {70},
- series = {Proceedings of Machine Learning Research},
- month = {06--11 Aug},
- publisher = {PMLR},
- pdf = {http://proceedings.mlr.press/v70/amos17b/amos17b.pdf},
- url = {https://proceedings.mlr.press/v70/amos17b.html}
-}
-
-@inproceedings{brossardCode2019,
- author={Martin Brossard and Axel Barrau and Silvère Bonnabel},
- title={{A Code for Unscented Kalman Filtering on Manifolds (UKF-M)}},
- booktitle={2020 International Conference on Robotics and Automation (ICRA)},
- year={2020},
- organization={IEEE}
-}
-
-
-@article{Milnor1976,
-title = {Curvatures of left invariant metrics on lie groups},
-journal = {Advances in Mathematics},
-volume = {21},
-number = {3},
-pages = {293-329},
-year = {1976},
-issn = {0001-8708},
-doi = {https://doi.org/10.1016/S0001-8708(76)80002-3},
-url = {https://www.sciencedirect.com/science/article/pii/S0001870876800023},
-author = {John Milnor},
-abstract = {This article outlines what is known to the author about the Riemannian geometry of a Lie group which has been provided with a Riemannian metric invariant under left translation.}
-}
\ No newline at end of file
diff --git a/dair_pll_old/docsrc/templates/module.rst_t b/dair_pll_old/docsrc/templates/module.rst_t
deleted file mode 100644
index d9a50e6..0000000
--- a/dair_pll_old/docsrc/templates/module.rst_t
+++ /dev/null
@@ -1,9 +0,0 @@
-{%- if show_headings %}
-{{- basename | e | heading }}
-
-{% endif -%}
-.. automodule:: {{ qualname }}
-{%- for option in automodule_options %}
- :{{ option }}:
-{%- endfor %}
-
diff --git a/dair_pll_old/docsrc/templates/package.rst_t b/dair_pll_old/docsrc/templates/package.rst_t
deleted file mode 100644
index 196d5c5..0000000
--- a/dair_pll_old/docsrc/templates/package.rst_t
+++ /dev/null
@@ -1,57 +0,0 @@
-{%- macro automodule(modname, options) -%}
-.. automodule:: {{ modname }}
-{%- for option in options %}
- :{{ option }}:
-{%- endfor %}
-{%- endmacro %}
-
-{%- macro toctree(docnames) -%}
-.. toctree::
- :maxdepth: {{ maxdepth }}
-{% for docname in docnames %}
- {{ docname }}
-{%- endfor %}
-{%- endmacro %}
-
-{%- if is_namespace %}
-{{- [pkgname, "namespace"] | join(" ") | e | heading }}
-{% else %}
-{{- [pkgname, "package"] | join(" ") | e | heading }}
-{% endif %}
-
-{%- if is_namespace %}
-.. py:module:: {{ pkgname }}
-{% endif %}
-
-{%- if modulefirst and not is_namespace %}
-{{ automodule(pkgname, automodule_options) }}
-{% endif %}
-
-{%- if subpackages %}
-Subpackages
------------
-
-{{ toctree(subpackages) }}
-{% endif %}
-
-{%- if submodules %}
-Submodules
-----------
-{% if separatemodules %}
-{{ toctree(submodules) }}
-{% else %}
-{%- for submodule in submodules %}
-{% if show_headings %}
-{{- submodule | e | heading(2) }}
-{% endif %}
-{{ automodule(submodule, automodule_options) }}
-{% endfor %}
-{%- endif %}
-{%- endif %}
-
-{%- if not modulefirst and not is_namespace %}
-Module contents
----------------
-
-{{ automodule(pkgname, automodule_options) }}
-{% endif %}
diff --git a/dair_pll_old/docsrc/templates/toc.rst_t b/dair_pll_old/docsrc/templates/toc.rst_t
deleted file mode 100644
index f0877ee..0000000
--- a/dair_pll_old/docsrc/templates/toc.rst_t
+++ /dev/null
@@ -1,8 +0,0 @@
-{{ header | heading }}
-
-.. toctree::
- :maxdepth: {{ maxdepth }}
-{% for docname in docnames %}
- {{ docname }}
-{%- endfor %}
-
diff --git a/dair_pll_old/examples/bundlesdf_simple.py b/dair_pll_old/examples/bundlesdf_simple.py
deleted file mode 100644
index 92958ad..0000000
--- a/dair_pll_old/examples/bundlesdf_simple.py
+++ /dev/null
@@ -1,536 +0,0 @@
-"""Simple ContactNets/differentiable physics learning examples."""
-# pylint: disable=E1103
-import os
-import time
-from typing import cast
-
-import sys
-import pdb
-
-import click
-import numpy as np
-import torch
-from torch import Tensor
-import pickle
-import git
-
-from dair_pll import file_utils
-from dair_pll.dataset_generation import DataGenerationConfig, \
- ExperimentDatasetGenerator
-from dair_pll.dataset_management import DataConfig, TrajectorySliceConfig
-from dair_pll.deep_learnable_model import MLP
-from dair_pll.deep_learnable_system import DeepLearnableSystemConfig
-from dair_pll.drake_experiment import \
- DrakeMultibodyLearnableExperiment, DrakeSystemConfig, \
- MultibodyLearnableSystemConfig, MultibodyLosses, \
- DrakeDeepLearnableExperiment
-from dair_pll.experiment import default_epoch_callback
-from dair_pll.experiment_config import OptimizerConfig, \
- SupervisedLearningExperimentConfig
-from dair_pll.hyperparameter import Float, Int
-from dair_pll.multibody_learnable_system import MultibodyLearnableSystem, \
- LOSS_PLL_ORIGINAL, LOSS_INERTIA_AGNOSTIC, LOSS_BALANCED, LOSS_POWER, \
- LOSS_CONTACT_VELOCITY, LOSS_VARIATIONS, LOSS_VARIATION_NUMBERS
-from dair_pll.state_space import UniformSampler, GaussianWhiteNoiser, \
- FloatingBaseSpace, FixedBaseSpace, ProductSpace
-from dair_pll.system import System
-
-
-# Possible systems on which to run PLL
-CUBE_SYSTEM = 'cube'
-ELBOW_SYSTEM = 'elbow'
-ASYMMETRIC_SYSTEM = 'asymmetric'
-BUNDLESDF_CUBE_SYSTEM = 'bundlesdf_cube'
-BUNDLESDF_BOTTLE_SYSTEM = 'bundlesdf_bottle'
-BUNDLESDF_NAPKIN_SYSTEM = 'bundlesdf_napkin'
-SYSTEMS = [BUNDLESDF_CUBE_SYSTEM, CUBE_SYSTEM, ELBOW_SYSTEM, ASYMMETRIC_SYSTEM, BUNDLESDF_BOTTLE_SYSTEM, BUNDLESDF_NAPKIN_SYSTEM]
-
-# Possible dataset types
-SIM_SOURCE = 'simulation'
-REAL_SOURCE = 'real'
-DYNAMIC_SOURCE = 'dynamic'
-DATA_SOURCES = [SIM_SOURCE, REAL_SOURCE, DYNAMIC_SOURCE]
-
-# Possible simulation data augmentations.
-VORTEX_AUGMENTATION = 'vortex'
-VISCOUS_AUGMENTATION = 'viscous'
-GRAVITY_AUGMENTATION = 'gravity'
-AUGMENTED_FORCE_TYPES = [VORTEX_AUGMENTATION, VISCOUS_AUGMENTATION,
- GRAVITY_AUGMENTATION]
-
-# Possible inertial parameterizations to learn for the elbow system.
-# The options are:
-# 0 - none (0 parameters)
-# 1 - masses (n_bodies - 1 parameters)
-# 2 - CoMs (3*n_bodies parameters)
-# 3 - CoMs and masses (4*n_bodies - 1 parameters)
-# 4 - all (10*n_bodies - 1 parameters)
-INERTIA_PARAM_CHOICES = [str(i) for i in range(5)]
-INERTIA_PARAM_DESCRIPTIONS = [
- 'learn no inertial parameters (0 * n_bodies)',
- 'learn only masses and not the first mass (n_bodies - 1)',
- 'learn only centers of mass (3 * n_bodies)',
- 'learn masses (except first) and centers of mass (4 * n_bodies - 1)',
- 'learn all parameters (except first mass) (10 * n_bodies - 1)']
-INERTIA_PARAM_OPTIONS = ['none', 'masses', 'CoMs', 'CoMs and masses', 'all']
-
-
-# File management.
-BUNDLESDF_CUBE_DATA_ASSET = 'bundlesdf_cube'
-BUNDLESDF_BOTTLE_DATA_ASSET = 'bundlesdf_bottle'
-BUNDLESDF_NAPKIN_DATA_ASSET = 'bundlesdf_napkin'
-CUBE_DATA_ASSET = 'contactnets_cube'
-ELBOW_DATA_ASSET = 'contactnets_elbow'
-CUBE_BOX_URDF_ASSET = 'contactnets_cube.urdf'
-CUBE_MESH_URDF_ASSET = 'contactnets_cube_mesh.urdf'
-ELBOW_BOX_URDF_ASSET = 'contactnets_elbow.urdf'
-ELBOW_MESH_URDF_ASSET = 'contactnets_elbow_mesh.urdf'
-ASYMMETRIC_URDF_ASSET = 'contactnets_asymmetric.urdf'
-BUNDLESDF_CUBE_MESH_ASSET = 'bundlesdf_cube_mesh.urdf'
-BUNDLESDF_BOTTLE_MESH_ASSET = 'bundlesdf_bottle_mesh.urdf'
-BUNDLESDF_NAPKIN_MESH_ASSET = 'bundlesdf_napkin_mesh.urdf'
-REAL_DATA_ASSETS = {CUBE_SYSTEM: CUBE_DATA_ASSET, ELBOW_SYSTEM: ELBOW_DATA_ASSET, BUNDLESDF_CUBE_SYSTEM: BUNDLESDF_CUBE_DATA_ASSET, BUNDLESDF_BOTTLE_SYSTEM: BUNDLESDF_BOTTLE_DATA_ASSET, BUNDLESDF_NAPKIN_SYSTEM: BUNDLESDF_NAPKIN_DATA_ASSET}
-
-MESH_TYPE = 'mesh'
-BOX_TYPE = 'box'
-POLYGON_TYPE = 'polygon'
-GEOMETRY_TYPES = [BOX_TYPE, MESH_TYPE, POLYGON_TYPE]
-# BUNDLESDF_CUBE_URDFS = {POLYGON_TYPE: CUBE_MESH_URDF_ASSET}
-BUNDLESDF_CUBE_URDFS = {MESH_TYPE: BUNDLESDF_CUBE_MESH_ASSET, POLYGON_TYPE: BUNDLESDF_CUBE_MESH_ASSET}
-BUNDLESDF_BOTTLE_URDFS = {POLYGON_TYPE: BUNDLESDF_BOTTLE_MESH_ASSET}
-BUNDLESDF_NAPKIN_URDFS = {POLYGON_TYPE: BUNDLESDF_NAPKIN_MESH_ASSET}
-CUBE_URDFS = {MESH_TYPE: CUBE_MESH_URDF_ASSET,
- BOX_TYPE: CUBE_BOX_URDF_ASSET,
- POLYGON_TYPE: CUBE_MESH_URDF_ASSET}
-ELBOW_URDFS = {MESH_TYPE: ELBOW_MESH_URDF_ASSET,
- BOX_TYPE: ELBOW_BOX_URDF_ASSET,
- POLYGON_TYPE: ELBOW_MESH_URDF_ASSET}
-ASYMMETRIC_URDFS = {MESH_TYPE: ASYMMETRIC_URDF_ASSET,
- POLYGON_TYPE: ASYMMETRIC_URDF_ASSET}
-TRUE_URDFS = {CUBE_SYSTEM: CUBE_URDFS, ELBOW_SYSTEM: ELBOW_URDFS,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_URDFS, BUNDLESDF_CUBE_SYSTEM: BUNDLESDF_CUBE_URDFS, BUNDLESDF_BOTTLE_SYSTEM: BUNDLESDF_BOTTLE_URDFS, BUNDLESDF_NAPKIN_SYSTEM: BUNDLESDF_NAPKIN_URDFS}
-
-CUBE_BOX_URDF_ASSET_BAD = 'contactnets_cube_bad_init.urdf'
-CUBE_BOX_URDF_ASSET_SMALL = 'contactnets_cube_small_init.urdf'
-CUBE_MESH_URDF_ASSET_SMALL = 'contactnets_cube_mesh_small_init.urdf'
-ELBOW_BOX_URDF_ASSET_BAD = 'contactnets_elbow_bad_init.urdf'
-ELBOW_BOX_URDF_ASSET_SMALL = 'contactnets_elbow_small_init.urdf'
-ELBOW_MESH_URDF_ASSET_SMALL = 'contactnets_elbow_mesh_small_init.urdf'
-CUBE_BOX_WRONG_URDFS = {'bad': CUBE_BOX_URDF_ASSET_BAD,
- 'small': CUBE_BOX_URDF_ASSET_SMALL}
-CUBE_MESH_WRONG_URDFS = {'small': CUBE_MESH_URDF_ASSET_SMALL}
-ELBOW_BOX_WRONG_URDFS = {'bad': ELBOW_BOX_URDF_ASSET_BAD,
- 'small': ELBOW_BOX_URDF_ASSET_SMALL}
-ELBOW_MESH_WRONG_URDFS = {'small': ELBOW_MESH_URDF_ASSET_SMALL}
-WRONG_BOX_URDFS = {CUBE_SYSTEM: CUBE_BOX_WRONG_URDFS,
- ELBOW_SYSTEM: ELBOW_BOX_WRONG_URDFS}
-WRONG_MESH_URDFS = {CUBE_SYSTEM: CUBE_MESH_WRONG_URDFS,
- ELBOW_SYSTEM: ELBOW_MESH_WRONG_URDFS}
-WRONG_URDFS_BY_GEOM_THEN_SYSTEM = {MESH_TYPE: WRONG_MESH_URDFS,
- POLYGON_TYPE: WRONG_MESH_URDFS,
- BOX_TYPE: WRONG_BOX_URDFS}
-
-REPO_DIR = os.path.normpath(
- git.Repo(search_parent_directories=True).git.rev_parse("--show-toplevel"))
-
-# Data configuration.
-DT = 0.0068
-
-# Generation configuration.
-CUBE_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, 0., 0., 0., 0., 0., -.075])
-ELBOW_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, np.pi, 0., 0., 0., 0., 0., -.075, 0.])
-ASYMMETRIC_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, 0., 0., 0., 0., 0., -.075])
-# TODO: this does not appear to be used for real-data experiments.
-BOTTLE_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, 0., 0., 0., 0., 0., -.075])
-NAPKIN_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, 0., 0., 0., 0., 0., -.075])
-X_0S = {CUBE_SYSTEM: CUBE_X_0,
- ELBOW_SYSTEM: ELBOW_X_0,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_X_0,
- BUNDLESDF_CUBE_SYSTEM: CUBE_X_0,
- BUNDLESDF_BOTTLE_SYSTEM: BOTTLE_X_0,
- BUNDLESDF_NAPKIN_SYSTEM: NAPKIN_X_0}
-CUBE_SAMPLER_RANGE = torch.tensor([
- 2 * np.pi, 2 * np.pi, 2 * np.pi, .03, .03, .015, 6., 6., 6., 1.5, 1.5, .075
-])
-ELBOW_SAMPLER_RANGE = torch.tensor([
- 2 * np.pi, 2 * np.pi, 2 * np.pi, .03, .03, .015, np.pi, 6., 6., 6., 1.5,
- 1.5, .075, 6.
-])
-ASYMMETRIC_SAMPLER_RANGE = torch.tensor([
- 2 * np.pi, 2 * np.pi, 2 * np.pi, .03, .03, .015, 6., 6., 6., 1.5, 1.5, .075
-])
-SAMPLER_RANGES = {
- CUBE_SYSTEM: CUBE_SAMPLER_RANGE,
- ELBOW_SYSTEM: ELBOW_SAMPLER_RANGE,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_SAMPLER_RANGE,
- BUNDLESDF_CUBE_SYSTEM: CUBE_SAMPLER_RANGE,
-}
-TRAJECTORY_LENGTHS = {CUBE_SYSTEM: 80, ELBOW_SYSTEM: 120, ASYMMETRIC_SYSTEM: 80, BUNDLESDF_CUBE_SYSTEM: 80}
-
-# Training data configuration.
-T_PREDICTION = 1
-
-# Optimization configuration.
-CUBE_LR = 1e-3
-ELBOW_LR = 1e-3
-ASYMMETRIC_LR = 1e-3
-BUNDLESDF_CUBE_LR = 1e-3
-BUNDLESDF_BOTTLE_LR = 1e-3
-BUNDLESDF_NAPKIN_LR = 1e-3
-LRS = {CUBE_SYSTEM: CUBE_LR,
- ELBOW_SYSTEM: ELBOW_LR,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_LR,
- BUNDLESDF_CUBE_SYSTEM: BUNDLESDF_CUBE_LR,
- BUNDLESDF_BOTTLE_SYSTEM: BUNDLESDF_BOTTLE_LR,
- BUNDLESDF_NAPKIN_SYSTEM: BUNDLESDF_NAPKIN_LR}
-CUBE_WD = 0.0
-ELBOW_WD = 0.0 #1e-4
-ASYMMETRIC_WD = 0.0
-BUNDLESDF_CUBE_WD = 0.0
-BUNDLESDF_BOTTLE_WD = 0.0
-BUNDLESDF_NAPKIN_WD = 0.0
-WDS = {CUBE_SYSTEM: CUBE_WD,
- ELBOW_SYSTEM: ELBOW_WD,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_WD,
-       BUNDLESDF_CUBE_SYSTEM: BUNDLESDF_CUBE_WD,
- BUNDLESDF_BOTTLE_SYSTEM: BUNDLESDF_BOTTLE_WD,
- BUNDLESDF_NAPKIN_SYSTEM: BUNDLESDF_NAPKIN_WD}
-DEFAULT_WEIGHT_RANGE = (1e-2, 1e2)
-EPOCHS = 200 # change this (originally 500)
-PATIENCE = 10 # change this (originally EPOCHS)
-
-WANDB_DEFAULT_PROJECT = 'dair_pll-examples'
-
-
-def main(storage_folder_name: str = "",
- run_name: str = "",
- system: str = CUBE_SYSTEM,
- source: str = REAL_SOURCE,
- structured: bool = True,
- contactnets: bool = True,
- geometry: str = MESH_TYPE,
- regenerate: bool = False,
- dataset_size: int = 512,
- inertia_params: str = '4',
- loss_variation: str = '0',
- true_sys: bool = False,
- wandb_project: str = WANDB_DEFAULT_PROJECT,
- w_pred: float = 1e0,
- w_comp: float = 1e0,
- w_diss: float = 1e0,
- w_pen: float = 1e0,
- w_res: float = 1e0,
- w_res_w: float = 1e0,
- do_residual: bool = False,
- additional_forces: str = None,
- g_frac: float = 1.0,
- iteration: int = 1):
- """Execute ContactNets basic example on a system.
-
- Args:
- storage_folder_name: name of outer storage directory.
- run_name: name of experiment run.
- system: Which system to learn.
- source: Where to get data from.
- contactnets: Whether to use ContactNets or prediction loss.
- geometry: How to represent geometry (box, mesh, or polygon).
-        regenerate: Whether to save updated URDFs each epoch.
- dataset_size: Number of trajectories for train/val/test.
- inertia_params: What inertial parameters to learn.
- true_sys: Whether to start with the "true" URDF or poor initialization.
- wandb_project: What W&B project to store results under.
- w_pred: Weight of prediction term in ContactNets loss.
- w_comp: Weight of complementarity term in ContactNets loss.
- w_diss: Weight of dissipation term in ContactNets loss.
- w_pen: Weight of penetration term in ContactNets loss.
- w_res: Weight of residual regularization term in loss.
- do_residual: Whether to add residual physics block.
- additional_forces: Optionally provide additional forces to augment any
- generated simulation data. Is ignored if using real data.
- g_frac: Fraction of gravity to use with initial model. Is ignored
- unless additional_forces == gravity.
- """
- # pylint: disable=too-many-locals, too-many-arguments
-
- print(f'Starting test under \'{storage_folder_name}\' ' \
- + f'with name \'{run_name}\':' \
- + f'\n\tPerforming on system: {system} \n\twith source: {source}' \
- + f'\n\twith structured parameterization: {structured}' \
- + f'\n\tusing ContactNets: {contactnets}' \
- + f'\n\twith geometry represented as: {geometry}' \
- + f'\n\tregenerate: {regenerate}' \
- + f'\n\tinertia learning mode: {inertia_params}' \
- + f'\n\twith description: {INERTIA_PARAM_OPTIONS[int(inertia_params)]}' \
- + f'\n\tloss variation: {loss_variation}' \
- + f'\n\twith description: {LOSS_VARIATIONS[int(loss_variation)]}' \
- + f'\n\tloss weights (pred, comp, diss, pen, res, res_w): ' \
- + f'({w_pred}, {w_comp}, {w_diss}, {w_pen}, {w_res}, {w_res_w})' \
- + f'\n\twith residual: {do_residual}' \
- + f'\n\tand starting with provided true_sys={true_sys}' \
- + f'\n\tinjecting into dynamics (if sim): {additional_forces}' \
- + f'\n\twith gravity fraction (if gravity): {g_frac}' \
- + f'\n\tand loading shape prior from: {storage_folder_name}/runs/{run_name}/{system}_mesh_vis.urdf')
-
- simulation = source == SIM_SOURCE
- dynamic = source == DYNAMIC_SOURCE
-
- storage_name = os.path.join(REPO_DIR, 'results', storage_folder_name)
-
-    # If this script is used in conjunction with pll_manager.py, then the file
-    # management is taken care of there.
-
- print(f'\nStoring data at {file_utils.data_dir(storage_name)}')
- print(f'Storing results at {file_utils.run_dir(storage_name, run_name)}')
-
- # Next, build the configuration of the learning experiment.
-
- # If starting with true system, no need to train, since we probably just
- # want to generate statistics.
- num_epochs = 0 if true_sys else EPOCHS
-
- # Describes the optimizer settings; by default, the optimizer is Adam.
- optimizer_config = OptimizerConfig(lr=Float(LRS[system]),
- wd=Float(WDS[system]),
- patience=PATIENCE,
- epochs=num_epochs,
- batch_size=Int(int(dataset_size/2)))
-
- # Describes the ground truth system; infers everything from the URDF.
- # This is a configuration for a DrakeSystem, which wraps a Drake
- # simulation for the described URDFs.
- # first, select urdfs
- if iteration == 0:
- urdf_asset = TRUE_URDFS[system][geometry]
- urdf = file_utils.get_asset(urdf_asset)
- else:
- urdf_asset = f'{REAL_DATA_ASSETS[system]}_mesh_vis.urdf'
- urdf = file_utils.get_generated_urdf(file_utils.run_dir(storage_name, run_name), urdf_asset)
- print(f'>>>>>>>>>> Using gt-urdf: {urdf}')
- urdfs = {system: urdf}
- base_config = DrakeSystemConfig(urdfs=urdfs)
-
- # how to slice trajectories into training datapoints
- slice_config = TrajectorySliceConfig(
- t_prediction=1 if contactnets else T_PREDICTION)
-
- # Describes configuration of the data
- data_config = DataConfig(dt=DT,
- train_fraction=1.0 if dynamic else 0.5,
- valid_fraction=0.0 if dynamic else 0.25,
- test_fraction=0.0 if dynamic else 0.25,
- slice_config=slice_config,
- update_dynamically=dynamic)
-
- if structured:
- loss = MultibodyLosses.CONTACTNETS_LOSS if contactnets else \
- MultibodyLosses.PREDICTION_LOSS
-
- learnable_config = MultibodyLearnableSystemConfig(
- urdfs=urdfs, loss=loss, inertia_mode=int(inertia_params),
- loss_variation=int(loss_variation), w_pred=w_pred,
- w_comp = Float(w_comp, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_diss = Float(w_diss, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_pen = Float(w_pen, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_res = Float(w_res, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_res_w = Float(w_res_w, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- do_residual=do_residual, represent_geometry_as=geometry,
- randomize_initialization = not true_sys, g_frac=g_frac)
-
- else:
- learnable_config = DeepLearnableSystemConfig(
- layers=4, hidden_size=256,
- nonlinearity=torch.nn.Tanh, model_constructor=MLP)
-
- # Combines everything into config for entire experiment.
- experiment_config = SupervisedLearningExperimentConfig(
- data_config=data_config,
- base_config=base_config,
- learnable_config=learnable_config,
- optimizer_config=optimizer_config,
- storage=storage_name,
- run_name=run_name,
- run_wandb=True,
- wandb_project=wandb_project,
- full_evaluation_period=EPOCHS if dynamic else 1,
- update_geometry_in_videos=True # ignored for deep learnable experiments
- )
-
- # Make experiment.
- experiment = DrakeMultibodyLearnableExperiment(experiment_config) \
- if structured else DrakeDeepLearnableExperiment(experiment_config)
-
- # Prepare data.
- x_0 = X_0S[system]
- if simulation:
- # For simulation, specify the following:
- data_generation_config = DataGenerationConfig(
- dt=DT,
- # timestep
- n_pop=dataset_size,
- # How many trajectories to simulate
- trajectory_length=TRAJECTORY_LENGTHS[system],
- # trajectory length
- x_0=x_0,
- # A nominal initial state
- sampler_type=UniformSampler,
- # use uniform distribution to sample ``x_0``
- sampler_ranges=SAMPLER_RANGES[system],
- # How much to vary initial states around ``x_0``
- noiser_type=GaussianWhiteNoiser,
- # Distribution of noise in trajectory data (Gaussian).
- static_noise=torch.zeros(x_0.nelement() - 1),
- # constant-in-time noise standard deviations (zero in this case)
- dynamic_noise=torch.zeros(x_0.nelement() - 1),
- # i.i.d.-in-time noise standard deviations (zero in this case)
- storage=storage_name
- # where to store trajectories
- )
-
- if additional_forces == None:
- data_generation_system = experiment.get_base_system()
- else:
- data_generation_system = experiment.get_augmented_system(
- additional_forces)
-
- generator = ExperimentDatasetGenerator(
- data_generation_system, data_generation_config)
- print(f'Generating (or getting existing) simulation trajectories.\n')
- generator.generate()
-
- else:
- # otherwise, specify directory with [T, n_x] tensor files saved as
- # 0.pt, 1.pt, ...
- # See :mod:`dair_pll.state_space` for state format.
- data_asset = REAL_DATA_ASSETS[system]
- import_directory = file_utils.get_asset(data_asset)
- print(f'Getting real trajectories from {import_directory}\n')
- file_utils.import_data_to_storage(storage_name,
- import_data_dir=import_directory,
- num=dataset_size)
-
- def regenerate_callback(epoch: int, learned_system: System,
- train_loss: Tensor,
- best_valid_loss: Tensor) -> None:
- default_epoch_callback(epoch, learned_system, train_loss,
- best_valid_loss)
- cast(MultibodyLearnableSystem, learned_system).generate_updated_urdfs(
- suffix='progress')
-
- # Trains system and saves final results.
- print(f'\nTraining the model.')
- learned_system, stats = experiment.generate_results(
- regenerate_callback if regenerate else default_epoch_callback)
-
- # # Save the final urdf.
- # if structured:
- # print(f'\nSaving the final learned URDF.')
- # learned_system = cast(MultibodyLearnableSystem, learned_system)
- # learned_system.generate_updated_urdfs(suffix='best')
- # else:
- # print(f'\nFinished training deep learnable; no URDF export.')
- print(f'Done!')
-
-
-
-
-@click.command()
-@click.argument('storage_folder_name')
-@click.argument('run_name')
-@click.option('--system',
- type=click.Choice(SYSTEMS, case_sensitive=True),
- default=BUNDLESDF_CUBE_SYSTEM)
-@click.option('--source',
- type=click.Choice(DATA_SOURCES, case_sensitive=True),
- default=REAL_SOURCE)
-@click.option('--structured/--end-to-end',
- default=True,
- help="whether to train structured parameters or deep network.")
-@click.option('--contactnets/--prediction',
- default=True,
- help="whether to train on ContactNets or prediction loss.")
-@click.option('--geometry',
- type=click.Choice(GEOMETRY_TYPES, case_sensitive=True),
- default=MESH_TYPE,
- help="how to represent geometry.")
-@click.option('--regenerate/--no-regenerate',
- default=False,
-              help="whether to save updated URDFs each epoch or not.")
-@click.option('--dataset-size',
- default=512,
- help="dataset size")
-@click.option('--inertia-params',
- type=click.Choice(INERTIA_PARAM_CHOICES),
- default='0',
- help="what inertia parameters to learn.")
-@click.option('--loss-variation',
- type=click.Choice(LOSS_VARIATION_NUMBERS),
- default='0',
- help="ContactNets loss variation")
-@click.option('--true-sys/--wrong-sys',
- default=False,
- help="whether to start with correct or poor URDF.")
-@click.option('--wandb-project',
- type = str,
- default=WANDB_DEFAULT_PROJECT,
- help="what W&B project to save results under.")
-@click.option('--w-pred',
- type=float,
- default=1e0,
- help="weight of prediction term in ContactNets loss")
-@click.option('--w-comp',
- type=float,
- default=1e0,
- help="weight of complementarity term in ContactNets loss")
-@click.option('--w-diss',
- type=float,
- default=1e0,
- help="weight of dissipation term in ContactNets loss")
-@click.option('--w-pen',
- type=float,
- default=1e0,
- help="weight of penetration term in ContactNets loss")
-@click.option('--w-res',
- type=float,
- default=1e0,
- help="weight of residual norm regularization term in loss")
-@click.option('--w-res-w',
- type=float,
- default=1e0,
- help="weight of residual weight regularization term in loss")
-@click.option('--residual/--no-residual',
- default=False,
- help="whether to include residual physics or not.")
-@click.option('--additional-forces',
- type = click.Choice(AUGMENTED_FORCE_TYPES),
- default=None,
- help="what kind of additional forces to augment simulation data.")
-@click.option('--g-frac',
- type=float,
- default=1e0,
- help="fraction of gravity constant to use.")
-@click.option('--iteration',
- type=int, default=0,help="current iteration of combined training")
-def main_command(storage_folder_name: str, run_name: str, system: str,
- source: str, structured: bool, contactnets: bool,
- geometry: str, regenerate: bool, dataset_size: int,
- inertia_params: str, loss_variation: str, true_sys: bool,
- wandb_project: str, w_pred: float, w_comp: float,
- w_diss: float, w_pen: float, w_res: float, w_res_w: float,
- residual: bool, additional_forces: str, g_frac: float, iteration: int):
- """Executes main function with argument interface."""
- assert storage_folder_name is not None
- assert run_name is not None
-
- main(storage_folder_name, run_name, system, source, structured, contactnets,
- geometry, regenerate, dataset_size, inertia_params, loss_variation,
- true_sys, wandb_project, w_pred, w_comp, w_diss, w_pen, w_res, w_res_w,
- residual, additional_forces, g_frac, iteration)
-
-
-if __name__ == '__main__':
- main_command() # pylint: disable=no-value-for-parameter
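Editor's note: the deleted example scripts expose paired on/off options (for example `--structured/--end-to-end` and `--contactnets/--prediction`) through click. A minimal standalone sketch of that CLI pattern, with a hypothetical command that only echoes its arguments:

```python
# Minimal sketch of the click pattern used by the deleted example scripts:
# positional arguments plus paired boolean flags. The command body is a
# placeholder and does not call into dair_pll.
import click


@click.command()
@click.argument('storage_folder_name')
@click.argument('run_name')
@click.option('--structured/--end-to-end', default=True,
              help="train structured parameters or a deep network.")
@click.option('--contactnets/--prediction', default=True,
              help="train with the ContactNets loss or the prediction loss.")
def demo(storage_folder_name: str, run_name: str,
         structured: bool, contactnets: bool) -> None:
    """Echo the parsed configuration."""
    click.echo(f"storage={storage_folder_name} run={run_name} "
               f"structured={structured} contactnets={contactnets}")


if __name__ == '__main__':
    demo()
```

Running `python demo.py results run0 --end-to-end --prediction` would echo `structured=False contactnets=False`, mirroring how the deleted scripts toggle between the structured ContactNets setup and an end-to-end network.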
diff --git a/dair_pll_old/examples/contactnets_cube_dynamic_data_source.py b/dair_pll_old/examples/contactnets_cube_dynamic_data_source.py
deleted file mode 100644
index d27f247..0000000
--- a/dair_pll_old/examples/contactnets_cube_dynamic_data_source.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import os
-import time
-
-from dair_pll import file_utils
-
-CUBE_DATA_ASSET = 'contactnets_cube'
-CUBE_DATA_INPUT_FOLDER = file_utils.get_asset(CUBE_DATA_ASSET)
-STORAGE_NAME = os.path.join(os.path.dirname(__file__),
- 'storage',
- CUBE_DATA_ASSET)
-N_POP = file_utils.get_numeric_file_count(CUBE_DATA_INPUT_FOLDER, '.pt')
-N_MIN = min(N_POP, 4)
-print(CUBE_DATA_INPUT_FOLDER,N_POP)
-
-for i in range(N_POP):
- if i >= N_MIN:
- time.sleep(10)
- in_file = os.path.join(CUBE_DATA_INPUT_FOLDER, f'{i}.pt')
- out_file = file_utils.trajectory_file(STORAGE_NAME, i)
- print(f'sending {i}.pt')
- os.system(f'cp {in_file} {out_file}')
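Editor's note: the deleted script above trickles trajectory files into storage by shelling out to `cp`. An equivalent standard-library sketch is below; the source and destination paths are placeholders, since the deleted script resolved them through `dair_pll.file_utils`.

```python
# Sketch of the same streaming-copy loop using pathlib/shutil instead of
# os.system('cp ...'). Paths are placeholders.
import shutil
import time
from pathlib import Path

source_dir = Path("assets/contactnets_cube")          # placeholder
storage_dir = Path("storage/contactnets_cube/data")   # placeholder
storage_dir.mkdir(parents=True, exist_ok=True)

trajectories = sorted(source_dir.glob("*.pt"), key=lambda p: int(p.stem))
n_min = min(len(trajectories), 4)

for i, trajectory in enumerate(trajectories):
    if i >= n_min:
        time.sleep(10)  # trickle in the remaining files, as in the original
    print(f"sending {trajectory.name}")
    shutil.copy(trajectory, storage_dir / trajectory.name)
```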
diff --git a/dair_pll_old/examples/contactnets_simple.py b/dair_pll_old/examples/contactnets_simple.py
deleted file mode 100644
index a0bdbdb..0000000
--- a/dair_pll_old/examples/contactnets_simple.py
+++ /dev/null
@@ -1,494 +0,0 @@
-"""Simple ContactNets/differentiable physics learning examples."""
-# pylint: disable=E1103
-import os
-import time
-from typing import cast
-
-import sys
-import pdb
-
-import click
-import numpy as np
-import torch
-from torch import Tensor
-import pickle
-import git
-
-from dair_pll import file_utils
-from dair_pll.dataset_generation import DataGenerationConfig, \
- ExperimentDatasetGenerator
-from dair_pll.dataset_management import DataConfig, TrajectorySliceConfig
-from dair_pll.deep_learnable_model import MLP
-from dair_pll.deep_learnable_system import DeepLearnableSystemConfig
-from dair_pll.drake_experiment import \
- DrakeMultibodyLearnableExperiment, DrakeSystemConfig, \
- MultibodyLearnableSystemConfig, MultibodyLosses, \
- DrakeDeepLearnableExperiment
-from dair_pll.experiment import default_epoch_callback
-from dair_pll.experiment_config import OptimizerConfig, \
- SupervisedLearningExperimentConfig
-from dair_pll.hyperparameter import Float, Int
-from dair_pll.multibody_learnable_system import MultibodyLearnableSystem, \
- LOSS_PLL_ORIGINAL, LOSS_INERTIA_AGNOSTIC, LOSS_BALANCED, LOSS_POWER, \
- LOSS_CONTACT_VELOCITY, LOSS_VARIATIONS, LOSS_VARIATION_NUMBERS
-from dair_pll.state_space import UniformSampler, GaussianWhiteNoiser, \
- FloatingBaseSpace, FixedBaseSpace, ProductSpace
-from dair_pll.system import System
-
-
-# Possible systems on which to run PLL
-CUBE_SYSTEM = 'cube'
-ELBOW_SYSTEM = 'elbow'
-ASYMMETRIC_SYSTEM = 'asymmetric'
-BOTTLE_SYSTEM = 'bottle'
-SYSTEMS = [CUBE_SYSTEM, ELBOW_SYSTEM, ASYMMETRIC_SYSTEM, BOTTLE_SYSTEM]
-
-# Possible dataset types
-SIM_SOURCE = 'simulation'
-REAL_SOURCE = 'real'
-DYNAMIC_SOURCE = 'dynamic'
-DATA_SOURCES = [SIM_SOURCE, REAL_SOURCE, DYNAMIC_SOURCE]
-
-# Possible simulation data augmentations.
-VORTEX_AUGMENTATION = 'vortex'
-VISCOUS_AUGMENTATION = 'viscous'
-GRAVITY_AUGMENTATION = 'gravity'
-AUGMENTED_FORCE_TYPES = [VORTEX_AUGMENTATION, VISCOUS_AUGMENTATION,
- GRAVITY_AUGMENTATION]
-
-# Possible inertial parameterizations to learn for the elbow system.
-# The options are:
-# 0 - none (0 parameters)
-# 1 - masses (n_bodies - 1 parameters)
-# 2 - CoMs (3*n_bodies parameters)
-# 3 - CoMs and masses (4*n_bodies - 1 parameters)
-# 4 - all (10*n_bodies - 1 parameters)
-INERTIA_PARAM_CHOICES = [str(i) for i in range(5)]
-INERTIA_PARAM_DESCRIPTIONS = [
- 'learn no inertial parameters (0 * n_bodies)',
- 'learn only masses and not the first mass (n_bodies - 1)',
- 'learn only centers of mass (3 * n_bodies)',
- 'learn masses (except first) and centers of mass (4 * n_bodies - 1)',
- 'learn all parameters (except first mass) (10 * n_bodies - 1)']
-INERTIA_PARAM_OPTIONS = ['none', 'masses', 'CoMs', 'CoMs and masses', 'all']
-
-
-# File management.
-CUBE_DATA_ASSET = 'contactnets_cube'
-ELBOW_DATA_ASSET = 'contactnets_elbow'
-CUBE_BOX_URDF_ASSET = 'contactnets_cube.urdf'
-CUBE_MESH_URDF_ASSET = 'contactnets_cube_mesh.urdf'
-ELBOW_BOX_URDF_ASSET = 'contactnets_elbow.urdf'
-ELBOW_MESH_URDF_ASSET = 'contactnets_elbow_mesh.urdf'
-ASYMMETRIC_URDF_ASSET = 'contactnets_asymmetric.urdf'
-
-REAL_DATA_ASSETS = {CUBE_SYSTEM: CUBE_DATA_ASSET, ELBOW_SYSTEM: ELBOW_DATA_ASSET}
-
-MESH_TYPE = 'mesh'
-BOX_TYPE = 'box'
-POLYGON_TYPE = 'polygon'
-GEOMETRY_TYPES = [BOX_TYPE, MESH_TYPE, POLYGON_TYPE]
-
-CUBE_URDFS = {MESH_TYPE: CUBE_MESH_URDF_ASSET,
- BOX_TYPE: CUBE_BOX_URDF_ASSET,
- POLYGON_TYPE: CUBE_MESH_URDF_ASSET}
-ELBOW_URDFS = {MESH_TYPE: ELBOW_MESH_URDF_ASSET,
- BOX_TYPE: ELBOW_BOX_URDF_ASSET,
- POLYGON_TYPE: ELBOW_MESH_URDF_ASSET}
-ASYMMETRIC_URDFS = {MESH_TYPE: ASYMMETRIC_URDF_ASSET,
- POLYGON_TYPE: ASYMMETRIC_URDF_ASSET}
-TRUE_URDFS = {CUBE_SYSTEM: CUBE_URDFS, ELBOW_SYSTEM: ELBOW_URDFS,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_URDFS}
-
-
-CUBE_BOX_URDF_ASSET_BAD = 'contactnets_cube_bad_init.urdf'
-CUBE_BOX_URDF_ASSET_SMALL = 'contactnets_cube_small_init.urdf'
-CUBE_MESH_URDF_ASSET_SMALL = 'contactnets_cube_mesh_small_init.urdf'
-ELBOW_BOX_URDF_ASSET_BAD = 'contactnets_elbow_bad_init.urdf'
-ELBOW_BOX_URDF_ASSET_SMALL = 'contactnets_elbow_small_init.urdf'
-ELBOW_MESH_URDF_ASSET_SMALL = 'contactnets_elbow_mesh_small_init.urdf'
-CUBE_BOX_WRONG_URDFS = {'bad': CUBE_BOX_URDF_ASSET_BAD,
- 'small': CUBE_BOX_URDF_ASSET_SMALL}
-CUBE_MESH_WRONG_URDFS = {'small': CUBE_MESH_URDF_ASSET_SMALL}
-ELBOW_BOX_WRONG_URDFS = {'bad': ELBOW_BOX_URDF_ASSET_BAD,
- 'small': ELBOW_BOX_URDF_ASSET_SMALL}
-ELBOW_MESH_WRONG_URDFS = {'small': ELBOW_MESH_URDF_ASSET_SMALL}
-WRONG_BOX_URDFS = {CUBE_SYSTEM: CUBE_BOX_WRONG_URDFS,
- ELBOW_SYSTEM: ELBOW_BOX_WRONG_URDFS}
-WRONG_MESH_URDFS = {CUBE_SYSTEM: CUBE_MESH_WRONG_URDFS,
- ELBOW_SYSTEM: ELBOW_MESH_WRONG_URDFS}
-WRONG_URDFS_BY_GEOM_THEN_SYSTEM = {MESH_TYPE: WRONG_MESH_URDFS,
- POLYGON_TYPE: WRONG_MESH_URDFS,
- BOX_TYPE: WRONG_BOX_URDFS}
-
-REPO_DIR = os.path.normpath(
- git.Repo(search_parent_directories=True).git.rev_parse("--show-toplevel"))
-
-# Data configuration.
-DT = 0.0068
-
-# Generation configuration.
-CUBE_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, 0., 0., 0., 0., 0., -.075])
-ELBOW_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, np.pi, 0., 0., 0., 0., 0., -.075, 0.])
-ASYMMETRIC_X_0 = torch.tensor(
- [1., 0., 0., 0., 0., 0., 0.21 + .015, 0., 0., 0., 0., 0., -.075])
-X_0S = {CUBE_SYSTEM: CUBE_X_0, ELBOW_SYSTEM: ELBOW_X_0,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_X_0}
-CUBE_SAMPLER_RANGE = torch.tensor([
- 2 * np.pi, 2 * np.pi, 2 * np.pi, .03, .03, .015, 6., 6., 6., 1.5, 1.5, .075
-])
-ELBOW_SAMPLER_RANGE = torch.tensor([
- 2 * np.pi, 2 * np.pi, 2 * np.pi, .03, .03, .015, np.pi, 6., 6., 6., 1.5,
- 1.5, .075, 6.
-])
-ASYMMETRIC_SAMPLER_RANGE = torch.tensor([
- 2 * np.pi, 2 * np.pi, 2 * np.pi, .03, .03, .015, 6., 6., 6., 1.5, 1.5, .075
-])
-SAMPLER_RANGES = {
- CUBE_SYSTEM: CUBE_SAMPLER_RANGE,
- ELBOW_SYSTEM: ELBOW_SAMPLER_RANGE,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_SAMPLER_RANGE
-}
-TRAJECTORY_LENGTHS = {CUBE_SYSTEM: 80, ELBOW_SYSTEM: 120, ASYMMETRIC_SYSTEM: 80}
-
-# Training data configuration.
-T_PREDICTION = 1
-
-# Optimization configuration.
-CUBE_LR = 1e-3
-ELBOW_LR = 1e-3
-ASYMMETRIC_LR = 1e-3
-LRS = {CUBE_SYSTEM: CUBE_LR, ELBOW_SYSTEM: ELBOW_LR,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_LR}
-CUBE_WD = 0.0
-ELBOW_WD = 0.0 #1e-4
-ASYMMETRIC_WD = 0.0
-WDS = {CUBE_SYSTEM: CUBE_WD, ELBOW_SYSTEM: ELBOW_WD,
- ASYMMETRIC_SYSTEM: ASYMMETRIC_WD}
-DEFAULT_WEIGHT_RANGE = (1e-2, 1e2)
-EPOCHS = 200 # change this (originally 500)
-PATIENCE = 10 # change this (originally EPOCHS)
-
-WANDB_DEFAULT_PROJECT = 'dair_pll-examples'
-
-
-def main(storage_folder_name: str = "",
- run_name: str = "",
- system: str = CUBE_SYSTEM,
- source: str = REAL_SOURCE,
- structured: bool = True,
- contactnets: bool = True,
- geometry: str = MESH_TYPE,
- regenerate: bool = False,
- dataset_size: int = 512,
- inertia_params: str = '4',
- loss_variation: str = '0',
- true_sys: bool = False,
- wandb_project: str = WANDB_DEFAULT_PROJECT,
- w_pred: float = 1e0,
- w_comp: float = 1e0,
- w_diss: float = 1e0,
- w_pen: float = 1e0,
- w_res: float = 1e0,
- w_res_w: float = 1e0,
- do_residual: bool = False,
- additional_forces: str = None,
- g_frac: float = 1.0):
- """Execute ContactNets basic example on a system.
-
- Args:
- storage_folder_name: name of outer storage directory.
- run_name: name of experiment run.
- system: Which system to learn.
- source: Where to get data from.
- structured: Whether to learn structured parameters or an end-to-end deep network.
- contactnets: Whether to use ContactNets or prediction loss.
- geometry: How to represent geometry (box, mesh, or polygon).
- regenerate: Whether to save updated URDFs each epoch.
- dataset_size: Number of trajectories for train/val/test.
- inertia_params: Which inertial parameters to learn.
- loss_variation: Which ContactNets loss variation to use.
- true_sys: Whether to start with the "true" URDF or a poor initialization.
- wandb_project: What W&B project to store results under.
- w_pred: Weight of prediction term in ContactNets loss.
- w_comp: Weight of complementarity term in ContactNets loss.
- w_diss: Weight of dissipation term in ContactNets loss.
- w_pen: Weight of penetration term in ContactNets loss.
- w_res: Weight of residual norm regularization term in loss.
- w_res_w: Weight of residual weight regularization term in loss.
- do_residual: Whether to add a residual physics block.
- additional_forces: Optionally provide additional forces to augment any
- generated simulation data. Ignored if using real data.
- g_frac: Fraction of gravity to use with the initial model. Ignored
- unless additional_forces == 'gravity'.
- """
- # pylint: disable=too-many-locals, too-many-arguments
-
- print(f'Starting test under \'{storage_folder_name}\' ' \
- + f'with name \'{run_name}\':' \
- + f'\n\tPerforming on system: {system} \n\twith source: {source}' \
- + f'\n\twith structured parameterization: {structured}' \
- + f'\n\tusing ContactNets: {contactnets}' \
- + f'\n\twith geometry represented as: {geometry}' \
- + f'\n\tregenerate: {regenerate}' \
- + f'\n\tinertia learning mode: {inertia_params}' \
- + f'\n\twith description: {INERTIA_PARAM_OPTIONS[int(inertia_params)]}' \
- + f'\n\tloss variation: {loss_variation}' \
- + f'\n\twith description: {LOSS_VARIATIONS[int(loss_variation)]}' \
- + f'\n\tloss weights (pred, comp, diss, pen, res, res_w): ' \
- + f'({w_pred}, {w_comp}, {w_diss}, {w_pen}, {w_res}, {w_res_w})' \
- + f'\n\twith residual: {do_residual}' \
- + f'\n\tand starting with provided true_sys={true_sys}' \
- + f'\n\tinjecting into dynamics (if sim): {additional_forces}' \
- + f'\n\twith gravity fraction (if gravity): {g_frac}')
-
- simulation = source == SIM_SOURCE
- dynamic = source == DYNAMIC_SOURCE
-
- storage_name = os.path.join(REPO_DIR, 'results', storage_folder_name)
-
- # If this script is used in conjunction with pll_manager.py, then the file
- # management is taken care of there.
-
- print(f'\nStoring data at {file_utils.data_dir(storage_name)}')
- print(f'Storing results at {file_utils.run_dir(storage_name, run_name)}')
-
- # Next, build the configuration of the learning experiment.
-
- # If starting with true system, no need to train, since we probably just
- # want to generate statistics.
- num_epochs = 0 if true_sys else EPOCHS
-
- # Describes the optimizer settings; by default, the optimizer is Adam.
- optimizer_config = OptimizerConfig(lr=Float(LRS[system]),
- wd=Float(WDS[system]),
- patience=PATIENCE,
- epochs=num_epochs,
- batch_size=Int(int(dataset_size/2)))
-
- # Describes the ground truth system; infers everything from the URDF.
- # This is a configuration for a DrakeSystem, which wraps a Drake
- # simulation for the described URDFs.
- # First, select the URDFs.
- urdf_asset = TRUE_URDFS[system][geometry]
- urdf = file_utils.get_asset(urdf_asset)
- urdfs = {system: urdf}
- base_config = DrakeSystemConfig(urdfs=urdfs)
-
- # how to slice trajectories into training datapoints
- slice_config = TrajectorySliceConfig(
- t_prediction=1 if contactnets else T_PREDICTION)
-
- # Describes configuration of the data
- data_config = DataConfig(dt=DT,
- train_fraction=1.0 if dynamic else 0.5,
- valid_fraction=0.0 if dynamic else 0.25,
- test_fraction=0.0 if dynamic else 0.25,
- slice_config=slice_config,
- update_dynamically=dynamic)
-
- if structured:
- loss = MultibodyLosses.CONTACTNETS_LOSS if contactnets else \
- MultibodyLosses.PREDICTION_LOSS
-
- learnable_config = MultibodyLearnableSystemConfig(
- urdfs=urdfs, loss=loss, inertia_mode=int(inertia_params),
- loss_variation=int(loss_variation), w_pred=w_pred,
- w_comp = Float(w_comp, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_diss = Float(w_diss, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_pen = Float(w_pen, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_res = Float(w_res, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- w_res_w = Float(w_res_w, log=True, distribution=DEFAULT_WEIGHT_RANGE),
- do_residual=do_residual, represent_geometry_as=geometry,
- randomize_initialization = not true_sys, g_frac=g_frac)
-
- else:
- learnable_config = DeepLearnableSystemConfig(
- layers=4, hidden_size=256,
- nonlinearity=torch.nn.Tanh, model_constructor=MLP)
-
- # Combines everything into config for entire experiment.
- experiment_config = SupervisedLearningExperimentConfig(
- data_config=data_config,
- base_config=base_config,
- learnable_config=learnable_config,
- optimizer_config=optimizer_config,
- storage=storage_name,
- run_name=run_name,
- run_wandb=True,
- wandb_project=wandb_project,
- full_evaluation_period=EPOCHS if dynamic else 1,
- update_geometry_in_videos=True # ignored for deep learnable experiments
- )
-
- # Make experiment.
- experiment = DrakeMultibodyLearnableExperiment(experiment_config) \
- if structured else DrakeDeepLearnableExperiment(experiment_config)
-
- # Prepare data.
- x_0 = X_0S[system]
- if simulation:
- # For simulation, specify the following:
- data_generation_config = DataGenerationConfig(
- dt=DT,
- # timestep
- n_pop=dataset_size,
- # How many trajectories to simulate
- trajectory_length=TRAJECTORY_LENGTHS[system],
- # trajectory length
- x_0=x_0,
- # A nominal initial state
- sampler_type=UniformSampler,
- # use uniform distribution to sample ``x_0``
- sampler_ranges=SAMPLER_RANGES[system],
- # How much to vary initial states around ``x_0``
- noiser_type=GaussianWhiteNoiser,
- # Distribution of noise in trajectory data (Gaussian).
- static_noise=torch.zeros(x_0.nelement() - 1),
- # constant-in-time noise standard deviations (zero in this case)
- dynamic_noise=torch.zeros(x_0.nelement() - 1),
- # i.i.d.-in-time noise standard deviations (zero in this case)
- storage=storage_name
- # where to store trajectories
- )
-
- if additional_forces == None:
- data_generation_system = experiment.get_base_system()
- else:
- data_generation_system = experiment.get_augmented_system(
- additional_forces)
-
- generator = ExperimentDatasetGenerator(
- data_generation_system, data_generation_config)
- print(f'Generating (or getting existing) simulation trajectories.\n')
- generator.generate()
-
- else:
- # otherwise, specify directory with [T, n_x] tensor files saved as
- # 0.pt, 1.pt, ...
- # See :mod:`dair_pll.state_space` for state format.
- data_asset = REAL_DATA_ASSETS[system]
- import_directory = file_utils.get_asset(data_asset)
- print(f'Getting real trajectories from {import_directory}\n')
- file_utils.import_data_to_storage(storage_name,
- import_data_dir=import_directory,
- num=dataset_size)
-
- def regenerate_callback(epoch: int, learned_system: System,
- train_loss: Tensor,
- best_valid_loss: Tensor) -> None:
- default_epoch_callback(epoch, learned_system, train_loss,
- best_valid_loss)
- cast(MultibodyLearnableSystem, learned_system).generate_updated_urdfs(
- suffix='progress')
-
- # Trains system and saves final results.
- print(f'\nTraining the model.')
- learned_system, stats = experiment.generate_results(
- regenerate_callback if regenerate else default_epoch_callback)
-
- # # Save the final urdf.
- # if structured:
- # print(f'\nSaving the final learned URDF.')
- # learned_system = cast(MultibodyLearnableSystem, learned_system)
- # learned_system.generate_updated_urdfs(suffix='best')
- # else:
- # print(f'\nFinished training deep learnable; no URDF export.')
- print(f'Done!')
-
-
-
-
-@click.command()
-@click.argument('storage_folder_name')
-@click.argument('run_name')
-@click.option('--system',
- type=click.Choice(SYSTEMS, case_sensitive=True),
- default=CUBE_SYSTEM)
-@click.option('--source',
- type=click.Choice(DATA_SOURCES, case_sensitive=True),
- default=REAL_SOURCE)
-@click.option('--structured/--end-to-end',
- default=True,
- help="whether to train structured parameters or deep network.")
-@click.option('--contactnets/--prediction',
- default=True,
- help="whether to train on ContactNets or prediction loss.")
-@click.option('--geometry',
- type=click.Choice(GEOMETRY_TYPES, case_sensitive=True),
- default=MESH_TYPE,
- help="how to represent geometry.")
-@click.option('--regenerate/--no-regenerate',
- default=False,
- help="whether to save updated URDF's each epoch or not.")
-@click.option('--dataset-size',
- default=512,
- help="dataset size")
-@click.option('--inertia-params',
- type=click.Choice(INERTIA_PARAM_CHOICES),
- default='4',
- help="what inertia parameters to learn.")
-@click.option('--loss-variation',
- type=click.Choice(LOSS_VARIATION_NUMBERS),
- default='0',
- help="ContactNets loss variation")
-@click.option('--true-sys/--wrong-sys',
- default=False,
- help="whether to start with correct or poor URDF.")
-@click.option('--wandb-project',
- type = str,
- default=WANDB_DEFAULT_PROJECT,
- help="what W&B project to save results under.")
-@click.option('--w-pred',
- type=float,
- default=1e0,
- help="weight of prediction term in ContactNets loss")
-@click.option('--w-comp',
- type=float,
- default=1e0,
- help="weight of complementarity term in ContactNets loss")
-@click.option('--w-diss',
- type=float,
- default=1e0,
- help="weight of dissipation term in ContactNets loss")
-@click.option('--w-pen',
- type=float,
- default=1e0,
- help="weight of penetration term in ContactNets loss")
-@click.option('--w-res',
- type=float,
- default=1e0,
- help="weight of residual norm regularization term in loss")
-@click.option('--w-res-w',
- type=float,
- default=1e0,
- help="weight of residual weight regularization term in loss")
-@click.option('--residual/--no-residual',
- default=False,
- help="whether to include residual physics or not.")
-@click.option('--additional-forces',
- type = click.Choice(AUGMENTED_FORCE_TYPES),
- default=None,
- help="what kind of additional forces to augment simulation data.")
-@click.option('--g-frac',
- type=float,
- default=1e0,
- help="fraction of gravity constant to use.")
-def main_command(storage_folder_name: str, run_name: str, system: str,
- source: str, structured: bool, contactnets: bool,
- geometry: str, regenerate: bool, dataset_size: int,
- inertia_params: str, loss_variation: str, true_sys: bool,
- wandb_project: str, w_pred: float, w_comp: float,
- w_diss: float, w_pen: float, w_res: float, w_res_w: float,
- residual: bool, additional_forces: str, g_frac: float):
- """Executes main function with argument interface."""
- assert storage_folder_name is not None
- assert run_name is not None
-
- main(storage_folder_name, run_name, system, source, structured, contactnets,
- geometry, regenerate, dataset_size, inertia_params, loss_variation,
- true_sys, wandb_project, w_pred, w_comp, w_diss, w_pen, w_res, w_res_w,
- residual, additional_forces, g_frac)
-
-
-if __name__ == '__main__':
- main_command() # pylint: disable=no-value-for-parameter
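For reference, a hedged sketch of how the entry point deleted above could be
exercised programmatically through click's test runner. The module name
`contactnets_simple` and the storage/run names are assumptions for
illustration, not confirmed by this diff, and invoking it would launch a real
(if tiny) training run.

    from click.testing import CliRunner

    from contactnets_simple import main_command  # module name assumed

    runner = CliRunner()
    result = runner.invoke(main_command, [
        'dev_cube',            # storage_folder_name (made up)
        'dc00',                # run_name (made up)
        '--system=cube',
        '--source=simulation',
        '--contactnets',
        '--geometry=polygon',
        '--dataset-size=4',
        '--wrong-sys',
    ])
    print(result.exit_code, result.output)
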
diff --git a/dair_pll_old/examples/dynamic_meshcat.py b/dair_pll_old/examples/dynamic_meshcat.py
deleted file mode 100644
index 7a647a4..0000000
--- a/dair_pll_old/examples/dynamic_meshcat.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""Script that finds the latest meshcat server URL for the current experiment."""
-
-
-import os
-import git
-import pdb
-
-from dair_pll import file_utils
-
-
-EXP_KEY = 'PLL_EXPERIMENT'
-
-
-def main_command():
- # First, get the latest meshcat server URL for the current experiment.
- pll_name = os.getenv(EXP_KEY)
-
- mc_log_name = file_utils.LOG_DIR + f'/meshcat_{pll_name}.txt'
- meshcat_log = open(mc_log_name, 'r').read()
-
- new_url = meshcat_log.split('web_url=')[-1].split('\n')[0]
-
- # Second, write a new static html file using the updated URL.
- base_html = file_utils.get_asset('static.html')
- script = open(base_html, 'r').read()
-
- script = script.replace('http://127.0.0.1:7000/static/', new_url)
-
- repo = git.Repo(search_parent_directories=True)
- repo_dir = repo.git.rev_parse("--show-toplevel")
- storage_name = os.path.join(repo_dir, 'results', pll_name)
- storage_loc = file_utils.storage_dir(storage_name)
-
- out_file = storage_loc + f'/static.html'
- with open(out_file, "w") as of:
- of.write(script)
-
-
-if __name__ == '__main__':
- main_command()
\ No newline at end of file
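The script deleted above rewrites a static meshcat page by pulling the latest
web URL out of the experiment's meshcat log. A small sketch of that extraction
step on a fabricated log excerpt (the real log is written by meshcat-server):

    # Fabricated log excerpt; only the 'web_url=' line matters here.
    meshcat_log = ('zmq_url=tcp://127.0.0.1:6000\n'
                   'web_url=http://127.0.0.1:7000/static/\n')
    new_url = meshcat_log.split('web_url=')[-1].split('\n')[0]
    assert new_url == 'http://127.0.0.1:7000/static/'
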
diff --git a/dair_pll_old/examples/pll_manager.py b/dair_pll_old/examples/pll_manager.py
deleted file mode 100644
index 686fde2..0000000
--- a/dair_pll_old/examples/pll_manager.py
+++ /dev/null
@@ -1,1141 +0,0 @@
-"""Manager for starting GRASP cluster PLL jobs."""
-import os
-import os.path as op
-import git
-import click
-import subprocess
-import time
-import pdb
-import fnmatch
-import wandb
-from typing import List, Optional
-
-from dair_pll import file_utils
-
-from dair_pll.multibody_learnable_system import LOSS_INERTIA_AGNOSTIC, \
- LOSS_PLL_ORIGINAL, LOSS_BALANCED, LOSS_POWER, LOSS_CONTACT_VELOCITY, \
- LOSS_VARIATIONS, LOSS_VARIATION_NUMBERS
-
-
-
-# Possible categories for automatic run name generation.
-TEST = 'test'
-DEV = 'dev'
-SWEEP = 'sweep'
-HYPERPARAMETER_SIM = 'hyperparam'
-HYPERPARAMETER_REAL = 'hpr'  # shortened from 'hpreal'
-SIM_AUG_HYPERPARAMETER = 'shp'
-GRAVITY_SWEEP = 'gravity_sweep'
-CATEGORIES = [TEST, DEV, SWEEP, HYPERPARAMETER_SIM, HYPERPARAMETER_REAL,
- GRAVITY_SWEEP, SIM_AUG_HYPERPARAMETER]
-
-# Possible dataset types
-SIM_SOURCE = 'simulation'
-REAL_SOURCE = 'real'
-DYNAMIC_SOURCE = 'dynamic'
-DATA_SOURCES = [SIM_SOURCE, REAL_SOURCE, DYNAMIC_SOURCE]
-
-# Possible systems on which to run PLL
-CUBE_SYSTEM = 'cube'
-ELBOW_SYSTEM = 'elbow'
-ASYMMETRIC_SYSTEM = 'asymmetric'
-SYSTEMS = [CUBE_SYSTEM, ELBOW_SYSTEM, ASYMMETRIC_SYSTEM]
-
-# Possible simulation data augmentations.
-NO_AUGMENTATION = None
-VORTEX_AUGMENTATION = 'vortex'
-VISCOUS_AUGMENTATION = 'viscous'
-GRAVITY_AUGMENTATION = 'gravity'
-AUGMENTED_FORCE_TYPES = [NO_AUGMENTATION, VORTEX_AUGMENTATION,
- VISCOUS_AUGMENTATION, GRAVITY_AUGMENTATION]
-
-# Possible run name glob (fnmatch-style) patterns.
-CUBE_TEST_PATTERN = 'tc??'
-ELBOW_TEST_PATTERN = 'te??'
-CUBE_DEV_PATTERN = 'dc??'
-ELBOW_DEV_PATTERN = 'de??'
-CUBE_SWEEP_PATTERN = 'sc??'
-ELBOW_SWEEP_PATTERN = 'se??'
-RUN_PREFIX_TO_FOLDER_NAME = {'tc': f'{TEST}_{CUBE_SYSTEM}',
- 'te': f'{TEST}_{ELBOW_SYSTEM}',
- 'dc': f'{DEV}_{CUBE_SYSTEM}',
- 'de': f'{DEV}_{ELBOW_SYSTEM}',
- 'sc': f'{SWEEP}_{CUBE_SYSTEM}',
- 'se': f'{SWEEP}_{ELBOW_SYSTEM}',
- 'hc': f'{HYPERPARAMETER_SIM}_{CUBE_SYSTEM}',
- 'he': f'{HYPERPARAMETER_SIM}_{ELBOW_SYSTEM}',
- 'ic': f'{HYPERPARAMETER_REAL}_{CUBE_SYSTEM}',
- 'ie': f'{HYPERPARAMETER_REAL}_{ELBOW_SYSTEM}'}
-
-# Possible geometry types
-BOX_TYPE = 'box'
-MESH_TYPE = 'mesh'
-POLYGON_TYPE = 'polygon'
-GEOMETRY_TYPES = [BOX_TYPE, MESH_TYPE, POLYGON_TYPE]
-
-# Possible results management options
-OVERWRITE_DATA_AND_RUNS = 'data_and_runs'
-OVERWRITE_SINGLE_RUN_KEEP_DATA = 'run'
-OVERWRITE_NOTHING = 'nothing'
-OVERWRITE_RESULTS = [OVERWRITE_DATA_AND_RUNS,
- OVERWRITE_SINGLE_RUN_KEEP_DATA,
- OVERWRITE_NOTHING]
-
-# Possible W&B project names.
-WANDB_PROJECT_CLUSTER = 'dair_pll-cluster'
-WANDB_PROJECT_LOCAL = 'dair_pll-dev'
-WANDB_PROJECTS = {True: WANDB_PROJECT_LOCAL,
- False: WANDB_PROJECT_CLUSTER}
-
-# Possible inertial parameterizations to learn for the elbow system.
-# The options are:
-# 0 - learn no inertial parameters (0 for elbow)
-# 1 - learn the mass of second and beyond links (1 for elbow)
-# 2 - learn the locations of all links' centers of mass (6 for elbow)
-# 3 - learn second and beyond masses and all centers of mass (7 for elbow)
-# 4 - learn all parameters except mass of first link (19 for elbow)
-INERTIA_PARAM_CHOICES = [str(i) for i in range(5)]
-INERTIA_PARAM_DESCRIPTIONS = [
- 'learn no inertial parameters (0 * n_bodies)',
- 'learn only masses and not the first mass (n_bodies - 1)',
- 'learn only locations of centers of mass (3 * n_bodies)',
- 'learn masses (except first) and centers of mass (4 * n_bodies - 1)',
- 'learn all parameters (except first mass) (10 * n_bodies - 1)']
-INERTIA_PARAM_OPTIONS = ['none', 'masses', 'CoMs', 'CoMs and masses', 'all']
-
-WANDB_NO_GROUP_MESSAGE = \
- 'echo "Not exporting WANDB_RUN_GROUP since restarting."'
-
-# Weights to try in hyperparameter search
-HYPERPARAMETER_WEIGHTS = [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3]
-
-# Gravity fractions to try
-GRAVITY_FRACTIONS = [0., 0.5, 1., 1.5, 2.]
-
-
-def create_instance(storage_folder_name: str, run_name: str,
- system: str = CUBE_SYSTEM,
- source: str = SIM_SOURCE,
- structured: bool = True,
- contactnets: bool = True,
- geometry: str = BOX_TYPE,
- regenerate: bool = True,
- dataset_size: int = 0,
- local: bool = False,
- inertia_params: str = '4',
- loss_variation: str = '0',
- true_sys: bool = True,
- restart: bool = False,
- wandb_group_id: str = None,
- w_pred: float = 1e0,
- w_comp: float = 1e0,
- w_diss: float = 1e0,
- w_pen: float = 1e0,
- w_res: float = 1e0,
- w_res_w: float = 1e0,
- do_residual: bool = False,
- additional_forces: str = None,
- g_frac: float = 1.0):
- # Do some checks on the requested parameter combinations.
- if not additional_forces in [NO_AUGMENTATION, GRAVITY_AUGMENTATION]:
- assert source==SIM_SOURCE, "Must use simulation for augmented dynamics."
- if system == ASYMMETRIC_SYSTEM:
- assert source==SIM_SOURCE, "Must use simulation for asymmetric object."
- if not structured:
- if geometry != POLYGON_TYPE:
- print("Use mesh type for end-to-end comparisons --> using polygon.")
- geometry = POLYGON_TYPE
- if not contactnets:
- print("Must use prediction loss with end-to-end model --> " + \
- "setting to prediction loss.")
- contactnets = False
- if not regenerate:
- print("Can't regenerate URDFs from end-to-end model --> " + \
- "no regeneration.")
- regenerate = False
- elif additional_forces != GRAVITY_AUGMENTATION:
- if g_frac != 1.0:
- print("No gravity augmentation --> setting g_frac to 1.")
- g_frac = 1.0
- if system==ASYMMETRIC_SYSTEM and geometry==BOX_TYPE:
- print("No box representation of asymmetric system --> " + \
- "using polygon.")
- geometry = POLYGON_TYPE
-
- print(f'Generating experiment {storage_folder_name}/{run_name}')
-
- if wandb_group_id is None:
- wandb_group_id = '' if restart else \
- f'{run_name}_{wandb.util.generate_id()}'
-
- base_file = 'startup'
- out_file = f'{base_file}_{storage_folder_name}_{run_name}.bash'
-
- # use local template if running locally
- base_file += '_local.bash' if local else '.bash'
-
- base_file = op.join(op.dirname(__file__), base_file)
- out_file = op.join(op.dirname(__file__), out_file)
-
-
- script = open(base_file, 'r').read()
-
- script = script.replace('{storage_folder_name}', storage_folder_name)
- script = script.replace('{run_name}', run_name)
- script = script.replace('{restart}', 'true' if restart else 'false')
- script = script.replace('{wandb_group_id}', wandb_group_id)
-
- train_options = ''
-
- if not restart:
- train_options = f' --system={system} --source={source}' + \
- f' --dataset-size={dataset_size}' + \
- f' --inertia-params={inertia_params}' + \
- f' --loss-variation={loss_variation}' + \
- f' --geometry={geometry}'
- train_options += ' --structured' if structured else ' --end-to-end'
- train_options += ' --contactnets' if contactnets else ' --prediction'
- train_options += ' --regenerate' if regenerate else ' --no-regenerate'
- train_options += ' --true-sys' if true_sys else ' --wrong-sys'
- train_options += f' --wandb-project={WANDB_PROJECTS[local]}'
- train_options += ' --residual' if do_residual else ' --no-residual'
- train_options += f' --additional-forces={additional_forces}' if \
- additional_forces != None else ''
-
- if structured:
- train_options += f' --w-res={w_res}'
- train_options += f' --w-res-w={w_res_w}'
- train_options += f' --g-frac={g_frac}'
- if structured and contactnets:
- train_options += f' --w-pred={w_pred}'
- train_options += f' --w-comp={w_comp}'
- train_options += f' --w-diss={w_diss}'
- train_options += f' --w-pen={w_pen}'
-
- script = script.replace('{train_args}', train_options)
-
- repo = git.Repo(search_parent_directories=True)
- git_folder = repo.git.rev_parse("--show-toplevel")
- git_folder = op.normpath(git_folder)
- script = script.replace('{pll_dir}', git_folder)
-
- commit_hash = repo.head.object.hexsha
- script = script.replace('{hash}', commit_hash)
-
- with open(out_file, "w") as of:
- of.write(script)
-
- train_cmd = ['bash', out_file] if local else ['sbatch', out_file]
- print(f'Creating and queuing {out_file}')
- ec = subprocess.run(train_cmd)
- print(f'Queued file.')
-
-
-def get_slurm_from_instances(instances: List[str], prefix='pll'):
- jobids = []
- for instance in instances:
- cmd = ['squeue', f'--user={os.getlogin()}', '--format', '%.18i',
- '--noheader', '--name', f'{prefix}_{instance}']
- ps = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- ps.wait()
- (out, err) = ps.communicate()
- out = out.decode('unicode-escape')
- out = ''.join(i for i in out if i.isdigit())
- if len(out) > 0:
- jobids.append(out)
- return jobids
-
-
-def attach_tb(name: str, local: bool = False):
- print(f'Working to attach tensorboard...')
- repo = git.Repo(search_parent_directories=True)
- git_folder = repo.git.rev_parse("--show-toplevel")
- git_folder = op.normpath(git_folder)
-
- if not local:
- tb_script = op.join(op.dirname(__file__), 'tensorboard.bash')
- tb_logfile = op.join(git_folder, 'logs', 'tensorboard_' + name + '.txt')
- os.system(f'rm {tb_logfile}')
- tboard_cmd = ['sbatch', f'--output={tb_logfile}', \
- f'--job-name=tb_{name}', tb_script, \
- op.join(git_folder, 'results', name, 'tensorboard'), name]
- ec = subprocess.run(tboard_cmd)
-
- # wait for and report tensorboard url
- print('Waiting on TensorBoard startup ...')
- lines = []
- while not op.exists(tb_logfile):
- time.sleep(0.1)
- while len(lines) < 1:
- with open(tb_logfile) as f:
- lines = f.readlines()
- time.sleep(1.0)
- print(f'\nTensorBoard running on {lines[0]}\n')
-
- else:
- tb_script = op.join(op.dirname(__file__), 'tensorboard_local.bash')
- tboard_cmd = ['bash', tb_script,
- op.join(git_folder, 'results', name, 'tensorboard'), name]
- print(f'Starting local TensorBoard command: {tboard_cmd}')
- ec = subprocess.run(tboard_cmd)
-
- print(f'Ran tensorboard command.')
-
-
-def take_care_of_file_management(overwrite: str, storage_name: str,
- run_name: str) -> None:
- """Take care of file management.
-
- Todo:
- This isn't fool-proof. Even if overwrite is set to nothing or to keep
- data, the data gets overwritten if the dataset size is different.
- """
- if overwrite == OVERWRITE_DATA_AND_RUNS:
- os.system(f'rm -r {file_utils.storage_dir(storage_name)}')
-
- elif overwrite == OVERWRITE_SINGLE_RUN_KEEP_DATA:
- os.system(f'rm -r {file_utils.run_dir(storage_name, run_name)}')
-
- elif overwrite == OVERWRITE_NOTHING:
- # Do nothing. If the experiment and run do not already exist, they will
- # be created; otherwise the existing experiment run is continued.
- pass
-
- else:
- raise NotImplementedError('Choose 1 of 3 result overwriting options')
-
-
-def check_for_git_updates(repo):
- """Check for git updates."""
- commits_ahead = sum(1 for _ in repo.iter_commits('origin/main..main'))
- if commits_ahead > 0:
- if not click.confirm(f'You are {commits_ahead} commits ahead of' \
- + f' main branch, continue?'):
- raise RuntimeError('Make sure you have pushed commits!')
-
- changed_files = [item.a_path for item in repo.index.diff(None)]
- if len(changed_files) > 0:
- print('Uncommitted changes to:')
- print(changed_files)
- if not click.confirm('Continue?'):
- raise RuntimeError('Make sure you have committed changes!')
-
-
-def experiment_class_command(category: str, run_name: str, system: str,
- structured: bool, contactnets: bool, geometry: str, regenerate: bool,
- local: bool, inertia_params: str, loss_variation: str, true_sys: bool,
- overwrite: str, w_pred: float, w_comp: float, w_diss: float, w_pen: float,
- w_res: float, w_res_w: float, dataset_exponent: int = None,
- last_run_num: int = None, number: int = 1, do_residual: bool = False,
- additional_forces: str = None, g_frac: float = 1.0):
- """Executes main function with argument interface."""
-
- assert category in CATEGORIES
- if dataset_exponent is not None:
- assert dataset_exponent in range(2, 10)
-
- def get_run_name_pattern(category, system):
- run_name_pattern = \
- 'i' if category == HYPERPARAMETER_REAL else \
- 'v' if (category == SWEEP and \
- additional_forces==VORTEX_AUGMENTATION) else \
- 'b' if (category == SWEEP and \
- additional_forces==VISCOUS_AUGMENTATION) else \
- 'a' if category==SIM_AUG_HYPERPARAMETER else \
- category[0] # t/d/h/i/v/b for test/dev/{hp sim/real}/vortex/viscous
- run_name_pattern += system[0] # c for cube or e for elbow
- run_name_pattern += '????' if category==HYPERPARAMETER_SIM else \
- '????' if category==HYPERPARAMETER_REAL else \
- '????' if category==SIM_AUG_HYPERPARAMETER else '??'
- run_name_pattern += '-?' if category in [SWEEP, GRAVITY_SWEEP] else ''
- return run_name_pattern
-
- # First, take care of data management and how to keep track of results.
- storage_folder_name = f'{category}_{system}'
- storage_folder_name += f'_{additional_forces}' if \
- not additional_forces in [None, GRAVITY_AUGMENTATION] else ''
- storage_folder_name += f'-{dataset_exponent}' if category==SWEEP else ''
-
- repo = git.Repo(search_parent_directories=True)
- repo_dir = repo.git.rev_parse("--show-toplevel")
- storage_name = op.join(repo_dir, 'results', storage_folder_name)
-
- if run_name is None:
- nums_to_display = 4 if category == HYPERPARAMETER_SIM else \
- 4 if category == HYPERPARAMETER_REAL else \
- 4 if category == SIM_AUG_HYPERPARAMETER else 2
- if last_run_num is None:
- runs_dir = file_utils.all_runs_dir(storage_name)
- runs_list = sorted(os.listdir(runs_dir))
- if len(runs_list) > 0:
- last_run_name = runs_list[-1]
- last_run_num = int(last_run_name.split('-')[0][2:])
- else:
- last_run_num = -1
- run_name = 'i' if category == HYPERPARAMETER_REAL else \
- 'v' if (category == SWEEP and \
- additional_forces==VORTEX_AUGMENTATION) else \
- 'b' if (category == SWEEP and \
- additional_forces==VISCOUS_AUGMENTATION) else \
- 'a' if category==SIM_AUG_HYPERPARAMETER else \
- category[0]
- run_name += 'c' if system==CUBE_SYSTEM else \
- 'e' if system==ELBOW_SYSTEM else 'a'
- run_name += str(last_run_num+1).zfill(nums_to_display)
- run_name += f'-{dataset_exponent}' if category==SWEEP else \
- f'-{GRAVITY_FRACTIONS.index(g_frac)}' \
- if category==GRAVITY_SWEEP else ''
-
- run_name_pattern = get_run_name_pattern(category, system)
- assert fnmatch.fnmatch(run_name, run_name_pattern)
-
- print(f'\nOverwrite set to {overwrite}.')
-
- if op.isdir(op.join(storage_name, 'runs', run_name)):
- if not click.confirm(f'\nPause! Experiment \'' \
- + f'{storage_folder_name}/{run_name}\'' \
- + f' already taken, continue?'):
- raise RuntimeError('Choose a new run name next time.')
- elif number > 1 and op.isdir(op.join(storage_name, 'runs', f'{run_name}-0')):
- raise RuntimeError(f'Found experiment run {storage_folder_name}/' + \
- f'{run_name}-0. Choose a new base name next time.')
-
- #UNDO changed 512 to 64
- dataset_size = 4 if category == TEST else \
- 64 if category == DEV else \
- 512 if category == HYPERPARAMETER_SIM else \
- 64 if category == HYPERPARAMETER_REAL else \
- 512 if category == GRAVITY_SWEEP else \
- 64 if category == SIM_AUG_HYPERPARAMETER else \
- 2**dataset_exponent # if category == SWEEP
-
- source = SIM_SOURCE if (category==SWEEP and additional_forces!=None) else \
- REAL_SOURCE if category == SWEEP else \
- REAL_SOURCE if category == HYPERPARAMETER_REAL else SIM_SOURCE
-
- names = [run_name] if number == 1 else \
- [f'{run_name}-{i}' for i in range(number)]
- wandb_group_id = None if number == 1 else \
- f'{run_name}_{wandb.util.generate_id()}'
-
- for run_name_i in names:
- take_care_of_file_management(overwrite, storage_name, run_name_i)
-
- # Continue creating PLL instance.
- create_instance(storage_folder_name, run_name_i, system=system,
- source=source, structured=structured,
- contactnets=contactnets, geometry=geometry,
- regenerate=regenerate, dataset_size=dataset_size,
- local=local, inertia_params=inertia_params,
- loss_variation=loss_variation, true_sys=true_sys,
- restart=False, wandb_group_id=wandb_group_id,
- w_pred=w_pred, w_comp=w_comp, w_diss=w_diss,
- w_pen=w_pen, w_res=w_res, w_res_w=w_res_w,
- do_residual=do_residual,
- additional_forces=additional_forces, g_frac=g_frac)
-
-
-@click.group()
-def cli():
- pass
-
-
-@cli.command('create')
-@click.argument('storage_folder_name')
-@click.argument('run_name')
-@click.option('--number',
- default=1,
- help="number of grouped identical experiments to run")
-@click.option('--system',
- type=click.Choice(SYSTEMS, case_sensitive=True),
- default=CUBE_SYSTEM)
-@click.option('--source',
- type=click.Choice(DATA_SOURCES, case_sensitive=True),
- default=SIM_SOURCE)
-@click.option('--structured/--end-to-end',
- default=True,
- help="whether to train structured parameters or deep network.")
-@click.option('--contactnets/--prediction',
- default=True,
- help="whether to train on ContactNets or prediction loss.")
-@click.option('--geometry',
- type=click.Choice(GEOMETRY_TYPES, case_sensitive=True),
- default=BOX_TYPE,
- help="how to represent geometry.")
-@click.option('--regenerate/--no-regenerate',
- default=False,
- help="whether to save updated URDF's each epoch or not.")
-@click.option('--dataset-size',
- default=512,
- help="dataset size")
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-@click.option('--inertia-params',
- type=click.Choice(INERTIA_PARAM_CHOICES),
- default='4',
- help="what inertia parameters to learn.")
-@click.option('--loss-variation',
- type=click.Choice(LOSS_VARIATION_NUMBERS),
- default='1',
- help="ContactNets loss variation")
-@click.option('--true-sys/--wrong-sys',
- default=False,
- help="whether to start with correct or poor URDF.")
-@click.option('--overwrite',
- type=click.Choice(OVERWRITE_RESULTS, case_sensitive=True),
- default=OVERWRITE_NOTHING)
-@click.option('--w-pred',
- type=float,
- default=1e0,
- help="weight of prediction term in ContactNets loss")
-@click.option('--w-comp',
- type=float,
- default=1e0,
- help="weight of complementarity term in ContactNets loss")
-@click.option('--w-diss',
- type=float,
- default=1e0,
- help="weight of dissipation term in ContactNets loss")
-@click.option('--w-pen',
- type=float,
- default=1e0,
- help="weight of penetration term in ContactNets loss")
-@click.option('--w-res',
- type=float,
- default=1e0,
- help="weight of residual norm regularization term in loss")
-@click.option('--w-res-w',
- type=float,
- default=1e0,
- help="weight of residual weight regularization term in loss")
-@click.option('--residual/--no-residual',
- default=False,
- help="whether to include residual physics or not.")
-@click.option('--additional-forces',
- type = click.Choice(AUGMENTED_FORCE_TYPES),
- default=NO_AUGMENTATION,
- help="what kind of additional forces to augment simulation data.")
-@click.option('--g-frac',
- type=float,
- default=1e0,
- help="fraction of gravity constant to use.")
-def create_command(storage_folder_name: str, run_name: str, number: int,
- system: str, source: str, structured: bool,
- contactnets: bool, geometry: str, regenerate: bool,
- dataset_size: int, local: bool, inertia_params: str,
- loss_variation: str, true_sys: bool, overwrite: str,
- w_pred: float, w_comp: float, w_diss: float, w_pen: float,
- w_res: float, w_res_w: float, residual: bool,
- additional_forces: str, g_frac: float):
- """Executes main function with argument interface."""
-
- # Check if git repository has uncommitted changes.
- repo = git.Repo(search_parent_directories=True)
- check_for_git_updates(repo)
-
- # First, take care of data management and how to keep track of results.
- # Check if experiment name was given and if it already exists. We also
- # don't want hyphens in the name since that's how the sweep instances
- # are created.
- assert loss_variation in LOSS_VARIATION_NUMBERS
- assert storage_folder_name is not None
- assert run_name is not None
- assert '-' not in run_name
- repo_dir = repo.git.rev_parse("--show-toplevel")
- storage_name = op.join(repo_dir, 'results', storage_folder_name)
-
- print(f'\nOverwrite set to {overwrite}.')
-
- if op.isdir(op.join(storage_name, 'runs', run_name)):
- if not click.confirm(f'\nPause! Experiment \'' \
- + f'{storage_folder_name}/{run_name}\'' \
- + f' already taken, continue?'):
- raise RuntimeError('Choose a new run name next time.')
- elif op.isdir(op.join(storage_name, 'runs', f'{run_name}-0')):
- raise RuntimeError(f'Found experiment run {storage_folder_name}/' + \
- f'{run_name}-0. Choose a new base name next time.')
- elif op.isdir(storage_name):
- dataset_size_in_folder = file_utils.get_numeric_file_count(
- file_utils.learning_data_dir(storage_name))
- if not click.confirm(f'\nPause! Experiment storage \'' \
- + f'{storage_folder_name}\'' \
- + f' already taken with {dataset_size_in_folder}' \
- + f' dataset size, continue?'):
- raise RuntimeError('Choose a new storage name next time.')
-
- names = [run_name] if number == 1 else \
- [f'{run_name}-{i}' for i in range(number)]
- wandb_group_id = None if number == 1 else \
- f'{run_name}_{wandb.util.generate_id()}'
-
- for run_name_i in names:
- take_care_of_file_management(overwrite, storage_name, run_name_i)
-
- # Continue creating PLL instance.
- create_instance(storage_folder_name, run_name_i, system, source,
- structured, contactnets, geometry, regenerate,
- dataset_size, local, inertia_params,
- loss_variation, true_sys, restart=False,
- wandb_group_id=wandb_group_id, w_pred=w_pred,
- w_comp=w_comp, w_diss=w_diss, w_pen=w_pen, w_res=w_res,
- w_res_w=w_res_w, do_residual=residual,
- additional_forces=additional_forces, g_frac=g_frac)
-
-
-@cli.command('test')
-@click.option('--run_name',
- type=str,
- default=None)
-@click.option('--number',
- default=1,
- help="number of grouped identical experiments to run")
-@click.option('--system',
- type=click.Choice(SYSTEMS, case_sensitive=True),
- default=CUBE_SYSTEM)
-@click.option('--structured/--end-to-end',
- default=True,
- help="whether to train structured parameters or deep network.")
-@click.option('--contactnets/--prediction',
- default=True,
- help="whether to train on ContactNets or prediction loss.")
-@click.option('--geometry',
- type=click.Choice(GEOMETRY_TYPES, case_sensitive=True),
- default=BOX_TYPE,
- help="how to represent geometry.")
-@click.option('--regenerate/--no-regenerate',
- default=False,
- help="whether to save updated URDF's each epoch or not.")
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-@click.option('--inertia-params',
- type=click.Choice(INERTIA_PARAM_CHOICES),
- default='4',
- help="what inertia parameters to learn.")
-@click.option('--loss-variation',
- type=click.Choice(LOSS_VARIATION_NUMBERS),
- default='1',
- help="ContactNets loss variation")
-@click.option('--true-sys/--wrong-sys',
- default=False,
- help="whether to start with correct or poor URDF.")
-@click.option('--overwrite',
- type=click.Choice(OVERWRITE_RESULTS, case_sensitive=True),
- default=OVERWRITE_NOTHING)
-@click.option('--w-pred',
- type=float,
- default=1e0,
- help="weight of prediction term in ContactNets loss")
-@click.option('--w-comp',
- type=float,
- default=1e0,
- help="weight of complementarity term in ContactNets loss")
-@click.option('--w-diss',
- type=float,
- default=1e0,
- help="weight of dissipation term in ContactNets loss")
-@click.option('--w-pen',
- type=float,
- default=1e0,
- help="weight of penetration term in ContactNets loss")
-@click.option('--w-res',
- type=float,
- default=1e0,
- help="weight of residual norm regularization in loss")
-@click.option('--w-res-w',
- type=float,
- default=1e0,
- help="weight of residual weight regularization term in loss")
-@click.option('--residual/--no-residual',
- default=False,
- help="whether to include residual physics or not.")
-@click.option('--additional-forces',
- type = click.Choice(AUGMENTED_FORCE_TYPES),
- default=NO_AUGMENTATION,
- help="what kind of additional forces to augment simulation data.")
-@click.option('--g-frac',
- type=float,
- default=1e0,
- help="fraction of gravity constant to use.")
-def test_command(run_name: str, number: int, system: str, structured: bool,
- contactnets: bool, geometry: str, regenerate: bool,
- local: bool, inertia_params: str, loss_variation: str,
- true_sys: bool, overwrite: str, w_pred: float, w_comp: float,
- w_diss: float, w_pen: float, w_res: float, w_res_w: float,
- residual: bool, additional_forces: str, g_frac: float):
- """Executes main function with argument interface."""
- # Check if git repository has uncommitted changes.
- repo = git.Repo(search_parent_directories=True)
- check_for_git_updates(repo)
-
- experiment_class_command('test', run_name, system=system,
- structured=structured, contactnets=contactnets,
- geometry=geometry, regenerate=regenerate,
- local=local, inertia_params=inertia_params,
- loss_variation=loss_variation, true_sys=true_sys,
- overwrite=overwrite, number=number, w_pred=w_pred,
- w_comp=w_comp, w_diss=w_diss, w_pen=w_pen,
- w_res=w_res, w_res_w=w_res_w, do_residual=residual,
- additional_forces=additional_forces, g_frac=g_frac)
-
-@cli.command('dev')
-@click.option('--run_name',
- type=str,
- default=None)
-@click.option('--number',
- default=1,
- help="number of grouped identical experiments to run")
-@click.option('--system',
- type=click.Choice(SYSTEMS, case_sensitive=True),
- default=CUBE_SYSTEM)
-@click.option('--structured/--end-to-end',
- default=True,
- help="whether to train structured parameters or deep network.")
-@click.option('--contactnets/--prediction',
- default=True,
- help="whether to train on ContactNets or prediction loss.")
-@click.option('--geometry',
- type=click.Choice(GEOMETRY_TYPES, case_sensitive=True),
- default=BOX_TYPE,
- help="how to represent geometry.")
-@click.option('--regenerate/--no-regenerate',
- default=False,
- help="whether to save updated URDF's each epoch or not.")
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-@click.option('--inertia-params',
- type=click.Choice(INERTIA_PARAM_CHOICES),
- default='4',
- help="what inertia parameters to learn.")
-@click.option('--loss-variation',
- type=click.Choice(LOSS_VARIATION_NUMBERS),
- default='1',
- help="ContactNets loss variation")
-@click.option('--true-sys/--wrong-sys',
- default=False,
- help="whether to start with correct or poor URDF.")
-@click.option('--overwrite',
- type=click.Choice(OVERWRITE_RESULTS, case_sensitive=True),
- default=OVERWRITE_NOTHING)
-@click.option('--w-pred',
- type=float,
- default=1e0,
- help="weight of prediction term in ContactNets loss")
-@click.option('--w-comp',
- type=float,
- default=1e0,
- help="weight of complementarity term in ContactNets loss")
-@click.option('--w-diss',
- type=float,
- default=1e0,
- help="weight of dissipation term in ContactNets loss")
-@click.option('--w-pen',
- type=float,
- default=1e0,
- help="weight of penetration term in ContactNets loss")
-@click.option('--w-res',
- type=float,
- default=1e0,
- help="weight of residual norm regularization in loss")
-@click.option('--w-res-w',
- type=float,
- default=1e0,
- help="weight of residual weight regularization term in loss")
-@click.option('--residual/--no-residual',
- default=False,
- help="whether to include residual physics or not.")
-@click.option('--additional-forces',
- type = click.Choice(AUGMENTED_FORCE_TYPES),
- default=NO_AUGMENTATION,
- help="what kind of additional forces to augment simulation data.")
-@click.option('--g-frac',
- type=float,
- default=1e0,
- help="fraction of gravity constant to use.")
-def dev_command(run_name: str, number: int, system: str, structured: bool,
- contactnets: bool, geometry: str, regenerate: bool, local: bool,
- inertia_params: str, loss_variation: str, true_sys: bool,
- overwrite: str, w_pred: float, w_comp: float, w_diss: float,
- w_pen: float, w_res: float, w_res_w: float, residual: bool,
- additional_forces: str, g_frac: float):
- """Executes main function with argument interface."""
- # Check if git repository has uncommitted changes.
- repo = git.Repo(search_parent_directories=True)
- check_for_git_updates(repo)
-
- experiment_class_command('dev', run_name, system=system,
- structured=structured, contactnets=contactnets,
- geometry=geometry, regenerate=regenerate,
- local=local, inertia_params=inertia_params,
- loss_variation=loss_variation, true_sys=true_sys,
- overwrite=overwrite, number=number, w_pred=w_pred,
- w_comp=w_comp, w_diss=w_diss, w_pen=w_pen,
- w_res=w_res, w_res_w=w_res_w, do_residual=residual,
- additional_forces=additional_forces, g_frac=g_frac)
-
-
-@cli.command('restart')
-@click.argument('run_name')
-@click.option('--storage-folder-name',
- type=str,
- default='')
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-def restart_command(run_name: str, storage_folder_name: str, local: bool):
- """Restarts a previously started run."""
-
- # Check if git repository has uncommitted changes.
- repo = git.Repo(search_parent_directories=True)
- check_for_git_updates(repo)
-
- # Figure out the storage folder name if not provided
- if storage_folder_name == '':
- assert len(run_name) == 4
- assert run_name[0] in ['t', 'd']
- assert run_name[1] in ['c', 'e']
- assert int(run_name[2:]) + 1
-
- storage_folder_name = RUN_PREFIX_TO_FOLDER_NAME[run_name[:2]]
-
- # Check that both the storage folder name and run name exist.
- repo_dir = repo.git.rev_parse("--show-toplevel")
- storage_name = op.join(repo_dir, 'results', storage_folder_name)
-
- if not op.isdir(op.join(storage_name, 'runs', run_name)):
- raise RuntimeError(f'Error! Could not find run {run_name} under ' + \
- f'{storage_folder_name}.')
-
- print(f'Found experiment run \'{run_name}\' in \'{storage_folder_name}\'')
-
- create_instance(storage_folder_name, run_name, local=local, restart=True)
-
-
-
-
-@cli.command('sweep')
-@click.option('--sweep-name',
- type=str,
- default=None)
-@click.option('--number',
- default=1,
- help="number of grouped identical experiments to run")
-@click.option('--system',
- type=click.Choice(SYSTEMS, case_sensitive=True),
- default=CUBE_SYSTEM)
-@click.option('--structured/--end-to-end',
- default=True,
- help="whether to train structured parameters or deep network.")
-@click.option('--contactnets/--prediction',
- default=True,
- help="whether to train on ContactNets or prediction loss.")
-@click.option('--geometry',
- type=click.Choice(GEOMETRY_TYPES, case_sensitive=True),
- default=BOX_TYPE,
- help="how to represent geometry.")
-@click.option('--regenerate/--no-regenerate',
- default=False,
- help="whether to save updated URDF's each epoch or not.")
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-@click.option('--inertia-params',
- type=click.Choice(INERTIA_PARAM_CHOICES),
- default='4',
- help="what inertia parameters to learn.")
-@click.option('--loss-variation',
- type=click.Choice(LOSS_VARIATION_NUMBERS),
- default='1',
- help="ContactNets loss variation")
-@click.option('--true-sys/--wrong-sys',
- default=False,
- help="whether to start with correct or poor URDF.")
-@click.option('--w-pred',
- type=float,
- default=1e0,
- help="weight of prediction term in ContactNets loss")
-@click.option('--w-comp',
- type=float,
- default=1e0,
- help="weight of complementarity term in ContactNets loss")
-@click.option('--w-diss',
- type=float,
- default=1e0,
- help="weight of dissipation term in ContactNets loss")
-@click.option('--w-pen',
- type=float,
- default=1e0,
- help="weight of penetration term in ContactNets loss")
-@click.option('--w-res',
- type=float,
- default=1e0,
- help="weight of residual norm regularization in loss")
-@click.option('--w-res-w',
- type=float,
- default=1e0,
- help="weight of residual weight regularization term in loss")
-@click.option('--residual/--no-residual',
- default=False,
- help="whether to include residual physics or not.")
-@click.option('--additional-forces',
- type = click.Choice(AUGMENTED_FORCE_TYPES),
- default=NO_AUGMENTATION,
- help="what kind of additional forces to augment simulation data.")
-@click.option('--g-frac',
- type=float,
- default=1e0,
- help="fraction of gravity constant to use.")
-@click.option('--min-exponent',
- type=int,
- default=2,
- help="minimum dataset exponent to use")
-def sweep_command(sweep_name: str, number: int, system: str, structured: bool,
- contactnets: bool, geometry: str, regenerate: bool,
- local: bool, inertia_params: str, loss_variation: str,
- true_sys: bool, w_pred: float, w_comp: float, w_diss: float,
- w_pen: float, w_res: float, w_res_w: float, residual: bool,
- additional_forces: str, g_frac: float, min_exponent: int):
- """Starts a series of instances, sweeping over dataset size."""
- assert sweep_name is None or '-' not in sweep_name
-
- # Check if git repository has uncommitted changes.
- repo = git.Repo(search_parent_directories=True)
- check_for_git_updates(repo)
-
- if additional_forces == GRAVITY_AUGMENTATION:
- print('Gravity augmentation --> sweeping over gravity instead of ' + \
- 'dataset size.')
- category = GRAVITY_SWEEP
- else:
- category = SWEEP
-
- # First determine what run number to use so they are consistent for each
- # dataset size.
- last_run_num = -1
- repo = git.Repo(search_parent_directories=True)
- repo_dir = repo.git.rev_parse("--show-toplevel")
- if (category == GRAVITY_SWEEP) or (additional_forces == None):
- partial_storage_name = op.join(repo_dir, 'results', f'{category}_{system}')
- else:
- partial_storage_name = op.join(repo_dir, 'results',
- f'{category}_{system}_{additional_forces}')
-
- sweep_range = range(2, 10) if category==SWEEP else \
- range(len(GRAVITY_FRACTIONS))
- for sweep_i in sweep_range:
- storage_name = f'{partial_storage_name}-{sweep_i}'
- if op.isdir(storage_name):
- runs_dir = file_utils.all_runs_dir(storage_name)
- runs_list = sorted(os.listdir(runs_dir))
- if len(runs_list) > 0:
- last_run_num = max(last_run_num, int(runs_list[-1][2:4]))
-
- if op.isdir(partial_storage_name):
- runs_dir = file_utils.all_runs_dir(partial_storage_name)
- runs_list = sorted(os.listdir(runs_dir))
- if len(runs_list) > 0:
- last_run_num = max(last_run_num, int(runs_list[-1][2:4]))
-
- # Automatically set the hyperparameter weights.
- w_pred = 1e0
- W_COMP_BY_LOSS_VAR = {3: 0.01, 1: 0.001, 0: 1.0}
- W_DISS_BY_LOSS_VAR = {3: 0.0001, 1: 0.1, 0: 1.0}
- W_PEN_BY_LOSS_VAR = {3: 1000, 1: 100, 0: 1.0}
- # W_RES_BY_LOSS_VAR = {3: 1000, 1: 10, 0: 1000}
- # W_RES_W_BY_LOSS_VAR = {3: 0.0001, 1: 10, 0: 1}
- W_RES_BY_LOSS_VAR = {3: 0.001, 1: 1, 0: 1.}
- W_RES_W_BY_LOSS_VAR = {3: 0., 1: 0.1, 0: 0.}
-
- loss_variation = 0 if not contactnets else int(loss_variation)
- w_comp = W_COMP_BY_LOSS_VAR[loss_variation]
- w_diss = W_DISS_BY_LOSS_VAR[loss_variation]
- w_pen = W_PEN_BY_LOSS_VAR[loss_variation]
- w_res = W_RES_BY_LOSS_VAR[loss_variation]
- w_res_w = W_RES_W_BY_LOSS_VAR[loss_variation]
-
- print(f'Using hyperparameters: ({w_pred}, {w_comp}, {w_diss}, {w_pen}, {w_res}, {w_res_w})')
- print(f'Will create experiment number: {last_run_num+1}')
- if not click.confirm('Continue?'):
- raise RuntimeError("Figure out experiment numbers next time.")
-
- if category==SWEEP:
- # Create a pll instance for every dataset size from 4 (or more) to 512
- for dataset_exponent in range(min_exponent, 10):
- experiment_class_command(category, sweep_name, system=system,
- structured=structured,
- contactnets=contactnets,
- geometry=geometry, regenerate=regenerate,
- local=local, inertia_params=inertia_params,
- loss_variation=loss_variation,
- true_sys=true_sys,
- dataset_exponent=dataset_exponent,
- last_run_num=last_run_num,
- overwrite=OVERWRITE_NOTHING,
- number=number, w_pred=w_pred,
- w_comp=w_comp, w_diss=w_diss, w_pen=w_pen,
- w_res=w_res, w_res_w=w_res_w,
- do_residual=residual,
- additional_forces=additional_forces,
- g_frac=g_frac)
- elif category==GRAVITY_SWEEP:
- # Create a pll instance for every gravity fraction. Use full dataset.
- dataset_exponent = 9
- for g_frac in GRAVITY_FRACTIONS:
- experiment_class_command(category, sweep_name, system=system,
- structured=structured,
- contactnets=contactnets,
- geometry=geometry, regenerate=regenerate,
- local=local, inertia_params=inertia_params,
- loss_variation=loss_variation,
- true_sys=true_sys,
- dataset_exponent=dataset_exponent,
- last_run_num=last_run_num,
- overwrite=OVERWRITE_NOTHING,
- number=number, w_pred=w_pred,
- w_comp=w_comp, w_diss=w_diss, w_pen=w_pen,
- w_res=w_res, w_res_w=w_res_w,
- do_residual=residual,
- additional_forces=additional_forces,
- g_frac=g_frac)
-
-
-
-@cli.command('hyperparam')
-@click.option('--hp_name',
- type=str,
- default=None)
-@click.option('--number',
- default=1,
- help="number of grouped identical experiments to run")
-@click.option('--system',
- type=click.Choice(SYSTEMS, case_sensitive=True),
- default=CUBE_SYSTEM)
-@click.option('--source',
- type=click.Choice(DATA_SOURCES, case_sensitive=True),
- default=SIM_SOURCE)
-@click.option('--contactnets/--prediction',
- default=True,
- help="whether to train on ContactNets or prediction loss.")
-@click.option('--geometry',
- type=click.Choice(GEOMETRY_TYPES, case_sensitive=True),
- default=BOX_TYPE,
- help="how to represent geometry.")
-@click.option('--regenerate/--no-regenerate',
- default=False,
- help="whether to save updated URDF's each epoch or not.")
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-@click.option('--inertia-params',
- type=click.Choice(INERTIA_PARAM_CHOICES),
- default='4',
- help="what inertia parameters to learn.")
-@click.option('--true-sys/--wrong-sys',
- default=False,
- help="whether to start with correct or poor URDF.")
-@click.option('--residual/--no-residual',
- default=False,
- help="whether to include residual physics or not.")
-@click.option('--additional-forces',
- type = click.Choice(AUGMENTED_FORCE_TYPES),
- default=NO_AUGMENTATION,
- help="what kind of additional forces to augment simulation data.")
-def hyperparameter_command(hp_name: str, number: int, system: str, source: str,
- contactnets: bool, geometry: str, regenerate: bool,
- local: bool, inertia_params: str, true_sys: bool,
- residual: bool, additional_forces: str):
- """Starts a series of instances, sweeping over dataset size."""
- assert hp_name is None or '-' not in hp_name
-
- # Check if git repository has uncommitted changes.
- repo = git.Repo(search_parent_directories=True)
- check_for_git_updates(repo)
-
- # Storage category prefix. Earlier variants used 'hpreal' ("hyperparameter
- # real") for real data and 'hyperparam' for simulation data; 'shp' is the
- # sim-augmentation hyperparameter category.
- experiment_name = 'shp'
-
- # First determine what run number to use so they are consistent for each
- # dataset size.
- last_run_num = -1
- repo = git.Repo(search_parent_directories=True)
- repo_dir = repo.git.rev_parse("--show-toplevel")
- storage_name = op.join(repo_dir, 'results', f'{experiment_name}_{system}')
- storage_name += f'_{additional_forces}' if additional_forces != None else ''
-
- if op.isdir(storage_name):
- runs_dir = file_utils.all_runs_dir(storage_name)
- runs_list = sorted(os.listdir(runs_dir))
- if len(runs_list) > 0:
- last_run_num = max(last_run_num, int(runs_list[-1][2:6]))
-
- print(f'Will create experiment number: {last_run_num+1}')
- if not click.confirm('Continue?'):
- raise RuntimeError("Figure out experiment numbers next time.")
-
- # Sweep the two residual regularization weights over loss variations 0, 1,
- # and 3 (variation 2 is skipped); the complementarity, dissipation, and
- # penetration weights are fixed per loss variation below.
- w_pred = 1e0
- w_comp_by_loss_var = {3: 0.01, 1: 0.001, 0: 1.0}
- w_diss_by_loss_var = {3: 0.0001, 1: 0.1, 0: 1.0}
- w_pen_by_loss_var = {3: 1000, 1: 100, 0: 1.0}
- for w_res in HYPERPARAMETER_WEIGHTS:
- for w_res_w in HYPERPARAMETER_WEIGHTS:
- # for w_pen in HYPERPARAMETER_WEIGHTS:
- # if w_comp == w_diss == w_pen == 1e0:
- # # Already ran many tests with (1, 1, 1, 1) weights, so can
- # # skip repeating this hyperparameter set.
- # continue
- # if w_comp > 1:
- # continue
- # if w_diss > 1:
- # continue
- # if w_pen < 1e-2:
- # continue
-
- for loss_variation in [0, 1, 3]:
- contactnets=False if loss_variation==0 else True
-
- w_comp = w_comp_by_loss_var[loss_variation]
- w_diss = w_diss_by_loss_var[loss_variation]
- w_pen = w_pen_by_loss_var[loss_variation]
-
- experiment_class_command(
- experiment_name, hp_name, system=system,
- structured=True, contactnets=contactnets, geometry=geometry,
- regenerate=regenerate, local=local,
- inertia_params=inertia_params,
- loss_variation=loss_variation, true_sys=true_sys,
- last_run_num=last_run_num, overwrite=OVERWRITE_NOTHING,
- number=number, w_pred=w_pred, w_comp=w_comp,
- w_diss=w_diss, w_pen=w_pen, w_res=w_res, w_res_w=w_res_w,
- do_residual=residual, additional_forces=additional_forces)
- last_run_num += 1
-
-
-
-@cli.command('detach')
-@click.argument('instance')
-def detach(instance: str):
- """Deletes Tensorboard task associated with experiment name."""
- jobid = get_slurm_from_instances([instance], prefix='tb')[0]
- os.system(f'scancel {jobid}')
-
-
-@cli.command('attach')
-@click.argument('instance')
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-def attach(instance: str, local: bool):
- """Attaches Tensorboard task to experiment name."""
- attach_tb(instance, local)
-
-
-
-if __name__ == '__main__':
- cli()
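The manager deleted above leans on a run-naming convention: a two-letter
category/system prefix, a zero-padded index, and an optional '-N' suffix for
sweep instances. A minimal sketch of that convention with illustrative values
(only the 'sc' -> 'sweep_cube' mapping is taken from the script itself):

    import fnmatch

    RUN_PREFIX_TO_FOLDER_NAME = {'sc': 'sweep_cube'}  # subset, for illustration

    def next_run_name(prefix: str, last_run_num: int, digits: int = 2,
                      sweep_suffix: str = '') -> str:
        """Build the next run name, e.g. 'sc05-3' for a cube sweep instance."""
        return f'{prefix}{str(last_run_num + 1).zfill(digits)}{sweep_suffix}'

    name = next_run_name('sc', 4, sweep_suffix='-3')
    assert name == 'sc05-3'
    assert fnmatch.fnmatch(name, 'sc??-?')
    assert RUN_PREFIX_TO_FOLDER_NAME[name[:2]] == 'sweep_cube'
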
diff --git a/dair_pll_old/examples/restart_run.py b/dair_pll_old/examples/restart_run.py
deleted file mode 100644
index c669e8e..0000000
--- a/dair_pll_old/examples/restart_run.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""Simple ContactNets/differentiable physics learning examples."""
-# pylint: disable=E1103
-import os
-import time
-from typing import cast
-
-import sys
-import pdb
-
-import click
-import numpy as np
-import torch
-from torch import Tensor
-import pickle
-import git
-
-from dair_pll import file_utils
-from dair_pll.drake_experiment import DrakeMultibodyLearnableExperiment
-from dair_pll.experiment import default_epoch_callback
-from dair_pll.experiment_config import SupervisedLearningExperimentConfig
-from dair_pll.multibody_learnable_system import MultibodyLearnableSystem
-from dair_pll.system import System
-
-
-
-REPO_DIR = os.path.normpath(
- git.Repo(search_parent_directories=True).git.rev_parse("--show-toplevel"))
-
-
-
-def main(storage_folder_name: str = "", run_name: str = "", regenerate: bool = True):
- """Restart a ContactNets experiment run.
-
- Args:
- storage_folder_name: name of outer storage directory.
- run_name: name of experiment run.
- regenerate: whether to save updated URDFs each epoch.
- """
- storage_name = os.path.join(REPO_DIR, 'results', storage_folder_name)
-
- # Combines everything into config for entire experiment.
- experiment_config = file_utils.load_configuration(storage_name, run_name)
- print(f'Loaded original experiment configuration.')
-
- # Makes experiment.
- experiment = DrakeMultibodyLearnableExperiment(experiment_config)
-
- def regenerate_callback(epoch: int, learned_system: System,
- train_loss: Tensor,
- best_valid_loss: Tensor) -> None:
- default_epoch_callback(epoch, learned_system, train_loss,
- best_valid_loss)
- cast(MultibodyLearnableSystem, learned_system).generate_updated_urdfs(
- 'progress')
-
- # Trains system and saves final results.
- print(f'\nTraining the model.')
- learned_system, stats = experiment.generate_results(
- regenerate_callback if regenerate else default_epoch_callback)
-
- # Save the final urdf.
- print(f'\nSaving the final learned URDF.')
- learned_system = cast(MultibodyLearnableSystem, learned_system)
- learned_system.generate_updated_urdfs('best')
- print(f'Done!')
-
-
-
-@click.command()
-@click.argument('storage_folder_name')
-@click.argument('run_name')
-def main_command(storage_folder_name: str, run_name: str):
- """Executes main function with argument interface."""
- assert storage_folder_name is not None
- assert run_name is not None
-
- main(storage_folder_name, run_name)
-
-
-if __name__ == '__main__':
- main_command() # pylint: disable=no-value-for-parameter
diff --git a/dair_pll_old/examples/rss_plot.py b/dair_pll_old/examples/rss_plot.py
deleted file mode 100644
index ae84341..0000000
--- a/dair_pll_old/examples/rss_plot.py
+++ /dev/null
@@ -1,260 +0,0 @@
-from collections import defaultdict
-import sys
-
-import json
-import math
-import os
-import os.path as op
-import pdb # noqa
-import re
-from typing import Any, DefaultDict, List, Tuple
-
-from matplotlib import rc, rcParams
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FormatStrFormatter, NullFormatter
-import numpy as np
-
-from dair_pll import file_utils
-
-
-
-RESULTS_DIR = os.path.join(os.path.dirname(__file__), '..', 'results', 'storage')
-OUTPUT_DIR = os.path.join(os.path.dirname(__file__), '..', 'plots')
-
-
-rc('legend', fontsize=30)
-plt.rc('axes', titlesize=40) # fontsize of the axes title
-plt.rc('axes', labelsize=40) # fontsize of the x and y labels
-
-yscale = 1
-use_logs = [True, True, True,
- False, False, False, False]
-plot_points = False
-
-params_to_track = {'cube_body_len_x': 0.1048, 'cube_body_len_y': 0.1048,
- 'cube_body_len_z': 0.1048, 'cube_body_mu': 0.15}
-yfields = ['train_model_trajectory_mse_mean',
- 'valid_model_trajectory_mse_mean',
- 'train_loss',
- 'cube_body_len_x',
- 'cube_body_len_y',
- 'cube_body_len_z',
- 'cube_body_mu']
-ylabels = ['Trajectory state space error (training)',
- 'Trajectory state space error',
- 'Training loss',
- 'Cube x length (normalized)',
- 'Cube y length (normalized)',
- 'Cube z length (normalized)',
- 'Friction coefficient (normalized)']
-val_scales = [1.0, 1.0, 1.0,
- 1.0/0.1048, 1.0/0.1048, 1.0/0.1048, 1.0/0.15]
-
-for (yfield, ylabel, val_scale, use_log) in zip(yfields, ylabels, val_scales, use_logs):
- models = {'ContactNets, L': ['cn_.+-0', 'cn_.+-2', 'cn_.+-4', 'cn_.+-6'],
- 'ContactNets, S': ['cn_.+-1', 'cn_.+-3', 'cn_.+-5', 'cn_.+-7'],
- 'DiffSim, L': ['ds_.+-0', 'ds_.+-2', 'ds_.+-4', 'ds_.+-6'],
- 'DiffSim, S': ['ds_.+-1', 'ds_.+-3', 'ds_.+-5', 'ds_.+-7']}
- label_lookup = {'cn_.+-0': 'ContactNets, L',
- 'cn_.+-1': 'ContactNets, S',
- 'cn_.+-2': 'ContactNets, L',
- 'cn_.+-3': 'ContactNets, S',
- 'cn_.+-4': 'ContactNets, L',
- 'cn_.+-5': 'ContactNets, S',
- 'cn_.+-6': 'ContactNets, L',
- 'cn_.+-7': 'ContactNets, S',
- 'ds_.+-0': 'DiffSim, L',
- 'ds_.+-1': 'DiffSim, S',
- 'ds_.+-2': 'DiffSim, L',
- 'ds_.+-3': 'DiffSim, S',
- 'ds_.+-4': 'DiffSim, L',
- 'ds_.+-5': 'DiffSim, S',
- 'ds_.+-6': 'DiffSim, L',
- 'ds_.+-7': 'DiffSim, S'}
- color_lookup = {'DiffSim, L': '#95001a', 'ContactNets, L': '#01256e',
- 'DiffSim, S': '#92668d', 'ContactNets, S': '#398537'} #4a0042
-
- print(f'\n\n========== Starting {yfield} ==========')
-
- def num(s: str):
- try:
- return int(s)
- except ValueError:
- return float(s)
-
-    def load_results(instance_regex: str) -> DefaultDict[int, List[Any]]:
-        pattern = re.compile(instance_regex + r'\Z')
- results = defaultdict(list)
-
- # load results from previous tests
- for instance_name in os.listdir(RESULTS_DIR):
- if (pattern.match(instance_name)) and '64' not in instance_name:
- # print(f'\tFound {instance_name} folder...')
-
- params_file = op.join(RESULTS_DIR, instance_name, 'params.txt')
-
- if not os.path.isfile(params_file):
- print(f'\t\t--> did not find params_file in {instance_name}')
- continue
-
- data_size = int(instance_name.split('_')[-1].split('-')[0])
-
- stats = read_params_file(params_file)
- results[int(data_size)].append(stats)
-
- return results
-
- def read_params_file(file_name):
- file = open(file_name, "r")
-
- filestr = file.read().replace('\'', '')
-
- stats = {}
- for key in yfields:
- stats[key] = float(filestr.split(f'{key}: ')[-1].split(',')[0].split('}')[0])
-
- return stats
-
- def extract_xys(results, y_field):
- extracted = defaultdict(list)
- for i in results.keys():
- for result in results[i]:
- extracted[i].append(float(result[y_field] * val_scale))
- return extracted
-
- def extract_points(results, y_field):
- extracted = extract_xys(results, y_field)
- xs, ys = [], []
- for x in extracted.keys():
- for y in extracted[x]:
- xs.append(x)
- ys.append(y)
- return xs, ys
-
- def scatter_to_t_conf_int_plot(extracted):
- # the following are t values for 95% confidence interval
- t_per_dof = {1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776,
- 5: 2.571, 6: 2.447, 7: 2.365, 8: 2.306,
- 9: 2.262, 10: 2.228, 0: 0.5}
-
- means, lowers, uppers = {}, {}, {}
-
- for k, v in extracted.items():
- dof = len(v) - 1
- means[k] = np.mean(v)
- lowers[k] = np.mean(v) - t_per_dof[dof]*np.std(v)/np.sqrt(dof+1)
- uppers[k] = np.mean(v) + t_per_dof[dof]*np.std(v)/np.sqrt(dof+1)
-
- xs = list(means.keys())
- ys, y_lowers, y_uppers = [], [], []
-
- for x in xs:
- ys.append(means[x])
- y_lowers.append(lowers[x])
- y_uppers.append(uppers[x])
-
- xs, ys, y_lowers, y_uppers = zip(*sorted(zip(xs, ys, y_lowers, y_uppers)))
-
- return xs, ys, y_lowers, y_uppers
-
- def get_data_counts(extracted):
- return {k: len(v) for k, v in extracted.items()}
-
- fig = plt.figure()
- ax = plt.gca()
-
- for model in models.keys():
- print(f'Working on {model}:', end='')
-
- dicts = []
- for mod in models[model]:
- results = load_results(mod)
- dicts.append(results)
-
- combined_results = {}
- for k in dicts[0].keys():
- combined_results[k] = []
- for d in dicts:
- for item in d[k]:
- combined_results[k].append(item)
-
- results = combined_results
- prefix = ''
-
- if plot_points:
- xs, ys = extract_points(results, prefix + yfield)
- xs = [x / 2 for x in xs]
- plt.scatter(xs, ys, s=200, c=color_lookup[model],
- label=label_lookup[model], alpha=0.5)
- else:
- extracted = extract_xys(results, prefix + yfield)
- print(f' with counts {get_data_counts(extracted)}')
- xs, ys, y_lowers, y_uppers = scatter_to_t_conf_int_plot(extracted)
- xs = [x / 2 for x in xs]
- ax.plot(xs, ys, label=model, linewidth=5, color=color_lookup[model])
- ax.fill_between(xs, y_lowers, y_uppers, alpha=0.3, color=color_lookup[model])
-
- ax.set_xscale('log')
- if use_log:
- ax.set_yscale('log')
- elif yfield == 'cube_body_mu':
- ax.set_ylim(0, 3.5)
- else:
- ax.set_ylim(0, 1.5)
-
- xs = [2 * 2**j for j in range(0, 4)]
- ax.set_xlim(min(xs), max(xs))
-
- ax.xaxis.set_major_formatter(NullFormatter())
- ax.xaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_major_formatter(NullFormatter())
-
- xs_rounded = [round(x, 1) for x in xs]
- ax.set_xticks([])
- ax.set_xticklabels([])
- ax.set_xticks(xs_rounded)
- ax.set_xticklabels(xs_rounded)
-
- ax.tick_params(axis='x', which='minor', bottom=False, labelsize=20)
- ax.tick_params(axis='x', which='major', bottom=False, labelsize=20)
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
-
- ax.tick_params(axis='y', which='minor', labelsize=20)
- ax.tick_params(axis='y', which='major', labelsize=20)
- if ('body_len' in yfield) or ('body_mu' in yfield):
- ax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
- ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1f"))
- else:
- ax.yaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- ax.yaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
-
- plt.xlabel('Training tosses')
- plt.ylabel(ylabel)
-
- ax.yaxis.grid(True, which='both')
- ax.xaxis.grid(True, which='major')
-
- lines = ax.get_lines()
-
- handles, labels = plt.gca().get_legend_handles_labels()
-
- plt.legend(handles, labels)
- plt.legend(loc=1, prop=dict(weight='bold'))
-
- fig.set_size_inches(13, 13)
-
- fig.savefig(f'{OUTPUT_DIR}/{yfield}.png', dpi=100)
- # fig.savefig(f'{OUTPUT_DIR}/tp_{yfield}.png', transparent=True, dpi=100)
-
-
-
-
-
-
-
-
-
-
-
-
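The scatter_to_t_conf_int_plot helper in the deleted rss_plot.py hard-codes two-sided 95% t critical values per degree of freedom and plots mean plus or minus t * std / sqrt(n) bands. The following is a minimal sketch reproducing that interval with scipy.stats, which is an assumption on my part since the deleted script does not import scipy.

import numpy as np
from scipy.stats import t


def t_conf_interval(samples, confidence=0.95):
    """Return (mean, lower, upper) for a two-sided t confidence interval."""
    samples = np.asarray(samples, dtype=float)
    n = len(samples)
    dof = n - 1
    mean = samples.mean()
    if dof < 1:
        return mean, mean, mean  # a single sample gives no spread estimate
    critical = t.ppf(0.5 + confidence / 2, dof)  # e.g. 2.571 for dof = 5
    half_width = critical * samples.std() / np.sqrt(n)
    return mean, mean - half_width, mean + half_width


print(t_conf_interval([0.11, 0.13, 0.10, 0.15, 0.12, 0.14]))

The sketch mirrors the deleted script's use of the population standard deviation (numpy's default ddof is 0); the script additionally special-cases dof 0 with a stand-in critical value of 0.5, which is a plotting convenience rather than a statistical choice.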
diff --git a/dair_pll_old/examples/run_cluster.bash b/dair_pll_old/examples/run_cluster.bash
deleted file mode 100644
index d612a31..0000000
--- a/dair_pll_old/examples/run_cluster.bash
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#SBATCH --gpus=0
-#SBATCH --mem-per-cpu=10G
-#SBATCH --cpus-per-task=8
-#SBATCH --qos=mp-med
-#SBATCH --time=12:00:00
-#SBATCH --job-name='pll'
-
-source /home/mengti/workspace/dair_pll/pll_env/bin/activate;
-export PYTHONPATH=${PWD}:${PYTHONPATH}
-
-dataset_sizes=(9)
-
-for size in "${dataset_sizes[@]}"; do
- for run_idx in {10..15}; do
- cmd="WANDB__SERVICE_WAIT=300 PYTHONUNBUFFERED=1 xvfb-run --server-args=\"-screen 0 800x600x24\" python3 examples/bundlesdf_simple.py --structured --system=bundlesdf_cube --geometry=polygon --source=real --contactnets --regenerate --no-residual --loss-variation=1 --inertia-params=0 --dataset-size $size 'final_gt_mesh' 'final_gt_mesh-${run_idx}'"
- echo "Running: $cmd"
- eval $cmd
- done
-done
diff --git a/dair_pll_old/examples/run_local.bash b/dair_pll_old/examples/run_local.bash
deleted file mode 100644
index 6c1cc04..0000000
--- a/dair_pll_old/examples/run_local.bash
+++ /dev/null
@@ -1,12 +0,0 @@
-source /home/cnets-vision/mengti_ws/dair_pll/cnets_env/bin/activate;
-export PYTHONPATH=${PWD}:${PYTHONPATH}
-
-dataset_sizes=(9)
-
-for size in "${dataset_sizes[@]}"; do
- for run_idx in {10..15}; do
- cmd="xvfb-run python3 examples/bundlesdf_simple.py --structured --system=bundlesdf_cube --geometry=polygon --source=real --contactnets --regenerate --no-residual --loss-variation=1 --inertia-params=0 --dataset-size $size 'final_gt_mesh' 'final_gt_mesh-${run_idx}'"
- echo "Running: $cmd"
- eval $cmd
- done
-done
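run_cluster.bash and run_local.bash assemble the same examples/bundlesdf_simple.py invocation inside nested loops and run it through eval. Below is a hedged Python sketch of the same sweep with the command built as an argument list, so no shell quoting is needed; the flag values are copied from the bash scripts and the script path is unchanged.

import shlex
import subprocess


def launch_sweep(dataset_sizes=(9,), run_indices=range(10, 16), dry_run=True):
    """Mirror the nested loops in run_local.bash / run_cluster.bash."""
    for size in dataset_sizes:
        for run_idx in run_indices:
            cmd = [
                'xvfb-run', 'python3', 'examples/bundlesdf_simple.py',
                '--structured', '--system=bundlesdf_cube',
                '--geometry=polygon', '--source=real', '--contactnets',
                '--regenerate', '--no-residual', '--loss-variation=1',
                '--inertia-params=0', '--dataset-size', str(size),
                'final_gt_mesh', f'final_gt_mesh-{run_idx}',
            ]
            print('Running:', shlex.join(cmd))
            if not dry_run:
                subprocess.run(cmd, check=True)


if __name__ == '__main__':
    launch_sweep()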
diff --git a/dair_pll_old/examples/startup.bash b/dair_pll_old/examples/startup.bash
deleted file mode 100644
index 8fd23f9..0000000
--- a/dair_pll_old/examples/startup.bash
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --gpus=0
-#SBATCH --mem-per-cpu=10G
-##SBATCH --qos=mp-med
-##SBATCH --partition=posa-compute
-##SBATCH --account mp-account
-#SBATCH --time=12:00:00
-#SBATCH --job-name=pll_{run_name}
-#SBATCH --output={pll_dir}/logs/slurm_{run_name}.txt
-
-echo "display" >> {pll_dir}/logs/start_{run_name}.txt
-source /mnt/kostas-graid/sw/envs/bibit/pll_env/bin/activate;
-export PYTHONPATH=/mnt/kostas-graid/sw/envs/bibit:{pll_dir};
-export PLL_EXPERIMENT={run_name};
-
-
-echo "repo at hash {hash}" >> {pll_dir}/logs/start_{run_name}.txt
-
-if {restart}; then
- echo "restarting" >> {pll_dir}/logs/start_{run_name}.txt
- WANDB__SERVICE_WAIT=300 PYTHONUNBUFFERED=1 xvfb-run --server-num="$SLURM_JOBID" --server-args="-screen 0 800x600x24" python {pll_dir}/examples/restart_run.py {storage_folder_name} {run_name} >> {pll_dir}/logs/train_{run_name}.txt
-else
- export WANDB_RUN_GROUP={wandb_group_id};
- echo "setting wandb run group to {wandb_group_id}" >> {pll_dir}/logs/start_{run_name}.txt
-
- echo "train" >> {pll_dir}/logs/start_{run_name}.txt
- WANDB__SERVICE_WAIT=300 PYTHONUNBUFFERED=1 xvfb-run --server-num="$SLURM_JOBID" --server-args="-screen 0 800x600x24" python {pll_dir}/examples/contactnets_simple.py {storage_folder_name} {run_name} {train_args} >> {pll_dir}/logs/train_{run_name}.txt
-fi
diff --git a/dair_pll_old/examples/startup_local.bash b/dair_pll_old/examples/startup_local.bash
deleted file mode 100644
index e9027e4..0000000
--- a/dair_pll_old/examples/startup_local.bash
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-echo "display"
-source {pll_dir}/pll_env/bin/activate;
-# export PYTHONPATH={pll_dir}; # commented out bc added to pll_env/bin/activate
-export PLL_EXPERIMENT={run_name};
-
-
-echo "repo at hash {hash}"
-
-if {restart}; then
- echo "restarting"
- PYTHONFAULTHANDLER=1 python {pll_dir}/examples/restart_run.py {storage_folder_name} {run_name} {train_args}
-else
- export WANDB_RUN_GROUP={wandb_group_id};
- echo "setting wandb run group to {wandb_group_id}"
-
- echo "train"
- PYTHONFAULTHANDLER=1 python {pll_dir}/examples/contactnets_simple.py {storage_folder_name} {run_name} {train_args}
-fi
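startup.bash and startup_local.bash are not run directly; they are templates whose {run_name}, {pll_dir}, {restart}, and similar fields are filled in before submission. A minimal sketch of that rendering step using str.format is shown below; the launcher that actually supplies these values is assumed and not part of this diff.

from pathlib import Path


def render_startup_script(template_path, output_path, **fields):
    """Fill a {placeholder}-style startup template and write the result."""
    script = Path(template_path).read_text().format(**fields)
    Path(output_path).write_text(script)
    return script


# Hypothetical example values; the real launcher supplies these.
# render_startup_script(
#     'examples/startup_local.bash', '/tmp/startup_rendered.bash',
#     pll_dir='/path/to/dair_pll', run_name='test-00', hash='abc1234',
#     restart='false', wandb_group_id='group-0',
#     storage_folder_name='storage', train_args='')

str.format works on these particular templates because the only braces they contain are the placeholders themselves.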
diff --git a/dair_pll_old/examples/tensorboard.bash b/dair_pll_old/examples/tensorboard.bash
deleted file mode 100644
index 19de74a..0000000
--- a/dair_pll_old/examples/tensorboard.bash
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-#SBATCH --qos=viz
-#SBATCH --partition=viz
-#SBATCH --cores=1
-
-PORT_MAP=/tmp/tensorboard_port_map
-
-TB_PORT=$(cat $PORT_MAP | grep "$SLURM_JOBID," | cut -d',' -f2)
-IP_ADDRESS=$(hostname -I | cut -d' ' -f1)
-
-TB_FOLDER=$1
-TB_TITLE=$2
-
-echo "http://$IP_ADDRESS:$TB_PORT"
-
-tensorboard --samples_per_plugin="images=0" --bind_all --logdir $TB_FOLDER --window_title $TB_TITLE --port $TB_PORT
diff --git a/dair_pll_old/examples/tensorboard_local.bash b/dair_pll_old/examples/tensorboard_local.bash
deleted file mode 100644
index bb37355..0000000
--- a/dair_pll_old/examples/tensorboard_local.bash
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-#SBATCH --qos=viz
-#SBATCH --partition=viz
-#SBATCH --cores=1
-
-
-TB_FOLDER=$1
-TB_TITLE=$2
-
-tensorboard --samples_per_plugin="images=0" --bind_all --logdir $TB_FOLDER --window_title $TB_TITLE
diff --git a/dair_pll_old/external_licenses/brossard_ukf.LICENSE.md b/dair_pll_old/external_licenses/brossard_ukf.LICENSE.md
deleted file mode 100644
index 86daca7..0000000
--- a/dair_pll_old/external_licenses/brossard_ukf.LICENSE.md
+++ /dev/null
@@ -1,13 +0,0 @@
-BSD 3-Clause License
-
-Copyright (c) 2019, CAOR, MinesParistech. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
- Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dair_pll_old/external_licenses/create-demo.LICENSE b/dair_pll_old/external_licenses/create-demo.LICENSE
deleted file mode 100644
index fd37eeb..0000000
--- a/dair_pll_old/external_licenses/create-demo.LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019 Anne Gentle
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/dair_pll_old/external_licenses/drake.LICENSE b/dair_pll_old/external_licenses/drake.LICENSE
deleted file mode 100644
index c53ba66..0000000
--- a/dair_pll_old/external_licenses/drake.LICENSE
+++ /dev/null
@@ -1,31 +0,0 @@
-All components of Drake are licensed under the BSD 3-Clause License
-shown below. Where noted in the source code, some portions may
-be subject to other permissive, non-viral licenses.
-
-Copyright 2012-2022 Robot Locomotion Group @ CSAIL
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-Redistributions of source code must retain the above copyright notice,
-this list of conditions and the following disclaimer. Redistributions
-in binary form must reproduce the above copyright notice, this list of
-conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution. Neither the name of
-the Massachusetts Institute of Technology nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dair_pll_old/helpers/copy_contactnets_dataset.py b/dair_pll_old/helpers/copy_contactnets_dataset.py
deleted file mode 100644
index 3e89935..0000000
--- a/dair_pll_old/helpers/copy_contactnets_dataset.py
+++ /dev/null
@@ -1,148 +0,0 @@
-'''
-Helper script to access the ContactNets elbow dataset and copy it into the format PLL expects.
-
-The ContactNets format is in:
- [ position quaternion artic_angle linear_velocity angular_velocity artic_velocity ]
- where:
- - position: [x, y, z] in BLOCK_HALF_WIDTHS
- - quaternion: [qw, qx, qy, qz]
- - artic_angle: [theta] in rad
- - linear_velocity: [vx, vy, vz] in BLOCK_HALF_WIDTHS/second
- - angular_velocity: [wx, wy, wz] in rad/second in body frame
- - artic_velocity: [dtheta] in rad/second
-
-The PLL format is in:
- [ quaternion position artic_angle angular_velocity linear_velocity artic_velocity ]
- where:
- - position: [x, y, z] in meters
- - quaternion: [qw, qx, qy, qz]
- - artic_angle: [theta] in rad
- - linear_velocity: [vx, vy, vz] in meters/second
- - angular_velocity: [wx, wy, wz] in rad/second in body frame
- - artic_velocity: [dtheta] in rad/second
-'''
-
-import pdb
-import os
-import torch
-from torch import Tensor
-
-
-LAB_COMPUTER = False
-
-COPY_DATASET_TO_PLL = False
-ELIMINATE_EMPTY_TOSSES = True
-
-if LAB_COMPUTER:
- REPO_DIR = '/home/bibit/SoPhTER/'
-else:
- REPO_DIR = '/Users/bibit/Documents/College/Penn/Quals/repo/SoPhTER/'
-
-PLL_DIR = '/Users/bibit/Documents/College/Penn/DAIRLab/pll_env/dair_pll/'
-
-BLOCK_HALF_WIDTH = 0.050
-METERS_PER_BHW = BLOCK_HALF_WIDTH
-
-
-###############################################################################################
-########## COPY DATASET TO PLL FORMAT ##########
-###############################################################################################
-def copy_dataset_to_pll_format():
- assert LAB_COMPUTER == False
-
- SOURCE_DIR = REPO_DIR + 'data/rect_elbow3d/franka/'
- TARGET_DIR = PLL_DIR + 'assets/contactnets_elbow/'
-
- # copy every real toss to TARGET_DIR after rearranging state vector to expected format.
- for i in range(601):
- # check if the toss exists in the source directory
- try:
- toss = torch.load(SOURCE_DIR + str(i) + '.pt')
- except:
- print(f'Skipping toss {i}.')
- continue
-
- print(f'Found toss {i}...', end='')
-
- # split the toss into its individual portions
- tsteps = toss.shape[0]
-
- pos = toss[:, :3]
- quat = toss[:, 3:7]
- artic = toss[:, 7].reshape(tsteps, 1)
- vels = toss[:, 8:11]
- ang_vels = toss[:, 11:14]
- artic_vel = toss[:, 14].reshape(tsteps, 1)
-
- # convert to correct units
- pos_pll = pos * METERS_PER_BHW
- vels_pll = vels * METERS_PER_BHW
-
- # combine in correct order for PLL convention
- pll_toss = torch.cat((quat, pos_pll, artic, ang_vels, vels_pll, artic_vel), dim=1)
-
- # pdb.set_trace()
- torch.save(pll_toss, TARGET_DIR + str(i) + '.pt')
- print(f' converted and copied!')
-
-def check_pll_cube_format():
- print(f'Checking PLL cube format:')
- assert LAB_COMPUTER == False
-
- CUBE_PLL_DIR = PLL_DIR + 'assets/contactnets_cube/'
-
- file_name = CUBE_PLL_DIR + str(0) + '.pt'
- pll_data = torch.load(file_name)
-
- pdb.set_trace()
-
-def check_cn_elbow_format():
- print(f'Checking ContactNets elbow format:')
- ELBOW_CN_DIR = REPO_DIR + 'data/rect_elbow3d/franka/'
-
- file_name = ELBOW_CN_DIR + str(0) + '.pt'
- cn_data = torch.load(file_name)
-
- pdb.set_trace()
-
-###############################################################################################
-########## ELIMINATE EMPTY TOSSES ##########
-###############################################################################################
-def eliminate_empty_tosses():
- # goal is to search through the toss directory and eliminate any gaps between toss numbers,
- # e.g. to convert '0.pt, 2.pt, 3.pt' to '0.pt, 1.pt, 2.pt'.
- TARGET_DIR = PLL_DIR + 'assets/contactnets_elbow/'
-
- move_to_i = 0
- for orig_i in range(601):
- # check if the toss exists already
- try:
- toss = torch.load(TARGET_DIR + str(orig_i) + '.pt')
- except:
- print(f'Skipping toss {orig_i}.')
- continue
-
- # here, we know that toss orig_i exists.
- print(f'Found toss {orig_i} --> moving it to toss {move_to_i}.')
-
- # delete the original file
- os.remove(TARGET_DIR + str(orig_i) + '.pt')
-
- # copy the trajectory data over to toss move_to_i, and increment move_to_i
- torch.save(toss, TARGET_DIR + str(move_to_i) + '.pt')
- move_to_i += 1
-
-
-###############################################################################################
-########## TESTS ##########
-###############################################################################################
-
-if COPY_DATASET_TO_PLL:
- # check_pll_cube_format()
- # check_cn_elbow_format()
- copy_dataset_to_pll_format()
-
-if ELIMINATE_EMPTY_TOSSES:
- eliminate_empty_tosses()
-
-
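The conversion in copy_contactnets_dataset.py above is a fixed column permutation plus a unit rescale of the position and linear velocity blocks. The following is a minimal sketch (not part of the deleted script) expressing it as a single indexing operation; the column indices follow the docstring layout, with BLOCK_HALF_WIDTH = 0.05 m as in the deleted code.

import torch

# ContactNets column order: pos(0:3), quat(3:7), artic(7), lin vel(8:11),
# ang vel(11:14), artic vel(14). PLL order: quat, pos, artic, ang vel,
# lin vel, artic vel.
CN_TO_PLL_COLUMNS = torch.tensor(
    [3, 4, 5, 6,    # quaternion (qw, qx, qy, qz)
     0, 1, 2,       # position
     7,             # articulation angle
     11, 12, 13,    # angular velocity
     8, 9, 10,      # linear velocity
     14])           # articulation velocity
BLOCK_HALF_WIDTH = 0.050  # meters per block half-width

# Columns of the reordered state that carry lengths (position, linear
# velocity) and therefore need the block-half-width to meters conversion.
LENGTH_COLUMNS = torch.tensor([4, 5, 6, 11, 12, 13])


def contactnets_to_pll(toss: torch.Tensor) -> torch.Tensor:
    pll = toss[:, CN_TO_PLL_COLUMNS]  # advanced indexing returns a copy
    pll[:, LENGTH_COLUMNS] *= BLOCK_HALF_WIDTH
    return pll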
diff --git a/dair_pll_old/helpers/corl_gather_gravity_results.py b/dair_pll_old/helpers/corl_gather_gravity_results.py
deleted file mode 100644
index ccc3e8d..0000000
--- a/dair_pll_old/helpers/corl_gather_gravity_results.py
+++ /dev/null
@@ -1,385 +0,0 @@
-"""Script to help generate plots for CoRL 2023 submission.
-
-First, this script compiles all results into a json file. Then, this script can
-gather the results from the json file and generate plots from them.
-
-The json file has the following format:
-{
- experiment_1: {
- system: cube/elbow
- prefix: e.g. sc for 'cube' experiments
- gravity_sweep: {
- gravity_frac_1: {
- run_1: {
- structured: bool
- contactnets: bool
- loss_variation: int
- residual: bool
- g_frac: float
- result_set: test/validation
- results: {
- metric_1: float
- metric_2: float
- ...
- }
- learned_params: {
- body_1: {
- param_1: float
- param_2: float
- ...
- }
- body_2: {...}
- }
- target_trajs: [] <-- excluded from json due to datatype
- prediction_trajs: [] <-- excluded from json due to datatype
- }
- run_2: {...}
- run_3: {...}
- ...
- }
- gravity_frac_2: {...}
- gravity_frac_3: {...}
- ...
- }
- }
- experiment_2: {...}
- experiment_3: {...}
- ...
-}
-...where `experiment_1` might be 'cube', corresponding to that system's
-gravity sweep results.
-"""
-
-import json
-import os
-import os.path as op
-import pdb
-import pickle
-import torch
-from copy import deepcopy
-
-import numpy as np
-
-from dair_pll.deep_learnable_system import DeepLearnableSystemConfig
-from dair_pll.drake_experiment import MultibodyLosses
-from dair_pll.geometry import _NOMINAL_HALF_LENGTH
-from dair_pll.inertia import InertialParameterConverter
-
-
-"""Note: might need the below in drake_experiment.py for backwards
-compatibility:
-
-@dataclass
-class DrakeMultibodyLearnableExperimentConfig(SupervisedLearningExperimentConfig
- ):
- visualize_learned_geometry: bool = True
- \"""Whether to use learned geometry in trajectory overlay visualization.\"""
-"""
-
-
-# Directory management.
-RESULTS_DIR = op.join(op.dirname(__file__), '..', 'results')
-OUTPUT_DIR = op.join(op.dirname(__file__), '..', 'plots')
-JSON_OUTPUT_FILE = op.join(op.dirname(__file__), 'gravity_results.json')
-
-BODY_NAMES_BY_SYSTEM = {'cube': ['body'], 'elbow': ['elbow_1', 'elbow_2'],
- 'asymmetric': ['body']}
-BODY_PARAMETERS = {
- 'm': 'Mass',
- 'com_x': 'CoM x',
- 'com_y': 'CoM y',
- 'com_z': 'CoM z',
- 'I_xx': 'I_xx',
- 'I_yy': 'I_yy',
- 'I_zz': 'I_zz',
- 'I_xy': 'I_xy',
- 'I_xz': 'I_xz',
- 'I_yz': 'I_yz',
- 'mu': 'Friction coefficient',
- 'center_x': 'Geometry center x',
- 'center_y': 'Geometry center y',
- 'center_z': 'Geometry center z',
- 'diameter_x': 'Geometry diameter x',
-    'diameter_y': 'Geometry diameter y',
-    'diameter_z': 'Geometry diameter z'}
-POLYGON_GEOMETRY_PARAMETERS = ['center_x', 'center_y', 'center_z',
- 'diameter_x', 'diameter_y', 'diameter_z']
-
-INERTIA_KEY = 'multibody_terms.lagrangian_terms.inertial_parameters'
-FRICTION_KEY = 'multibody_terms.contact_terms.friction_params'
-GEOMETRY_PREFIX = 'multibody_terms.contact_terms.geometries'
-GEOMETRY_KEY_BODY_1 = f'{GEOMETRY_PREFIX}.2.vertices_parameter'
-GEOMETRY_KEY_BODY_2 = f'{GEOMETRY_PREFIX}.0.vertices_parameter'
-# GEOMETRY_KEY2 = 'multibody_terms.contact_terms.geometries.0.length_params'
-
-FRICTION_INDEX_BY_BODY_NAME = {'body': 0, 'elbow_2': 0, 'elbow_1': 2}
-
-PERFORMANCE_METRICS = ['delta_v_squared_mean', 'v_plus_squared_mean',
- 'model_loss_mean', 'oracle_loss_mean',
- 'model_trajectory_mse_mean', 'oracle_trajectory_mse_mean',
- 'model_pos_int_traj', 'oracle_pos_int_traj',
- 'model_angle_int_traj', 'oracle_angle_int_traj',
- 'model_penetration_int_traj', 'oracle_penetration_int_traj']
-
-GRAVITY_FRACTIONS = {0: 0., 1: 0.5, 2: 1., 3: 1.5, 4: 2.}
-SYSTEMS = ['cube', 'elbow', 'asymmetric']
-ORDERED_INERTIA_PARAMS = ['m', 'px', 'py', 'pz', 'I_xx', 'I_yy', 'I_zz',
- 'I_xy', 'I_xz', 'I_yz']
-TARGET_SAMPLE_KEY = 'model_target_sample'
-PREDICTION_SAMPLE_KEY = 'model_prediction_sample'
-
-
-# Template dictionaries, from low- to high-level.
-RUN_DICT = {'structured': None, 'contactnets': None, 'loss_variation': None,
- 'residual': None, 'result_set': None, 'results': None,
- 'learned_params': None}
-EXPERIMENT_DICT = {'system': None, 'prefix': None,
- 'gravity_sweep': None}
-
-BAD_RUN_NUMBERS = [0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 15, 16, 17, 18, 19, 21, 22]
-
-# Prepend the keys below with 'gravity_sweep_' to get the results folder
-# names.
-EXPERIMENTS = {#'cube': {'system': 'cube', 'prefix': 'gc'},
- 'elbow': {'system': 'elbow', 'prefix': 'ge'}}
-
-g_index_to_use = 0
-
-
-# ============================= Helper functions ============================= #
-# Return an empty gravity sweep dictionary, to prevent unintended data retention.
-def make_empty_gravity_sweep_dict():
- new_dict = {}
- for grav_i in GRAVITY_FRACTIONS.keys():
- new_dict.update({GRAVITY_FRACTIONS[grav_i]: {}})
- return new_dict
-
-
-# Extract information out of a configuration object.
-def get_run_info_from_config(config):
- run_dict = deepcopy(RUN_DICT)
-
- run_dict['structured'] = False if \
- isinstance(config.learnable_config, DeepLearnableSystemConfig) else True
-
- if run_dict['structured']:
- run_dict['contactnets'] = True if \
- config.learnable_config.loss==MultibodyLosses.CONTACTNETS_LOSS \
- else False
- run_dict['loss_variation'] = config.learnable_config.loss_variation
- run_dict['residual'] = config.learnable_config.do_residual
- run_dict['g_frac'] = config.learnable_config.g_frac
- run_dict['result_set'] = 'test'
-
- else:
- # Can randomly (but evenly) assign gravity fraction to end-to-end
- # experiments since the g_frac doesn't affect the experiment.
- global g_index_to_use
- run_dict['g_frac'] = GRAVITY_FRACTIONS[g_index_to_use]
- g_index_to_use = (g_index_to_use+1) % 5
-
- run_dict['result_set'] = 'test'
- run_name = config.run_name
-
- return run_name, run_dict
-
-
-# Calculate geometry measurements from a set of polygon vertices.
-def get_geometry_metrics_from_params(geom_params):
- # First, convert the parameters to meters.
- vertices = geom_params * _NOMINAL_HALF_LENGTH
-
- # Extract diameters and centers.
- mins = vertices.min(axis=0).values
- maxs = vertices.max(axis=0).values
-
- diameters = maxs - mins
- centers = (maxs + mins)/2
-
- geom_dict = {'diameter_x': diameters[0].item(),
- 'diameter_y': diameters[1].item(),
- 'diameter_z': diameters[2].item(),
- 'center_x': centers[0].item(),
- 'center_y': centers[1].item(),
- 'center_z': centers[2].item(),
- 'vertices': vertices.tolist()}
- return geom_dict
-
-def geometry_keys_by_sys_and_bodies(system, body_name):
- if system == 'cube' or system == 'asymmetric':
- return {'body': GEOMETRY_KEY_BODY_2}
- return {'elbow_1': GEOMETRY_KEY_BODY_1, 'elbow_2': GEOMETRY_KEY_BODY_2}
-
-
-# Get individual physical parameters from best learned system state.
-def get_physical_parameters(system, body_names, best_system_state):
- physical_params_dict = {}
-
- theta = best_system_state[INERTIA_KEY]
- friction_params = best_system_state[FRICTION_KEY]
- if GEOMETRY_KEY_BODY_2 in best_system_state.keys():
- geometry_keys = geometry_keys_by_sys_and_bodies(system, body_names)
- else:
- geometry_keys = {}
- print(f'\t\tFound non-polygon; won\'t gather geometry results.')
-
- inertia_pi_cm_params = InertialParameterConverter.theta_to_pi_cm(theta)
-
- # Loop over each body.
- for i in range(len(body_names)):
- body = body_names[i]
-
- # First, get the inertial parameters.
- i_params = inertia_pi_cm_params[i, :]
- i_params[1:4] /= i_params[0].item() # Divide out the mass.
-
- body_params = {}
-
- for j in range(10):
- body_params.update({ORDERED_INERTIA_PARAMS[j]: i_params[j].item()})
-
- # Second, get the friction parameters.
- mu_index = FRICTION_INDEX_BY_BODY_NAME[body]
- body_params.update({'mu': friction_params[mu_index].item()})
-
- # Third, get the geometry parameters.
- try:
- geometry_params = best_system_state[geometry_keys[body]]
- geom_dict = get_geometry_metrics_from_params(geometry_params)
- body_params.update(geom_dict)
- except:
- pass
-
- # Store the results.
- physical_params_dict.update({body: body_params})
-
- return physical_params_dict
-
-
-# Extract the desired statistics from the larger stats file. Will convert
-# numpy arrays into averages.
-def get_performance_from_stats(stats, set_name):
- performance_dict = {}
- for metric in PERFORMANCE_METRICS:
- key = f'{set_name}_{metric}'
- try:
- if type(stats[key]) == np.ndarray:
- performance_dict.update({key: np.average(stats[key])})
- else:
- performance_dict.update({key: stats[key]})
- except:
- print(f'\t\tDidn\'t find {key} in stats...')
- return performance_dict
-
-
-# Extract the target and prediction trajectories from the larger stats file.
-# This isn't called since the datatype isn't json serializable, but keeping this
-# function here for future reference.
-def get_sample_trajectories_from_stats(stats, set_name):
- targets, predictions = [], []
-
- target_key = f'{set_name}_{TARGET_SAMPLE_KEY}'
- try:
- targets = stats[target_key]
- except:
- print(f'\t\tDidn\'t find {target_key} in stats...')
-
- prediction_key = f'{set_name}_{PREDICTION_SAMPLE_KEY}'
- try:
- predictions = stats[prediction_key]
- except:
- print(f'\t\tDidn\'t find {prediction_key} in stats...')
-
- return targets, predictions
-
-
-# Get run configuration, statistics, and checkpoint objects. Returns None for
-# any that don't exist.
-def get_config_stats_checkpoint(runs_path, run):
- config, stats, checkpoint = None, None, None
-
- config_file = op.join(runs_path, run, 'config.pkl')
- if op.exists(config_file):
- with open(config_file, 'rb') as file:
- config = pickle.load(file)
-
- stats_file = op.join(runs_path, run, 'statistics.pkl')
- if op.exists(stats_file):
- with open(stats_file, 'rb') as file:
- stats = pickle.load(file)
-
- checkpoint_file = op.join(runs_path, run, 'checkpoint.pt')
- if op.exists(checkpoint_file):
- checkpoint = torch.load(checkpoint_file)
-
- return config, stats, checkpoint
-
-
-# =============================== Gather data ================================ #
-# Loop over dataset categories, then dataset size, then individual runs.
-runs_needing_statistics = []
-results = {}
-
-sent_warning = False
-
-for experiment in EXPERIMENTS.keys():
- print(f'\n\n============== Starting {experiment} ==============')
- exp_dict = deepcopy(EXPERIMENT_DICT)
- system = EXPERIMENTS[experiment]['system']
- exp_dict['system'] = system
- exp_dict['prefix'] = EXPERIMENTS[experiment]['prefix']
- exp_dict['gravity_sweep'] = make_empty_gravity_sweep_dict()
-
- body_names = BODY_NAMES_BY_SYSTEM[system]
-
- results_folder_name = f'gravity_sweep_{experiment}'
- runs_path = op.join(RESULTS_DIR, results_folder_name, 'runs')
- if not op.isdir(runs_path):
- print(f'Could not find {results_folder_name} runs; skipping.')
- continue
-
- print(f'\nFound {results_folder_name}.')
-
- for run in os.listdir(runs_path):
- if int(run[2:4]) in BAD_RUN_NUMBERS:
- continue
- if not sent_warning:
- print(f'WARNING: Skipping run numbers {BAD_RUN_NUMBERS}')
- sent_warning = True
-
- config, stats, checkpoint = \
- get_config_stats_checkpoint(runs_path, run)
-
- if stats == None:
- print(f'\tNo stats file for {run}; skipping.')
- runs_needing_statistics.append(
- op.join(runs_path, run).split('results/')[-1])
- continue
-
- assert config != None and checkpoint != None
- print(f'\tFound statistics for {run}.')
-
- run_key, run_dict = get_run_info_from_config(config)
-
- performance_dict = \
- get_performance_from_stats(stats, run_dict['result_set'])
- run_dict['results'] = performance_dict
-
- if run_dict['structured']:
- best_system_state = checkpoint['best_learned_system_state']
- params_dict = get_physical_parameters(system, body_names,
- best_system_state)
- run_dict['learned_params'] = params_dict
-
- grav_frac = run_dict['g_frac']
- exp_dict['gravity_sweep'][grav_frac].update({run_key: run_dict})
-
- results.update({experiment: exp_dict})
-
-print(f'\n\nSaving results to json file.')
-with open(JSON_OUTPUT_FILE, 'w') as file:
- json.dump(results, file, indent=2)
-
-pdb.set_trace()
-
-print(f'\n\nRuns needing statistics: {runs_needing_statistics}')
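Once corl_gather_gravity_results.py has written gravity_results.json, downstream plotting only needs to walk the nesting documented in its module docstring: experiment, then 'gravity_sweep', then gravity fraction, then run, then 'results'. Below is a minimal consumer sketch (not in the repository); the metric name is one example following the 'test_' key prefix the script uses.

import json


def summarize_gravity_results(json_path='gravity_results.json',
                              metric='test_model_pos_int_traj'):
    """Print the per-gravity-fraction mean of one test metric."""
    with open(json_path) as f:
        results = json.load(f)
    for experiment, exp_dict in results.items():
        # json round-trips the gravity fractions as string keys, e.g. "0.5".
        for g_frac, runs in exp_dict['gravity_sweep'].items():
            values = [run['results'][metric] for run in runs.values()
                      if run['results'] and metric in run['results']]
            if values:
                mean = sum(values) / len(values)
                print(f'{experiment} @ g_frac={g_frac}: '
                      f'{metric} = {mean:.4g} over {len(values)} runs')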
diff --git a/dair_pll_old/helpers/corl_gather_results.py b/dair_pll_old/helpers/corl_gather_results.py
deleted file mode 100644
index 32ac27f..0000000
--- a/dair_pll_old/helpers/corl_gather_results.py
+++ /dev/null
@@ -1,451 +0,0 @@
-"""Script to help generate plots for CoRL 2023 submission.
-
-First, this script compiles all results into a json file. Then, this script can
-gather the results from the json file and generate plots from them.
-
-The json file has the following format:
-{
- experiment_1: {
- system: cube/elbow/asymmetric
- prefix: e.g. sc for 'cube' experiments
- data_sweep: {
- dataset_size_1: {
- run_1: {
- structured: bool
- contactnets: bool
- loss_variation: int
- residual: bool
- result_set: test/validation
- results: {
- metric_1: float
- metric_2: float
- ...
- }
- learned_params: {
- body_1: {
- param_1: float
- param_2: float
- ...
- }
- body_2: {...}
- }
- post_results: {
- post_metric_1: float
- post_metric_2: float
- ...
- }
- fixed_horizon_post_results: {
- fixed_horizon_metric_1: float
- fixed_horizon_metric_2: float
- ...
- }
- target_trajs: [] <-- excluded from json due to datatype
- prediction_trajs: [] <-- excluded from json due to datatype
- }
- run_2: {...}
- run_3: {...}
- ...
- }
- dataset_size_2: {...}
- dataset_size_3: {...}
- ...
- }
- }
- experiment_2: {...}
- experiment_3: {...}
- ...
-}
-...where `experiment_1` might be 'cube', corresponding to the real data sweep
-results.
-"""
-
-import json
-import os
-import os.path as op
-import pdb
-import pickle
-import torch
-from copy import deepcopy
-
-import numpy as np
-
-from dair_pll.deep_learnable_system import DeepLearnableSystemConfig
-from dair_pll.drake_experiment import MultibodyLosses
-from dair_pll.geometry import _NOMINAL_HALF_LENGTH
-from dair_pll.inertia import InertialParameterConverter
-
-
-"""Note: might need the below in drake_experiment.py for backwards
-compatibility:
-
-@dataclass
-class DrakeMultibodyLearnableExperimentConfig(SupervisedLearningExperimentConfig
- ):
- visualize_learned_geometry: bool = True
- \"""Whether to use learned geometry in trajectory overlay visualization.\"""
-"""
-
-
-# Directory management.
-RESULTS_DIR = op.join(op.dirname(__file__), '..', 'results')
-OUTPUT_DIR = op.join(op.dirname(__file__), '..', 'plots')
-JSON_OUTPUT_FILE = op.join(op.dirname(__file__), 'results.json')
-
-ROLLOUT_LENGTHS = [1, 2, 4, 8, 16, 32, 64, 120]
-
-BODY_NAMES_BY_SYSTEM = {'cube': ['body'], 'elbow': ['elbow_1', 'elbow_2'],
- 'asymmetric': ['body']}
-BODY_PARAMETERS = {
- 'm': 'Mass',
- 'com_x': 'CoM x',
- 'com_y': 'CoM y',
- 'com_z': 'CoM z',
- 'I_xx': 'I_xx',
- 'I_yy': 'I_yy',
- 'I_zz': 'I_zz',
- 'I_xy': 'I_xy',
- 'I_xz': 'I_xz',
- 'I_yz': 'I_yz',
- 'mu': 'Friction coefficient',
- 'center_x': 'Geometry center x',
- 'center_y': 'Geometry center y',
- 'center_z': 'Geometry center z',
- 'diameter_x': 'Geometry diameter x',
-    'diameter_y': 'Geometry diameter y',
-    'diameter_z': 'Geometry diameter z'}
-POLYGON_GEOMETRY_PARAMETERS = ['center_x', 'center_y', 'center_z',
- 'diameter_x', 'diameter_y', 'diameter_z']
-
-INERTIA_KEY = 'multibody_terms.lagrangian_terms.inertial_parameters'
-FRICTION_KEY = 'multibody_terms.contact_terms.friction_params'
-GEOMETRY_PREFIX = 'multibody_terms.contact_terms.geometries'
-GEOMETRY_KEY_BODY_1 = f'{GEOMETRY_PREFIX}.2.vertices_parameter'
-GEOMETRY_KEY_BODY_2 = f'{GEOMETRY_PREFIX}.0.vertices_parameter'
-# GEOMETRY_KEY2 = 'multibody_terms.contact_terms.geometries.0.length_params'
-
-FRICTION_INDEX_BY_BODY_NAME = {'body': 0, 'elbow_2': 0, 'elbow_1': 2}
-
-PERFORMANCE_METRICS = ['delta_v_squared_mean', 'v_plus_squared_mean',
- 'model_loss_mean', 'oracle_loss_mean',
- 'model_trajectory_mse_mean', 'oracle_trajectory_mse_mean',
- 'model_pos_int_traj', 'oracle_pos_int_traj',
- 'model_angle_int_traj', 'oracle_angle_int_traj',
- 'model_penetration_int_traj', 'oracle_penetration_int_traj']
-POST_PERFORMANCE_METRICS = \
- [f'pos_error_w_horizon_{i}' for i in ROLLOUT_LENGTHS] + \
- [f'rot_error_w_horizon_{i}' for i in ROLLOUT_LENGTHS]
-FIXED_HORIZON = 16
-FIXED_HORIZON_METRICS = [f'pos_error_w_horizon_{FIXED_HORIZON}',
- f'rot_error_w_horizon_{FIXED_HORIZON}']
-
-DATASET_EXPONENTS = [2, 3, 4, 5, 6, 7, 8, 9]
-SYSTEMS = ['cube', 'elbow', 'asymmetric']
-ORDERED_INERTIA_PARAMS = ['m', 'px', 'py', 'pz', 'I_xx', 'I_yy', 'I_zz',
- 'I_xy', 'I_xz', 'I_yz']
-TARGET_SAMPLE_KEY = 'model_target_sample'
-PREDICTION_SAMPLE_KEY = 'model_prediction_sample'
-
-ORIGINAL_KIND = 'original'
-POST_KIND = 'post'
-FIXED_HORIZON_KIND = 'fixed_horizon'
-KINDS = [ORIGINAL_KIND, POST_KIND, FIXED_HORIZON_KIND]
-
-# Template dictionaries, from low- to high-level.
-RUN_DICT = {'structured': None, 'contactnets': None, 'loss_variation': None,
- 'residual': None, 'result_set': None, 'results': None,
- 'learned_params': None, 'post_results': None,
- 'fixed_horizon_post_results': None}
-EXPERIMENT_DICT = {'system': None, 'prefix': None, 'data_sweep': None}
-
-BAD_RUN_NUMBERS = {
- 'elbow': [i for i in range(24)] + [i for i in range(25, 32)] + \
- [35, 36, 37, 38, 39, 40, 41, 42],
- 'cube': [i for i in range(24)] + [i for i in range(25, 32)],
- 'asymmetric_vortex':
- [i for i in range(24)] + [i for i in range(25, 32)] + \
- [33, 35, 36, 37, 39, 40],
- 'elbow_vortex':
- [i for i in range(24)] + [i for i in range(25, 30)] + [31, 33, 35],
- 'asymmetric_viscous':
- [i for i in range(24)] + [i for i in range(25, 30)] + [31, 33, 35],
- 'elbow_viscous':
- [i for i in range(24)] + [i for i in range(25, 30)] + [31, 33, 35]}
-
-# Prepend the below with 'sweep_' and append '-#' (the dataset exponent) to
-# get the folder names.
-EXPERIMENTS = {'cube': {'system': 'cube', 'prefix': 'sc'},
- 'elbow': {'system': 'elbow', 'prefix': 'se'},
- 'asymmetric_vortex': {'system': 'asymmetric', 'prefix': 'va'},}
- #'elbow_vortex': {'system': 'elbow', 'prefix': 've'},
- #'asymmetric_viscous': {'system': 'asymmetric', 'prefix': 'ba'},
- #'elbow_viscous': {'system': 'elbow', 'prefix': 'be'}}
-
-
-# ============================= Helper functions ============================= #
-# Return an empty data sweep dictionary, to prevent unintended data retention.
-def make_empty_data_sweep_dict():
- new_dict = {}
- for exp in DATASET_EXPONENTS: new_dict.update({exp: {}})
- return new_dict
-
-# Extract information out of a configuration object.
-def get_run_info_from_config(config):
- run_dict = deepcopy(RUN_DICT)
-
- run_dict['structured'] = False if \
- isinstance(config.learnable_config, DeepLearnableSystemConfig) else \
- True
- run_dict['contactnets'] = False if not run_dict['structured'] else \
- True if config.learnable_config.loss==MultibodyLosses.CONTACTNETS_LOSS \
- else False
- run_dict['loss_variation'] = 0 if not run_dict['structured'] else \
- config.learnable_config.loss_variation
- run_dict['residual'] = False if not run_dict['structured'] else \
- config.learnable_config.do_residual
- run_dict['result_set'] = 'test'
- run_name = config.run_name
-
- return run_name, run_dict
-
-# Calculate geometry measurements from a set of polygon vertices.
-def get_geometry_metrics_from_params(geom_params):
- # First, convert the parameters to meters.
- vertices = geom_params * _NOMINAL_HALF_LENGTH
-
- # Extract diameters and centers.
- mins = vertices.min(axis=0).values
- maxs = vertices.max(axis=0).values
-
- diameters = maxs - mins
- centers = (maxs + mins)/2
-
- geom_dict = {'diameter_x': diameters[0].item(),
- 'diameter_y': diameters[1].item(),
- 'diameter_z': diameters[2].item(),
- 'center_x': centers[0].item(),
- 'center_y': centers[1].item(),
- 'center_z': centers[2].item(),
- 'vertices': vertices.tolist()}
- return geom_dict
-
-def geometry_keys_by_sys_and_bodies(system, body_name):
- if system == 'cube' or system == 'asymmetric':
- return {'body': GEOMETRY_KEY_BODY_2}
- return {'elbow_1': GEOMETRY_KEY_BODY_1, 'elbow_2': GEOMETRY_KEY_BODY_2}
-
-# Get individual physical parameters from best learned system state.
-def get_physical_parameters(system, body_names, best_system_state):
- physical_params_dict = {}
-
- theta = best_system_state[INERTIA_KEY]
- friction_params = best_system_state[FRICTION_KEY]
- if GEOMETRY_KEY_BODY_2 in best_system_state.keys():
- geometry_keys = geometry_keys_by_sys_and_bodies(system, body_names)
- else:
- geometry_keys = {}
- print(f'\t\tFound non-polygon; won\'t gather geometry results.')
-
- inertia_pi_cm_params = InertialParameterConverter.theta_to_pi_cm(theta)
-
- # Loop over each body.
- for i in range(len(body_names)):
- body = body_names[i]
-
- # First, get the inertial parameters.
- i_params = inertia_pi_cm_params[i, :]
- i_params[1:4] /= i_params[0].item() # Divide out the mass.
-
- body_params = {}
-
- for j in range(10):
- body_params.update({ORDERED_INERTIA_PARAMS[j]: i_params[j].item()})
-
- # Second, get the friction parameters.
- mu_index = FRICTION_INDEX_BY_BODY_NAME[body]
- body_params.update({'mu': friction_params[mu_index].item()})
-
- # Third, get the geometry parameters.
- try:
- geometry_params = best_system_state[geometry_keys[body]]
- geom_dict = get_geometry_metrics_from_params(geometry_params)
- body_params.update(geom_dict)
- except:
- pass
-
- # Store the results.
- physical_params_dict.update({body: body_params})
-
- return physical_params_dict
-
-# Extract the desired statistics from the larger stats file. Will convert
-# numpy arrays into averages.
-def get_performance_from_stats(stats, set_name, kind=ORIGINAL_KIND):
- metrics = PERFORMANCE_METRICS if kind==ORIGINAL_KIND else \
- POST_PERFORMANCE_METRICS if kind==POST_KIND else \
- FIXED_HORIZON_METRICS
-
- performance_dict = {}
- for metric in metrics:
- key = f'{set_name}_{metric}'
- try:
- if type(stats[key]) == np.ndarray:
- performance_dict.update({key: np.average(stats[key])})
- else:
- performance_dict.update({key: stats[key]})
- except:
- print(f'\t\tDidn\'t find {key} in stats...')
- return performance_dict
-
-# Extract the target and prediction trajectories from the larger stats file.
-# This isn't called since the datatype isn't json serializable, but keeping this
-# function here for future reference.
-def get_sample_trajectories_from_stats(stats, set_name):
- targets, predictions = [], []
-
- target_key = f'{set_name}_{TARGET_SAMPLE_KEY}'
- try:
- targets = stats[target_key]
- except:
- print(f'\t\tDidn\'t find {target_key} in stats...')
-
- prediction_key = f'{set_name}_{PREDICTION_SAMPLE_KEY}'
- try:
- predictions = stats[prediction_key]
- except:
- print(f'\t\tDidn\'t find {prediction_key} in stats...')
-
- return targets, predictions
-
-# Get run configuration, statistics, and checkpoint objects. Returns None for
-# any that don't exist.
-def get_config_stats_checkpoint(runs_path, run):
- config, stats, checkpoint = None, None, None
-
- config_file = op.join(runs_path, run, 'config.pkl')
- if op.exists(config_file):
- with open(config_file, 'rb') as file:
- config = pickle.load(file)
-
- stats_file = op.join(runs_path, run, 'statistics.pkl')
- if op.exists(stats_file):
- with open(stats_file, 'rb') as file:
- stats = pickle.load(file)
-
- checkpoint_file = op.join(runs_path, run, 'checkpoint.pt')
- if op.exists(checkpoint_file):
- checkpoint = torch.load(checkpoint_file)
-
- return config, stats, checkpoint
-
-def get_post_processed_stats_file(runs_path, run):
- stats = None
- stats_file = op.join(runs_path, run, 'post_processing',
- 'post_statistics.pkl')
- if op.exists(stats_file):
- with open(stats_file, 'rb') as file:
- stats = pickle.load(file)
- return stats
-
-def get_post_processed_fixed_horizon_stats_file(runs_path, run):
- stats = None
- stats_file = op.join(runs_path, run, 'traj_sweep_statistics.pkl')
- if op.exists(stats_file):
- with open(stats_file, 'rb') as file:
- stats = pickle.load(file)
- return stats
-
-# =============================== Gather data ================================ #
-# Loop over dataset categories, then dataset size, then individual runs.
-runs_needing_statistics = []
-finished_runs_needing_post_statistics = []
-results = {}
-
-sent_warning = {'elbow': False, 'cube': False}
-
-for experiment in EXPERIMENTS.keys():
- print(f'\n\n============== Starting {experiment} ==============')
- exp_dict = deepcopy(EXPERIMENT_DICT)
- system = EXPERIMENTS[experiment]['system']
- exp_dict['system'] = system
- exp_dict['prefix'] = EXPERIMENTS[experiment]['prefix']
- exp_dict['data_sweep'] = make_empty_data_sweep_dict()
-
- body_names = BODY_NAMES_BY_SYSTEM[system]
-
- for exponent in DATASET_EXPONENTS:
- results_folder_name = f'sweep_{experiment}-{exponent}'
- runs_path = op.join(RESULTS_DIR, results_folder_name, 'runs')
- if not op.isdir(runs_path):
- print(f'Could not find {results_folder_name} runs; skipping.')
- continue
-
- print(f'\nFound {results_folder_name}.')
-
- for run in os.listdir(runs_path):
- if int(run[2:4]) in BAD_RUN_NUMBERS[experiment]:
- continue
- if not sent_warning[experiment]:
- print(f'WARNING: Skipping run numbers ' + \
- f'{BAD_RUN_NUMBERS[experiment]}')
- sent_warning[experiment] = True
-
- config, stats, checkpoint = \
- get_config_stats_checkpoint(runs_path, run)
-
- if stats == None:
- print(f'\tNo stats file for {run}; skipping.')
- runs_needing_statistics.append(
- op.join(runs_path, run).split('results/')[-1])
- continue
-
- assert config != None and checkpoint != None
- print(f'\tFound statistics for {run}.', end='')
-
- run_key, run_dict = get_run_info_from_config(config)
-
- performance_dict = \
- get_performance_from_stats(stats, run_dict['result_set'])
- run_dict['results'] = performance_dict
-
- # Check for post-processed statistics.
- post_stats = get_post_processed_stats_file(runs_path, run)
- if post_stats == None:
- print(f' No post-processed statistics found.', end='')
- finished_runs_needing_post_statistics.append(
- op.join(runs_path, run).split('results/')[-1])
-
- else:
- print(f' Found post-processed stats, too.', end='')
- post_performance_dict = get_performance_from_stats(
- post_stats, 'test', kind=POST_KIND)
- run_dict['post_results'] = post_performance_dict
-
- # Check for post-processed fixed horizon statistics.
- fixed_horizon_stats = get_post_processed_fixed_horizon_stats_file(
- runs_path, run)
- if fixed_horizon_stats == None:
- print(f' No fixed horizon stats.')
- else:
- print(f' Also fixed horizon stats!')
- fixed_horizon_dict = get_performance_from_stats(
- fixed_horizon_stats, 'test', kind=FIXED_HORIZON_KIND)
- run_dict['fixed_horizon_post_results'] = fixed_horizon_dict
-
- # If structured, save learned physical parameters.
- if run_dict['structured']:
- best_system_state = checkpoint['best_learned_system_state']
- params_dict = get_physical_parameters(system, body_names,
- best_system_state)
- run_dict['learned_params'] = params_dict
-
- # Store everything in larger dictionary.
- exp_dict['data_sweep'][exponent].update({run_key: run_dict})
-
- results.update({experiment: exp_dict})
-
-print(f'\n\nSaving results to json file.')
-with open(JSON_OUTPUT_FILE, 'w') as file:
- json.dump(results, file, indent=2)
-
-pdb.set_trace()
-
-print(f'\n\nRuns needing statistics: {runs_needing_statistics}')
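get_performance_from_stats in the deleted corl_gather_results.py selects its metric list through chained conditional expressions over the three statistics kinds. The following is a hedged refactoring sketch that uses a dictionary lookup instead; the metric lists here are shortened stand-ins for PERFORMANCE_METRICS, POST_PERFORMANCE_METRICS, and FIXED_HORIZON_METRICS as defined in the file.

import numpy as np

ORIGINAL_KIND, POST_KIND, FIXED_HORIZON_KIND = 'original', 'post', 'fixed_horizon'

# Shortened stand-ins for the full metric lists in the deleted script.
METRICS_BY_KIND = {
    ORIGINAL_KIND: ['model_loss_mean', 'model_trajectory_mse_mean'],
    POST_KIND: ['pos_error_w_horizon_16', 'rot_error_w_horizon_16'],
    FIXED_HORIZON_KIND: ['pos_error_w_horizon_16', 'rot_error_w_horizon_16'],
}


def get_performance_from_stats(stats, set_name, kind=ORIGINAL_KIND):
    """Collect '{set_name}_{metric}' entries, averaging numpy arrays."""
    performance = {}
    for metric in METRICS_BY_KIND[kind]:
        key = f'{set_name}_{metric}'
        if key not in stats:
            print(f'\t\tDid not find {key} in stats...')
            continue
        value = stats[key]
        performance[key] = (float(np.average(value))
                            if isinstance(value, np.ndarray) else value)
    return performance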
diff --git a/dair_pll_old/helpers/corl_plot.py b/dair_pll_old/helpers/corl_plot.py
deleted file mode 100644
index f4097af..0000000
--- a/dair_pll_old/helpers/corl_plot.py
+++ /dev/null
@@ -1,1125 +0,0 @@
-"""This script is meant to be run after `corl_gather_results.py`, whose output
-is a json file that this script accesses to generate plots.
-
-experiment/
- method/
- n_runs: dataset size <-- number of experiments
- metric/
- dataset size/
- [list of values]
- parameter/
- dataset size/
- [list of values] <-- empty if end-to-end
- post_metric/
- dataset_size/
- [list of values] <-- empty if not calculated
-"""
-
-from collections import defaultdict
-import sys
-from copy import deepcopy
-
-import json
-import math
-import os
-import os.path as op
-import pdb
-import re
-from typing import Any, DefaultDict, List, Tuple
-
-from matplotlib import rc, rcParams
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FormatStrFormatter, NullFormatter
-import numpy as np
-from scipy.optimize import linprog
-from scipy.spatial import ConvexHull, HalfspaceIntersection
-import torch
-from torch import Tensor
-
-from dair_pll.system import MeshSummary
-from dair_pll.deep_support_function import extract_outward_normal_hyperplanes
-
-
-
-RESULTS_DIR = os.path.join(os.path.dirname(__file__), '..', 'results')
-OUTPUT_DIR = os.path.join(os.path.dirname(__file__), '..', 'plots')
-JSON_OUTPUT_FILE = op.join(op.dirname(__file__), 'results_cluster.json')
-JSON_GRAVITY_FILE = op.join(op.dirname(__file__), 'gravity_results.json')
-
-
-CN_METHODS_ONLY = [#'VimpI', 'VimpI RP',
- 'Vimp', 'Vimp RP']
-METHOD_RESULTS = {#'VimpI': '#01256e',
- #'VimpI RP': '#398537',
- 'CCN': '#01256e', #'#1111ff',
- 'CCN-R': '#398537', #'#11ff11',
- 'DiffSim': '#95001a',
- 'DiffSim-R': '#92668d',
- 'End-to-End': '#4a0042',}
-METRICS = {'model_loss_mean': {
- 'label': 'Loss', 'scaling': 1.0,
- 'yformat': {'elbow': "%.0f", 'cube': "%.0f",
- 'asymmetric': "%.0f"},
- 'ylims': {'elbow': [None, None], 'cube': [None, None],
- 'asymmetric': [None, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- # 'oracle_loss_mean': {
- # 'label': 'Loss',
- # 'yformat': "%.0f", 'scaling': 1.0,
- # 'ylims': {'elbow': [None, None], 'cube': [None, None],
- # 'asymmetric': [None, None]},
- # 'legend_loc': 'best'},
- 'model_trajectory_mse_mean': {
- 'label': 'Accumulated trajectory error', 'scaling': 1.0,
- 'yformat': {'elbow': "%.0f", 'cube': "%.0f",
- 'asymmetric': "%.0f"},
- 'ylims': {'elbow': [None, None], 'cube': [None, None],
- 'asymmetric': [None, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- 'model_pos_int_traj': {
- 'label': 'Trajectory positional error [m]', 'scaling': 1.0,
- 'yformat': {'elbow': "%.2f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [-0.01, 0.4], 'cube': [-0.01, 0.4],
- 'asymmetric': [-0.01, 0.4]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- 'model_angle_int_traj': {
- 'label': 'Trajectory rotational error [deg]',
- 'scaling': 180/np.pi,
- 'yformat': {'elbow': "%.0f", 'cube': "%.0f",
- 'asymmetric': "%.0f"},
- 'ylims': {'elbow': [0.0, 140], 'cube': [0.0, 140],
- 'asymmetric': [0.0, 140]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- 'model_penetration_int_traj': {
- 'label': 'Trajectory penetration [m]', 'scaling': 1.0,
- 'yformat': {'elbow': "%.3f", 'cube': "%.3f",
- 'asymmetric': "%.3f"},
- 'ylims': {'elbow': [-0.005, 0.03], 'cube': [None, None],
- 'asymmetric': [None, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False}
- }
-
-ROLLOUT_LENGTHS = [1, 2, 4, 8, 16, 32, 64, 120]
-FIXED_HORIZON = 16
-FIXED_HORIZON_POS_ERROR = f'pos_error_w_horizon_{FIXED_HORIZON}'
-FIXED_HORIZON_ROT_ERROR = f'rot_error_w_horizon_{FIXED_HORIZON}'
-FIXED_HORIZON_METRICS_BY_EXPERIMENT = {
- 'cube': [],
- 'elbow': [FIXED_HORIZON_POS_ERROR, FIXED_HORIZON_ROT_ERROR],
- 'cube_gravity': [],
- 'asymmetric_vortex': [],
- 'asymmetric_viscous': [],
- 'asymmetric_gravity': [],
- 'elbow_vortex': [],
- 'elbow_viscous': [],
- 'elbow_gravity': []
-}
-FIXED_HORIZON_METRICS = {
- FIXED_HORIZON_POS_ERROR: METRICS['model_pos_int_traj'],
- FIXED_HORIZON_ROT_ERROR: METRICS['model_angle_int_traj']
-}
-
-PARAMETER_VALUES = ["m", "px", "py", "pz", "I_xx", "I_yy", "I_zz", "I_xy",
- "I_xz", "I_yz", "mu", "diameter_x", "diameter_y",
- "diameter_z", "center_x", "center_y", "center_z"]
-
-GEOMETRY_PARAMETER_ERROR = 'geometry_parameter_error'
-VERTEX_ERROR = 'vertex_error'
-VOLUME_ERROR = 'volume_error'
-FRICTION_PARAMETER_ERROR = 'friction_error'
-INERTIA_PARAMETER_ERROR = 'inertia_error'
-PARAMETER_ERRORS = {
- GEOMETRY_PARAMETER_ERROR: {'label': 'Geometry parameter error [m]',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.3f", 'cube': "%.3f",
- 'asymmetric': "%.3f"},
- 'ylims': {'elbow': [0.0, None],
- 'cube': [0.0, None],
- 'asymmetric': [0.0, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- VERTEX_ERROR: {'label': 'Average vertex location error [m]',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.2f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.0, None],
- 'cube': [0.0, None],
- 'asymmetric': [0.0, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- VOLUME_ERROR: {'label': 'Relative volume error',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.2f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.0, 0.52],
- 'cube': [0.0, 0.52],
- 'asymmetric': [0.0, 0.52]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- FRICTION_PARAMETER_ERROR: {'label': 'Friction error',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.1f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.0, 0.85],
- 'cube': [0.0, 0.85],
- 'asymmetric': [0.0, 0.85]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- INERTIA_PARAMETER_ERROR: {'label': 'Inertia parameter error',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.0f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.1, 300],
- 'cube': [0.1, 300],
- 'asymmetric': [0.1, 300]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': True},
-}
-
-ALL_PARAMETER_METRICS = [GEOMETRY_PARAMETER_ERROR, FRICTION_PARAMETER_ERROR,
- INERTIA_PARAMETER_ERROR, VERTEX_ERROR, VOLUME_ERROR]
-PARAMETER_METRICS_BY_EXPERIMENT = {
- 'cube': [GEOMETRY_PARAMETER_ERROR, VERTEX_ERROR, VOLUME_ERROR],
- 'elbow': [GEOMETRY_PARAMETER_ERROR, VERTEX_ERROR, VOLUME_ERROR],
- 'cube_gravity': ALL_PARAMETER_METRICS,
- 'asymmetric_vortex': ALL_PARAMETER_METRICS,
- 'asymmetric_viscous': ALL_PARAMETER_METRICS,
- 'asymmetric_gravity': ALL_PARAMETER_METRICS,
- 'elbow_vortex': ALL_PARAMETER_METRICS,
- 'elbow_viscous': ALL_PARAMETER_METRICS,
- 'elbow_gravity': ALL_PARAMETER_METRICS}
-
-ELBOW_HALF_VERTICES = Tensor([
- [-0.0500, -0.02500, 0.02500],
- [0.0500, -0.02500, 0.02500],
- [-0.0500, 0.02500, 0.02500],
- [0.0500, 0.02500, 0.02500],
- [-0.0500, 0.02500, -0.02500],
- [0.0500, 0.02500, -0.02500],
- [-0.0500, -0.02500, -0.02500],
- [0.0500, -0.02500, -0.02500]])
-ASYMMETRIC_VERTICES = Tensor([
- [ 0.0, -0.02500000037252903, -0.05000000074505806],
- [ 0.07500000298023224, 0.0, 0.0],
- [ 0.0, 0.05000000074505806, -0.02500000037252903],
- [ -0.02500000037252903, 0.02500000037252903, -0.02500000037252903],
- [ 0.02500000037252903, 0.02500000037252903, 0.02500000037252903],
- [ 0.05000000074505806, -0.02500000037252903, 0.02500000037252903]])
-CUBE_VERTICES = Tensor([
- [ -0.052400, -0.052400, 0.052400],
- [ 0.052400, -0.052400, 0.052400],
- [ -0.052400, 0.052400, 0.052400],
- [ 0.052400, 0.052400, 0.052400],
- [ -0.052400, 0.052400, -0.052400],
- [ 0.052400, 0.052400, -0.052400],
- [ -0.052400, -0.052400, -0.052400],
- [ 0.052400, -0.052400, -0.052400]])
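-
-# Note: the cube vertices above are the corners at +/-0.0524 m along each
-# axis, i.e. half of the 0.1048 m ground-truth diameters listed in
-# CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY below.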
-
-CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY = {
- 'cube': {
- 'body': {
- 'diameter_x': 0.1048, 'diameter_y': 0.1048, 'diameter_z': 0.1048,
- 'center_x': 0., 'center_y': 0., 'center_z': 0.,
- 'mu': 0.15, 'm': 0.37, 'px': 0.0, 'py': 0.0, 'pz': 0.0,
- 'I_xx': 0.00081, 'I_yy': 0.00081, 'I_zz': 0.00081,
- 'I_xy': 0.0, 'I_xz': 0.0, 'I_yz': 0.0,
- 'scaling_vector': 1.0 / np.array([
- 0.37, 0.035, 0.035, 0.035, 0.00081, 0.00081, 0.00081, 0.00081,
- 0.00081, 0.00081]), 'vertices': CUBE_VERTICES
- }
- },
- 'elbow': {
- 'elbow_1': {
- 'diameter_x': 0.1, 'diameter_y': 0.05, 'diameter_z': 0.05,
- 'center_x': 0., 'center_y': 0., 'center_z': 0.,
- 'mu': 0.3, 'm': 0.37, 'px': 0.0, 'py': 0.0, 'pz': 0.0,
- 'I_xx': 0.0006167, 'I_yy': 0.0006167, 'I_zz': 0.0006167,
- 'I_xy': 0.0, 'I_xz': 0.0, 'I_yz': 0.0,
-            'scaling_vector': 1.0 / np.array([
- 0.37, 0.035, 0.035, 0.035, 0.0006167, 0.0006167, 0.0006167,
- 0.0006167, 0.0006167, 0.0006167]), 'vertices': ELBOW_HALF_VERTICES
- },
- 'elbow_2': {
- 'diameter_x': 0.1, 'diameter_y': 0.05, 'diameter_z': 0.05,
- 'center_x': 0.035, 'center_y': 0., 'center_z': 0.,
- 'mu': 0.3, 'm': 0.37, 'px': 0.035, 'py': 0.0, 'pz': 0.0,
- 'I_xx': 0.0006167, 'I_yy': 0.0006167, 'I_zz': 0.0006167,
- 'I_xy': 0.0, 'I_xz': 0.0, 'I_yz': 0.0,
-            'scaling_vector': 1.0 / np.array([
- 0.37, 0.035, 0.035, 0.035, 0.0006167, 0.0006167, 0.0006167,
- 0.0006167, 0.0006167, 0.0006167]), 'vertices': ELBOW_HALF_VERTICES
- }
- },
- 'asymmetric': {
- 'body': {
- 'diameter_x': 0.10000000149011612,
- 'diameter_y': 0.07500000298023224,
- 'diameter_z': 0.07500000298023224,
- 'center_x': 0.02500000223517418,
- 'center_y': 0.012500000186264515,
- 'center_z': -0.012500000186264515,
- 'mu': 0.15, 'm': 0.25, 'px': 0.0, 'py': 0.0, 'pz': 0.0,
- 'I_xx': 0.00081, 'I_yy': 0.00081, 'I_zz': 0.00081,
- 'I_xy': 0.0, 'I_xz': 0.0, 'I_yz': 0.0,
- 'scaling_vector': 1.0 / np.array([
- 0.25, 0.035, 0.035, 0.035, 0.00081, 0.00081, 0.00081, 0.00081,
- 0.00081, 0.00081]), 'vertices': ASYMMETRIC_VERTICES
- }
- }
-}
-N_RUNS = 'n_runs'
-
-SYSTEM_BY_EXPERIMENT = {
- 'cube': 'cube',
- 'elbow': 'elbow',
- 'asymmetric_vortex': 'asymmetric',
- 'asymmetric_viscous': 'asymmetric',
- 'elbow_vortex': 'elbow',
- 'elbow_viscous': 'elbow'}
-TITLE_BY_EXPERIMENT = {
- 'cube': 'Cube with Real Data',
- 'elbow': 'Articulated Object with Real Data',
- 'asymmetric_vortex': 'Asymmetric in Vortex Sim',
- 'asymmetric_viscous': 'Asymmetric in Viscous Sim',
- 'elbow_vortex': 'Articulated Object in Vortex Sim',
-    'elbow_viscous': 'Articulated Object in Viscous Sim',
-    'elbow_gravity': 'Articulated Object in Gravity Sim',
-    'cube_gravity': 'Cube in Gravity Sim',
-    'asymmetric_gravity': 'Asymmetric in Gravity Sim'}
-
-DATASET_SIZE_DICT = {2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: []}
-GRAVITY_FRACTION_DICT = {0.: [], 0.5: [], 1.: [], 1.5: [], 2.0: []}
-
-# The following are t values for 95% confidence interval.
-T_SCORE_PER_DOF = {1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776,
- 5: 2.571, 6: 2.447, 7: 2.365, 8: 2.306,
- 9: 2.262, 10: 2.228, 11: 2.201, 12: 2.179,
- 13: 2.160, 14: 2.145, 15: 2.131, 16: 2.120,
- 17: 2.110, 18: 2.101, 19: 2.093, 20: 2.086,
- 21: 2.080, 22: 2.074, 23: 2.069, 24: 2.064,
- 25: 2.060, 26: 2.056, 27: 2.052, 28: 2.048,
- 29: 2.045, 30: 2.042}
-
-RUN_NUMBERS_TO_SKIP = [str(i).zfill(2) for i in range(20)]
-GRAVITY_RUN_NUMBERS_TO_SKIP = [str(i).zfill(2) for i in range(3)]
-
-XS = [2**(key-1) for key in DATASET_SIZE_DICT.keys()]
-
-# Some settings on the plot generation.
-rc('legend', fontsize=30)
-plt.rc('axes', titlesize=40) # fontsize of the axes title
-plt.rc('axes', labelsize=40) # fontsize of the x and y labels
-
-
-# ============================= Helper functions ============================= #
-def extract_mesh_from_support_points(support_points: Tensor) -> MeshSummary:
- """Given a set of convex polytope vertices, extracts a vertex/face mesh.
-
- Args:
- support_points: ``(*, 3)`` polytope vertices.
-
- Returns:
- Object vertices and face indices.
- """
- support_point_hashes = set()
- unique_support_points = []
-
- # remove duplicate vertices
- for vertex in support_points:
- vertex_hash = hash(vertex.numpy().tobytes())
- if vertex_hash in support_point_hashes:
- continue
- support_point_hashes.add(vertex_hash)
- unique_support_points.append(vertex)
-
- vertices = torch.stack(unique_support_points)
- hull = ConvexHull(vertices.numpy())
- faces = Tensor(hull.simplices).to(torch.long) # type: ignore
-
- _, backwards, _ = extract_outward_normal_hyperplanes(
- vertices.unsqueeze(0), faces.unsqueeze(0))
- backwards = backwards.squeeze(0)
- faces[backwards] = faces[backwards].flip(-1)
-
- return MeshSummary(vertices=support_points, faces=faces)
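-
-# Illustrative sketch (not part of the original pipeline): for the cube
-# geometry defined above, the extracted mesh triangulates the convex hull of
-# the eight corners, e.g.
-#
-#     mesh = extract_mesh_from_support_points(CUBE_VERTICES)
-#     # mesh.faces has shape (n_triangles, 3) from scipy's ConvexHull.
-#
-# Duplicate support points are hashed out before the hull is built, and the
-# returned MeshSummary keeps the original support points as vertices while
-# the faces index the de-duplicated set, so inputs are expected to be
-# duplicate-free.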
-
-def _get_mesh_interior_point(halfspaces: np.ndarray) -> Tuple[np.ndarray, float]:
- norm_vector = np.reshape(np.linalg.norm(halfspaces[:, :-1], axis=1),
- (halfspaces.shape[0], 1))
- objective_coefficients = np.zeros((halfspaces.shape[1],))
- objective_coefficients[-1] = -1
- A = np.hstack((halfspaces[:, :-1], norm_vector))
- b = -halfspaces[:, -1:]
- res = linprog(objective_coefficients, A_ub=A, b_ub=b, bounds=(None, None))
- interior_point = res.x[:-1]
- interior_point_gap = res.x[-1]
- return interior_point, interior_point_gap
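-
-# A sketch of the linear program solved above (the standard Chebyshev-center
-# formulation): given halfspaces a_i . x + b_i <= 0, find a point x and the
-# largest margin d such that
-#
-#     a_i . x + ||a_i|| * d <= -b_i    for all i,
-#
-# by maximizing d (linprog minimizes, hence the -1 objective coefficient). A
-# positive optimal d means the halfspace intersection has non-empty interior,
-# which is how the caller below checks whether the two hulls overlap.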
-
-def calculate_error_vertices(vertices_learned: Tensor,
- vertices_true: Tensor) -> Tensor:
- """Relative error between two convex hulls of provided vertices.
-
-    Uses the identity that the volume of the non-overlapping region is the
-    sum of the volumes of the two convex hulls minus twice the volume of
-    their intersection.
-
-    Args:
-        vertices_learned: (N, 3) tensor of vertices of the learned geometry.
-        vertices_true: (N, 3) tensor of vertices of the true geometry.
-
-    Returns:
-        (1,) tensor of the non-overlapping volume divided by the true volume.
-    """
- # pylint: disable=too-many-locals
- true_volume = ConvexHull(vertices_true.numpy()).volume
- sum_volume = ConvexHull(vertices_learned.numpy()).volume + true_volume
-
- mesh_learned = extract_mesh_from_support_points(vertices_learned)
- mesh_true = extract_mesh_from_support_points(vertices_true)
-
- normal_learned, _, extent_learned = extract_outward_normal_hyperplanes(
- mesh_learned.vertices.unsqueeze(0), mesh_learned.faces.unsqueeze(0))
- normal_true, _, extent_true = extract_outward_normal_hyperplanes(
- mesh_true.vertices.unsqueeze(0), mesh_true.faces.unsqueeze(0))
-
- halfspaces_true = torch.cat(
- [normal_true.squeeze(), -extent_true.squeeze().unsqueeze(-1)],
- dim=1)
-
- halfspaces_learned = torch.cat(
- [normal_learned.squeeze(), -extent_learned.squeeze().unsqueeze(-1)],
- dim=1)
-
- intersection_halfspaces = torch.cat(
- [halfspaces_true, halfspaces_learned], dim=0).numpy()
-
- # find interior point of intersection
- interior_point, interior_point_gap = _get_mesh_interior_point(
- intersection_halfspaces)
-
- intersection_volume = 0.
-
- if interior_point_gap > 0.:
- # intersection is non-empty
- intersection_halfspace_convex = HalfspaceIntersection(
- intersection_halfspaces, interior_point)
-
- intersection_volume = ConvexHull(
- intersection_halfspace_convex.intersections).volume
-
- return Tensor([sum_volume - 2 * intersection_volume
- ]).abs() / true_volume
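-
-# Worked form of the identity used above, with V(.) denoting volume,
-# A = learned hull and B = true hull:
-#
-#     V(A symmetric-difference B) = V(A) + V(B) - 2 * V(A intersect B)
-#
-# and the returned error is that quantity divided by V(B). Identical hulls
-# therefore give 0, and disjoint hulls give (V(A) + V(B)) / V(B).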
-
-def calculate_vertex_position_error(true_vertices, learned_vertices):
- true_vertices = Tensor(true_vertices)
- learned_vertices = Tensor(learned_vertices)
- assert true_vertices.shape == learned_vertices.shape
- assert true_vertices.shape[1] == 3
-
- vert_displacement = true_vertices - learned_vertices
- vert_dists = torch.linalg.norm(vert_displacement, dim=1)
-
- return vert_dists.sum().item()
-
-def get_empty_experiment_dict_by_experiment(experiment):
- # First get a list of bodies in the system.
- system = SYSTEM_BY_EXPERIMENT[experiment]
- bodies = CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys()
-
- # Then build structure.
- empty_dict_per_experiment = deepcopy(METHOD_RESULTS)
- for method in empty_dict_per_experiment.keys():
- empty_dict_per_experiment[method] = deepcopy(METRICS)
- empty_dict_per_experiment[method].update(
- {N_RUNS: deepcopy(DATASET_SIZE_DICT)})
- for metric in METRICS.keys():
- empty_dict_per_experiment[method][metric] = \
- deepcopy(DATASET_SIZE_DICT)
- for param_metric in PARAMETER_METRICS_BY_EXPERIMENT[experiment]:
- empty_dict_per_experiment[method].update(
- {param_metric: deepcopy(DATASET_SIZE_DICT)})
- for post_metric in FIXED_HORIZON_METRICS_BY_EXPERIMENT[experiment]:
- empty_dict_per_experiment[method].update(
- {post_metric: deepcopy(DATASET_SIZE_DICT)})
- for exponent in DATASET_SIZE_DICT.keys():
- empty_dict_per_experiment[method][N_RUNS][exponent] = 0
-
- return empty_dict_per_experiment
-
-def get_empty_gravity_experiment_dict_by_experiment(experiment):
- # First get a list of bodies in the system.
- system = SYSTEM_BY_EXPERIMENT[experiment]
- bodies = CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys()
-
- # Then build structure.
- empty_dict_per_experiment = deepcopy(METHOD_RESULTS)
- for method in empty_dict_per_experiment.keys():
- empty_dict_per_experiment[method] = deepcopy(METRICS)
- empty_dict_per_experiment[method].update(
- {N_RUNS: deepcopy(GRAVITY_FRACTION_DICT)})
- for metric in METRICS.keys():
- empty_dict_per_experiment[method][metric] = \
- deepcopy(GRAVITY_FRACTION_DICT)
- for param_metric in PARAMETER_METRICS_BY_EXPERIMENT[f'{experiment}_gravity']:
- empty_dict_per_experiment[method].update(
- {param_metric: deepcopy(GRAVITY_FRACTION_DICT)})
- for gravity_frac in GRAVITY_FRACTION_DICT.keys():
- empty_dict_per_experiment[method][N_RUNS][gravity_frac] = 0
-
- return empty_dict_per_experiment
-
-def set_of_vals_to_t_confidence_interval(ys):
- if len(ys) <= 1:
- return None, None, None
-
- dof = len(ys) - 1
-
- ys_np = np.array(ys)
-
- mean = np.mean(ys)
- lower = mean - T_SCORE_PER_DOF[dof]*np.std(ys)/np.sqrt(dof+1)
- upper = mean + T_SCORE_PER_DOF[dof]*np.std(ys)/np.sqrt(dof+1)
-
- return mean, lower, upper
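-
-# Rough usage sketch (values approximate): for ys = [1.0, 2.0, 3.0] there are
-# dof = 2 degrees of freedom, so T_SCORE_PER_DOF[2] = 4.303 and
-#
-#     mean  = 2.0
-#     lower = 2.0 - 4.303 * std(ys) / sqrt(3)  (about -0.03)
-#     upper = 2.0 + 4.303 * std(ys) / sqrt(3)  (about  4.03)
-#
-# Note that, as written, np.std defaults to the population standard deviation
-# (ddof=0) rather than the sample standard deviation.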
-
-def get_method_name_by_run_dict(run_dict):
- if not run_dict['structured']:
- return 'End-to-End'
- elif not run_dict['contactnets'] and run_dict['residual']:
- return 'DiffSim-R'
- elif not run_dict['contactnets'] and not run_dict['residual']:
- return 'DiffSim'
- elif run_dict['loss_variation'] == 3:
- return 'dummy'
- # if run_dict['contactnets'] and run_dict['residual']:
- # return 'VimpI RP'
- # elif run_dict['contactnets'] and not run_dict['residual']:
- # return 'VimpI'
- elif run_dict['loss_variation'] == 1:
- if run_dict['contactnets'] and run_dict['residual']:
- return 'CCN-R'
- elif run_dict['contactnets'] and not run_dict['residual']:
- return 'CCN'
-
- raise RuntimeError(f"Unknown method with run_dict: {run_dict}")
-
-def fill_exp_dict_with_single_run_data(run_dict, sweep_instance, exp_dict,
-                                       gravity=False):
-    method = get_method_name_by_run_dict(run_dict)
-
-    # Note: `experiment` is the module-level variable set by the plotting
-    # loops below.
-    exp_key = f'{experiment}_gravity' if gravity else experiment
-
- for result_metric in run_dict['results'].keys():
- new_key = result_metric[5:] if result_metric[:5] == 'test_' else \
- result_metric
-
- if new_key in METRICS:
- exp_dict[method][new_key][sweep_instance].append(
- run_dict['results'][result_metric])
- elif new_key in PARAMETER_METRICS_BY_EXPERIMENT[exp_key]:
- exp_dict[method][new_key][sweep_instance].append(
- run_dict['results'][result_metric])
- elif new_key in FIXED_HORIZON_METRICS_BY_EXPERIMENT[exp_key]:
- exp_dict[method][new_key][sweep_instance].append(
- run_dict['results'][result_metric])
-
- return exp_dict
-
-def convert_lists_to_t_conf_dict(exp_dict, sweep_instance):
- # Iterate over methods then metrics and parameters.
- for method in METHOD_RESULTS.keys():
- # Here "quantity" can be a metric or parameter.
- for quantity in exp_dict[method].keys():
- if quantity == N_RUNS:
- continue
-
- vals = exp_dict[method][quantity][sweep_instance]
-
- mean, lower, upper = set_of_vals_to_t_confidence_interval(vals)
-
- exp_dict[method][quantity][sweep_instance] = {
- 'mean': mean, 'lower': lower, 'upper': upper
- }
- exp_dict[method][N_RUNS][sweep_instance] = \
- max(len(vals), exp_dict[method][N_RUNS][sweep_instance])
-
- return exp_dict
-
-def get_plottable_values(exp_dict, metric, method, metric_lookup, gravity=False):
-    try:
-        data_dict = exp_dict[method][metric]
-    except KeyError:
-        return [None], [None], [None], [None]
-
- xs, ys, lowers, uppers = [], [], [], []
-
- scaling = metric_lookup[metric]['scaling']
-
- for x in data_dict.keys():
- if gravity:
- xs.append(x*9.81)
- else:
- xs.append(2**(x-1))
- ys.append(data_dict[x]['mean'])
- lowers.append(data_dict[x]['lower'])
- uppers.append(data_dict[x]['upper'])
-
- if None not in ys:
- ys = [y*scaling for y in ys]
- lowers = [lower*scaling for lower in lowers]
- uppers = [upper*scaling for upper in uppers]
-
- return xs, ys, lowers, uppers
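-
-# The x-axis conversion above is: dataset-size sweeps store the exponent key
-# n and plot 2**(n-1) training tosses (n = 5 -> 16 tosses), while gravity
-# sweeps store the gravity fraction and plot it in m/s^2 (0.5 -> ~4.905).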
-
-def get_plottable_run_counts(exp_dict, method, gravity=False):
- data_dict = exp_dict[method][N_RUNS]
-
- xs, ys = [], []
-
- for x in data_dict.keys():
- if not gravity:
- xs.append(2**(x-1))
- else:
- xs.append(x)
- ys.append(data_dict[x])
-
- return xs, ys
-
-def convert_parameters_to_errors(run_dict, experiment, gravity=False):
- params_dict = run_dict['learned_params']
-    if params_dict is None:
- return run_dict
-
- exp_key = f'{experiment}_gravity' if gravity else experiment
-
- for param_metric in PARAMETER_METRICS_BY_EXPERIMENT[exp_key]:
- if param_metric == GEOMETRY_PARAMETER_ERROR:
- run_dict = calculate_geometry_error(run_dict, experiment)
- elif param_metric == FRICTION_PARAMETER_ERROR:
- run_dict = calculate_friction_error(run_dict, experiment)
- elif param_metric == INERTIA_PARAMETER_ERROR:
- run_dict = calculate_inertia_error(run_dict, experiment)
- elif param_metric in [VERTEX_ERROR, VOLUME_ERROR]:
- # These are already calculated in the geometry error function.
- pass
- else:
- raise RuntimeError(f"Can't handle {param_metric} type.")
-
- return run_dict
-
-def format_plot(ax, fig, metric, metric_lookup, experiment, gravity=False):
- system = SYSTEM_BY_EXPERIMENT[experiment.split('_gravity')[0]]
-
- if not gravity:
- ax.set_xscale('log')
- ax.set_xlim(min(XS), max(XS))
- x_markers = [round(x, 1) for x in XS]
- else:
- ax.set_xlim(0, 2*9.81)
- x_markers = [0, 0.5*9.81, 1*9.81, 1.5*9.81, 2*9.81]
-
- if metric_lookup[metric]['log']:
- ax.set_yscale('log')
-
- ax.set_ylim(bottom=metric_lookup[metric]['ylims'][system][0],
- top=metric_lookup[metric]['ylims'][system][1])
-
- ax.xaxis.set_major_formatter(NullFormatter())
- ax.xaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_major_formatter(NullFormatter())
-
- ax.set_xticks([])
- ax.set_xticklabels([])
- ax.set_xticks(x_markers)
- if metric == "volume_error":
- ax.set_xticklabels(x_markers)
-
- ax.tick_params(axis='x', which='minor', bottom=False, labelsize=20)
- ax.tick_params(axis='x', which='major', bottom=False, labelsize=20)
-
- ax.tick_params(axis='y', which='minor', labelsize=20)
- ax.tick_params(axis='y', which='major', labelsize=20)
-
- ax.yaxis.set_major_formatter(
- FormatStrFormatter(metric_lookup[metric]['yformat'][system]))
- # ax.yaxis.set_minor_formatter(
- # FormatStrFormatter(metric_lookup[metric]['yformat'][system]))
-
- if metric in ["volume_error", FRICTION_PARAMETER_ERROR,
- INERTIA_PARAMETER_ERROR]:
- if not gravity:
- plt.xlabel('Training tosses')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- else:
- plt.xlabel('Modeled Gravity Acceleration [$m/s^2$]')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
-
- if (experiment == 'elbow' and not gravity) or \
- metric in [INERTIA_PARAMETER_ERROR, FRICTION_PARAMETER_ERROR]:
- plt.ylabel(metric_lookup[metric]['label'])
- else:
- ax.set_yticklabels([])
-
- ax.yaxis.grid(True, which='both')
- ax.xaxis.grid(True, which='major')
-
- lines = ax.get_lines()
-
- handles, labels = plt.gca().get_legend_handles_labels()
-
- if metric in ['model_pos_int_traj', FRICTION_PARAMETER_ERROR,
- INERTIA_PARAMETER_ERROR]:
- plt.title(TITLE_BY_EXPERIMENT[experiment], fontsize=40)
-
- # if experiment == 'elbow_gravity' and metric == 'volume_error':
- # ax.plot([0], [10], label=method, linewidth=5,
- # color=METHOD_RESULTS[method])
- # plt.legend(handles, labels)
- # plt.legend(loc=metric_lookup[metric]['legend_loc'][system],
- # prop=dict(weight='bold'))
-
- # # Shrink current axis by 20%
- # box = ax.get_position()
- # ax.set_position([box.x0, box.y0, box.width * 0.5, box.height])
-
- # # Put a legend to the right of the current axis
- # ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
- # prop=dict(weight='bold'))
-
-
- fig.set_size_inches(13, 13)
-
-def get_single_body_correct_geometry_array(system, body):
- # In order of diameters then centers x y z, get the correct parameters.
- params = CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]
- ground_truth = np.array([params['diameter_x'], params['diameter_y'],
- params['diameter_z'], params['center_x'], params['center_y'],
- params['center_z']])
- return ground_truth
-
-def get_single_body_correct_inertia_array(system, body):
- # In order of mass, CoM xyz, inertia xx yy zz xy xz yz, get the correct
- # parameters.
- params = CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]
- ground_truth = np.array([params['m'], params['px'], params['py'],
- params['pz'], params['I_xx'], params['I_yy'], params['I_zz'],
- params['I_xy'], params['I_xz'], params['I_yz']])
- return ground_truth
-
-def calculate_geometry_error(run_dict, experiment):
- system = SYSTEM_BY_EXPERIMENT[experiment]
-
- # Start an empty numpy array to store true and learned values.
- true_vals = np.array([])
- learned_vals = np.array([])
-
- vertex_err = 0.
- volume_err = 0.
-
- # Iterate over bodies in the system.
- for body in CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys():
- body_dict = run_dict['learned_params'][body]
-
- ground_truth_verts = \
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['vertices']
- learned_verts = Tensor(body_dict['vertices'])
-
-        # calculate_error_vertices returns the relative volume error and
-        # calculate_vertex_position_error returns the summed vertex distance.
-        volume_err += calculate_error_vertices(
-            learned_verts, ground_truth_verts).item()
-        vertex_err += calculate_vertex_position_error(
-            ground_truth_verts, learned_verts)
-
- ground_truth = get_single_body_correct_geometry_array(system, body)
-
- learned = np.array([body_dict['diameter_x'], body_dict['diameter_y'],
- body_dict['diameter_z'], body_dict['center_x'],
- body_dict['center_y'], body_dict['center_z']])
-
- true_vals = np.concatenate((true_vals, ground_truth))
- learned_vals = np.concatenate((learned_vals, learned))
-
- # Calculate geometry error as norm of the difference between learned and
- # true values.
- geometry_error = np.linalg.norm(true_vals - learned_vals)
-
- n_bodies = len(CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys())
- n_verts = len(CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['vertices'])
-
- vertex_error = vertex_err / (n_bodies * n_verts)
- volume_error = volume_err / n_bodies
-
- # Insert this error into the results dictionary.
- run_dict['results'].update({GEOMETRY_PARAMETER_ERROR: geometry_error})
- run_dict['results'].update({VERTEX_ERROR: vertex_error})
- run_dict['results'].update({VOLUME_ERROR: volume_error})
- return run_dict
-
-def calculate_inertia_error(run_dict, experiment):
- system = SYSTEM_BY_EXPERIMENT[experiment]
-
- # Start an empty numpy array to store true and learned values.
- true_vals = np.array([])
- learned_vals = np.array([])
-
- # Iterate over bodies in the system.
- for body in CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys():
- body_dict = run_dict['learned_params'][body]
-
- ground_truth = get_single_body_correct_inertia_array(system, body)
-
- learned = np.array([body_dict['m'], body_dict['px'], body_dict['py'],
- body_dict['pz'], body_dict['I_xx'],
- body_dict['I_yy'], body_dict['I_zz'],
- body_dict['I_xy'], body_dict['I_xz'],
- body_dict['I_yz']])
-
- # Since inertia parameters can be such different sizes, scale all of
- # them to get on similar scale.
- ground_truth = np.multiply(
- ground_truth,
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['scaling_vector']
- )
- learned = np.multiply(
- learned,
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['scaling_vector']
- )
-
- true_vals = np.concatenate((true_vals, ground_truth))
- learned_vals = np.concatenate((learned_vals, learned))
-
- # Calculate inertia error as norm of the scaled difference between learned
- # and true values.
- inertia_error = np.linalg.norm(true_vals - learned_vals)
-
- # Insert this error into the results dictionary.
- run_dict['results'].update({INERTIA_PARAMETER_ERROR: inertia_error})
- return run_dict
-
-def calculate_friction_error(run_dict, experiment):
- system = SYSTEM_BY_EXPERIMENT[experiment]
-
- # Start an empty numpy array to store true and learned values.
- true_vals = np.array([])
- learned_vals = np.array([])
-
- # Iterate over bodies in the system.
- for body in CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys():
- body_dict = run_dict['learned_params'][body]
-
- ground_truth = np.array([
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['mu']])
- learned = np.array([body_dict['mu']])
-
- true_vals = np.concatenate((true_vals, ground_truth))
- learned_vals = np.concatenate((learned_vals, learned))
-
- # Calculate friction error as norm of the difference between learned and
- # true values.
- friction_error = np.linalg.norm(true_vals - learned_vals)
-
- # Insert this error into the results dictionary.
- run_dict['results'].update({FRICTION_PARAMETER_ERROR: friction_error})
- return run_dict
-
-def include_fixed_horizon_post_stats(run_dict, experiment):
- params_dict = run_dict['fixed_horizon_post_results']
-    if params_dict is None:
- return run_dict
-
- for post_metric in FIXED_HORIZON_METRICS_BY_EXPERIMENT[experiment]:
- run_dict['results'].update({
- post_metric: params_dict[f'test_{post_metric}']})
- return run_dict
-
-def do_run_num_plot(exp_dict, experiment, gravity=False):
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- xs, ys = get_plottable_run_counts(exp_dict, method, gravity=gravity)
-
- # Plot the run numbers.
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
-
- if not gravity:
- ax.set_xscale('log')
- ax.set_xlim(min(XS), max(XS))
- x_markers = [round(x, 1) for x in XS]
- else:
- ax.set_xlim(0, 2)
- x_markers = [0, 0.5, 1, 1.5, 2]
-
- ax.set_ylim(0, None)
-
- ax.xaxis.set_major_formatter(NullFormatter())
- ax.xaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_major_formatter(NullFormatter())
-
- ax.set_xticks([])
- ax.set_xticklabels([])
- ax.set_xticks(x_markers)
- ax.set_xticklabels(x_markers)
-
- ax.tick_params(axis='x', which='minor', bottom=False, labelsize=20)
- ax.tick_params(axis='x', which='major', bottom=False, labelsize=20)
-
- ax.tick_params(axis='y', which='minor', labelsize=20)
- ax.tick_params(axis='y', which='major', labelsize=20)
-
- ax.yaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- ax.yaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
-
- if not gravity:
- plt.xlabel('Training tosses')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- else:
- plt.xlabel('Gravity fraction')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.1f"))
-
- plt.ylabel('Number of runs')
-
- ax.yaxis.grid(True, which='both')
- ax.xaxis.grid(True, which='major')
-
- lines = ax.get_lines()
-
- handles, labels = plt.gca().get_legend_handles_labels()
-
-    plt.legend(handles, labels, prop=dict(weight='bold'))
-
- fig.set_size_inches(13, 13)
-
- plt.title(experiment)
- fig_name = 'gravity_' if gravity else ''
- fig_name += f'{experiment}_run_nums.png'
- fig_path = op.join(OUTPUT_DIR, fig_name)
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
-# =============================== Plot results =============================== #
-# Load the results from the json file.
-with open(JSON_OUTPUT_FILE) as file:
- results = json.load(file)
-
-sent_warning = False
-
-# Iterate over experiments.
-for experiment in results.keys():
- system = SYSTEM_BY_EXPERIMENT[experiment]
- exp_dict = get_empty_experiment_dict_by_experiment(experiment)
-
- data_sweep = results[experiment]['data_sweep']
-
- # Iterate over dataset sizes to collect all the data.
- for exponent_str in data_sweep.keys():
- exponent = int(exponent_str)
-
- # Iterate over runs.
- for run_name, run_dict in data_sweep[exponent_str].items():
- if run_name[2:4] in RUN_NUMBERS_TO_SKIP:
- if not sent_warning:
- print(f'WARNING: Skipping any run numbers in ' + \
- f'{RUN_NUMBERS_TO_SKIP}.')
- sent_warning = True
- continue
-
- run_dict = convert_parameters_to_errors(run_dict, experiment)
- if get_method_name_by_run_dict(run_dict) == 'dummy': continue
- run_dict = include_fixed_horizon_post_stats(run_dict, experiment)
- exp_dict = fill_exp_dict_with_single_run_data(run_dict, exponent,
- exp_dict)
-
- # Convert lists to dictionary with keys average, upper, and lower.
- exp_dict = convert_lists_to_t_conf_dict(exp_dict, exponent)
-
- # Iterate over the metrics to do plots of each.
- for metric in METRICS.keys():
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- xs, ys, lowers, uppers = get_plottable_values(exp_dict, metric,
- method, METRICS)
- # Plot the method unless there are any None objects.
-            if None in ys or None in lowers or None in uppers:
- continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- ax.fill_between(xs, lowers, uppers, alpha=0.3,
- color=METHOD_RESULTS[method])
-
- format_plot(ax, fig, metric, METRICS, experiment)
-
- fig_path = op.join(OUTPUT_DIR, f'{experiment}_{metric}.png')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Iterate over parameter metrics to do plots of each.
- for parameter_metric in PARAMETER_METRICS_BY_EXPERIMENT[experiment]:
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- xs, ys, lowers, uppers = get_plottable_values(
- exp_dict, parameter_metric, method, PARAMETER_ERRORS)
-
- # Plot the method unless there are any None objects.
-            if None in ys or None in lowers or None in uppers:
- continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- ax.fill_between(xs, lowers, uppers, alpha=0.3,
- color=METHOD_RESULTS[method])
-
- format_plot(ax, fig, parameter_metric, PARAMETER_ERRORS, experiment)
-
- fig_path = op.join(OUTPUT_DIR, f'{experiment}_{parameter_metric}.png')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Iterate over fixed horizon post-processing metrics to do plots of each.
- for fixed_horizon_metric in FIXED_HORIZON_METRICS_BY_EXPERIMENT[experiment]:
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- xs, ys, lowers, uppers = get_plottable_values(
- exp_dict, fixed_horizon_metric, method, FIXED_HORIZON_METRICS)
-
- # Plot the method unless there are any None objects.
-            if None in ys or None in lowers or None in uppers:
- continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- ax.fill_between(xs, lowers, uppers, alpha=0.3,
- color=METHOD_RESULTS[method])
-
- format_plot(ax, fig, fixed_horizon_metric, FIXED_HORIZON_METRICS, experiment)
- plt.title(experiment)
- fig_path = op.join(OUTPUT_DIR, f'{experiment}_{fixed_horizon_metric}.png')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Add in a test plot of the number of experiments.
- do_run_num_plot(exp_dict, experiment)
-
-
-# =========================== Plot gravity results =========================== #
-# Load the results from the gravity json file.
-with open(JSON_GRAVITY_FILE) as file:
- results = json.load(file)
-
-sent_warning = False
-
-# Iterate over gravity experiments.
-for experiment in results.keys():
- system = SYSTEM_BY_EXPERIMENT[experiment]
- exp_dict = get_empty_gravity_experiment_dict_by_experiment(experiment)
-
- gravity_sweep = results[experiment]['gravity_sweep']
-
- # Iterate over dataset sizes to collect all the data.
- for grav_frac in gravity_sweep.keys():
- grav_frac = float(grav_frac)
-
- # Iterate over runs.
- for run_name, run_dict in gravity_sweep[str(grav_frac)].items():
- if run_name[2:4] in GRAVITY_RUN_NUMBERS_TO_SKIP:
- if not sent_warning:
- print(f'WARNING: Skipping any run numbers in ' + \
- f'{GRAVITY_RUN_NUMBERS_TO_SKIP}.')
- sent_warning = True
- continue
-
- run_dict = convert_parameters_to_errors(run_dict, experiment,
- gravity=True)
- if get_method_name_by_run_dict(run_dict) == 'dummy': continue
- exp_dict = fill_exp_dict_with_single_run_data(
- run_dict, grav_frac, exp_dict, gravity=True)
-
- # Convert lists to dictionary with keys average, upper, and lower.
- exp_dict = convert_lists_to_t_conf_dict(exp_dict, grav_frac)
-
- # Iterate over the metrics to do plots of each.
- for metric in METRICS.keys():
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- xs, ys, lowers, uppers = get_plottable_values(
- exp_dict, metric, method, METRICS, gravity=True)
-
- # Plot the method unless there are any None objects.
-            if None in ys or None in lowers or None in uppers:
- continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- ax.fill_between(xs, lowers, uppers, alpha=0.3,
- color=METHOD_RESULTS[method])
-
- format_plot(ax, fig, metric, METRICS, f'{experiment}_gravity',
- gravity=True)
- fig_path = op.join(OUTPUT_DIR, f'gravity_{experiment}_{metric}.png')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Iterate over parameter metrics to do plots of each.
- exp_key = f'{experiment}_gravity'
- for parameter_metric in PARAMETER_METRICS_BY_EXPERIMENT[exp_key]:
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- xs, ys, lowers, uppers = get_plottable_values(
- exp_dict, parameter_metric, method, PARAMETER_ERRORS,
- gravity=True)
-
- # Plot the method unless there are any None objects.
-            if None in ys or None in lowers or None in uppers:
- continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- ax.fill_between(xs, lowers, uppers, alpha=0.3,
- color=METHOD_RESULTS[method])
-
- format_plot(ax, fig, parameter_metric, PARAMETER_ERRORS,
- f'{experiment}_gravity', gravity=True)
- fig_path = op.join(OUTPUT_DIR, f'gravity_{experiment}_{parameter_metric}.png')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Add in a test plot of the number of experiments.
- do_run_num_plot(exp_dict, experiment, gravity=True)
-
-
-
-
-
-
-
diff --git a/dair_pll_old/helpers/experiments.py b/dair_pll_old/helpers/experiments.py
deleted file mode 100644
index 646e887..0000000
--- a/dair_pll_old/helpers/experiments.py
+++ /dev/null
@@ -1,276 +0,0 @@
-from dair_pll import file_utils
-from dair_pll.dataset_management import ExperimentDataManager
-from dair_pll.deep_learnable_system import DeepLearnableSystemConfig
-from dair_pll.drake_experiment import DrakeDeepLearnableExperiment, DrakeMultibodyLearnableExperiment, MultibodyLearnableSystemConfig
-from dair_pll.experiment import TrainingState
-import torch
-from torch import Tensor
-import numpy as np
-import matplotlib.pyplot as plt
-import argparse
-import pickle
-import os
-import seaborn as sns
-from scipy.spatial.transform import Rotation as R
-
-# The following are t values for 95% confidence interval.
-T_SCORE_PER_DOF = {1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776,
- 5: 2.571, 6: 2.447, 7: 2.365, 8: 2.306,
- 9: 2.262, 10: 2.228, 11: 2.201, 12: 2.179,
- 13: 2.160, 14: 2.145, 15: 2.131, 16: 2.120,
- 17: 2.110, 18: 2.101, 19: 2.093, 20: 2.086,
- 21: 2.080, 22: 2.074, 23: 2.069, 24: 2.064,
- 25: 2.060, 26: 2.056, 27: 2.052, 28: 2.048,
- 29: 2.045, 30: 2.042}
-
-def load_pkl(filename):
- with open(filename, 'rb') as file:
- data = pickle.load(file)
- print(data.keys())
-
-def visualize_trajectory(trajectory_dir, fig_name):
- traj = torch.load(trajectory_dir) #q_t(wxyz), p_t, w_t, dp_t
- print(f'traj loaded: {traj.size()}') #N,13
- p_t = traj[:,4:7].numpy() #N,3
- q_t = traj[:,:4].numpy() #N,4, w,x,y,z
- q_t_shuffled = np.concatenate((q_t[:, 1:], q_t[:, 0].reshape(-1,1)), axis=1) ##N,4, x,y,z,w
- dp_t = traj[:,10:].numpy() #N,3
- w_t_body = traj[:,7:10].numpy() #N,3, in body frame
- fig, ax = plt.subplots(4, 3, figsize=(15, 15))
-
- # Plot positions
- ax[0, 0].plot(p_t[:, 0])
- ax[0, 0].set_title('X Position')
- ax[0, 1].plot(p_t[:, 1])
- ax[0, 1].set_title('Y Position')
- ax[0, 2].plot(p_t[:, 2])
- ax[0, 2].set_title('Z Position')
-
- # Plot Quaternion components
- ax[1, 0].plot(q_t[:, 0])
- ax[1, 0].set_title('Quaternion w')
- ax[1, 1].plot(q_t[:, 1])
- ax[1, 1].set_title('Quaternion x')
- ax[1, 2].plot(q_t[:, 2])
- ax[1, 2].set_title('Quaternion y')
- ax[2, 0].plot(q_t[:, 3])
- ax[2, 0].set_title('Quaternion z')
-
- # Plot angular velocities
- ax[2, 1].plot(w_t_body[:, 0])
- ax[2, 1].set_title('Angular Velocity X')
- ax[2, 2].plot(w_t_body[:, 1])
- ax[2, 2].set_title('Angular Velocity Y')
- ax[3, 0].plot(w_t_body[:, 2])
- ax[3, 0].set_title('Angular Velocity Z')
-
- # Plot linear velocities
- ax[3, 1].plot(dp_t[:, 0])
- ax[3, 1].set_title('Linear Velocity X')
- ax[3, 2].plot(dp_t[:, 1])
- ax[3, 2].set_title('Linear Velocity Y')
-
- plt.tight_layout()
- fig.suptitle('ContactNets cube trajectory')
- plt.savefig(fig_name)
- print(f'Saved to {fig_name}')
- plt.show()
-
-
-def load_experiment(run_name):
-    # Note: `storage` is the module-level value parsed from --storage below.
-    run_path = f"./results/{storage}/runs/{run_name}"
- storage_name = os.path.abspath(os.path.join(run_path, '..', '..'))
- print(storage_name)
- experiment_config = file_utils.load_configuration(storage_name, run_name)
- if isinstance(experiment_config.learnable_config,
- MultibodyLearnableSystemConfig):
- experiment_config.learnable_config.randomize_initialization = False
- return DrakeMultibodyLearnableExperiment(experiment_config)
- elif isinstance(experiment_config.learnable_config,
- DeepLearnableSystemConfig):
- return DrakeDeepLearnableExperiment(experiment_config)
- raise RuntimeError(f'Cannot recognize learnable type ' + \
- f'{experiment_config.learnable_config}')
-
-def get_test_set_traj_target_and_prediction(experiment):
- stats = file_utils.load_evaluation(experiment.config.storage,
- experiment.config.run_name)
- print("##########", len(stats['test_model_target_sample']))
- test_traj_target = stats['test_model_target_sample'][0]
- test_traj_prediction = stats['test_model_prediction_sample'][0]
- return Tensor(test_traj_target), Tensor(test_traj_prediction)
-
-def get_best_system_from_experiment(exp):
- checkpoint_filename = file_utils.get_model_filename(exp.config.storage,
- exp.config.run_name)
- checkpoint_dict = torch.load(checkpoint_filename)
- training_state = TrainingState(**checkpoint_dict)
-
- assert training_state.finished_training
-
- exp.learning_data_manager = ExperimentDataManager(
- exp.config.storage, exp.config.data_config,
- training_state.trajectory_set_split_indices)
- train_set, _, test_set = \
- exp.learning_data_manager.get_updated_trajectory_sets()
- learned_system = exp.get_learned_system(torch.cat(train_set.trajectories))
- learned_system.load_state_dict(training_state.best_learned_system_state)
-
- return learned_system
-
-def load_experiment_run_dir_sys(storage, run_name):
- experiment = load_experiment(run_name)
- run_dir = f'./results/{storage}/runs/{run_name}'
- print(f'Loading {run_dir}')
- learned_system = get_best_system_from_experiment(experiment)
- return experiment, run_dir, learned_system
-
-def split_traj(traj):
- p_t = traj[:,4:7].numpy() #N,3
- q_t = traj[:,:4].numpy() #N,4, w,x,y,z
- q_t_shuffled = np.concatenate((q_t[:, 1:], q_t[:, 0].reshape(-1,1)), axis=1) ##N,4, x,y,z,w
- dp_t = traj[:,10:].numpy() #N,3
- w_t_body = traj[:,7:10].numpy() #N,3, in body frame
- return p_t, q_t_shuffled, dp_t, w_t_body
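-
-# Layout of a single state row assumed throughout this file (13 values):
-#
-#     [qw qx qy qz | px py pz | wx wy wz | vx vy vz]
-#
-# split_traj therefore returns positions p_t, quaternions reordered to
-# x, y, z, w (scipy's convention), linear velocities dp_t, and body-frame
-# angular velocities w_t_body.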
-
-def eval(storage):
- rot_errs = []
- trans_errs = []
- for run_name in os.listdir(os.path.join('results', storage, 'runs')):
- if int(run_name.split('-')[-1]) in BAD_RUNS:
- continue
- print(f'Processing {run_name}')
- experiment, run_dir, learned_system = \
- load_experiment_run_dir_sys(storage, run_name)
- gt_traj, pred_traj = get_test_set_traj_target_and_prediction(
- experiment)
- print(gt_traj.size(), pred_traj.size())
- p_t_gt, q_t_gt, dp_t_gt, w_t_gt = split_traj(gt_traj)
- p_t_est, q_t_est, dp_t_est, w_t_est = split_traj(pred_traj)
- rot_gt = R.from_quat(q_t_gt).as_matrix()
- rot_est = R.from_quat(q_t_est).as_matrix()
- rot_err = np.linalg.norm(rot_gt - rot_est, 'fro', axis=(1,2))
- trans_err = np.linalg.norm(p_t_gt - p_t_est,axis=1)
- print(f'rot: {rot_err.shape}, trans: {trans_err.shape}')
- rot_errs.append(rot_err)
- trans_errs.append(trans_err)
- rot_errs = np.array(rot_errs)
- trans_errs = np.array(trans_errs)
- print(rot_errs.shape, trans_errs.shape)
- np.savetxt('rot_errs.txt', rot_errs)
- np.savetxt('trans_errs.txt', trans_errs)
-
-def set_of_vals_to_t_confidence_interval(ys):
- if len(ys) <= 1:
- return None, None, None
-
- dof = len(ys) - 1
-
- ys_np = np.array(ys)
-
- mean = np.mean(ys)
- lower = mean - T_SCORE_PER_DOF[dof]*np.std(ys)/np.sqrt(dof+1)
- upper = mean + T_SCORE_PER_DOF[dof]*np.std(ys)/np.sqrt(dof+1)
-
- return mean, lower, upper
-
-def plot():
- sns.set_style("whitegrid")
- rot_means, rot_lowers, rot_uppers = [], [], []
- trans_means, trans_lowers, trans_uppers = [], [], []
- rot_error_local = np.loadtxt('rot_errs.txt') #N,99
- trans_error_local = np.loadtxt('trans_errs.txt')
- rot_errors_cluster = np.loadtxt('rot_errs_cluster.txt')
- trans_errors_cluster = np.loadtxt('trans_errs_cluster.txt')
- rot_errors = np.concatenate((rot_error_local, rot_errors_cluster))
- trans_errors = np.concatenate((trans_error_local, trans_errors_cluster))
- print(rot_errors.shape, trans_errors.shape)
- for i in range(rot_errors.shape[1]):
- mean, lower, upper = set_of_vals_to_t_confidence_interval(rot_errors[:,i])
- mean_trans, lower_trans, upper_trans = set_of_vals_to_t_confidence_interval(trans_errors[:,i])
- rot_means.append(mean)
- rot_lowers.append(lower)
- rot_uppers.append(upper)
- trans_means.append(mean_trans)
- trans_lowers.append(lower_trans)
- trans_uppers.append(upper_trans)
-
- timestamps = np.arange(rot_errors.shape[1])
- # rot error
- plt.figure(figsize=(8, 7))
- ax = sns.lineplot(x=timestamps, y=rot_means, label='Mean Rotation Error', color='blue')
- ax.fill_between(timestamps, rot_lowers, rot_uppers, alpha=0.5, color=sns.xkcd_rgb['baby blue'], label='95% Confidence Interval')
- ax.set_facecolor('lavender') # Setting the background to light blue
- plt.gca().set_facecolor('lavender')
- ax.set_xlabel('Timesteps', fontsize=18)
- ax.set_ylabel('Rotational Error (rad)', fontsize=18)
- ax.set_xlim(0, 98)
- ax.set_ylim(0, 3.0)
- ax.set_title('Mean Rotation Error with 95% Confidence Interval', fontsize=18)
- ax.tick_params(axis='x', labelsize=18)
- ax.tick_params(axis='y', labelsize=18)
- ax.legend(fontsize=18)
-
-    plt.tight_layout()
-    plt.savefig(f'./results/{storage}/{storage}_rot.png')
-    print(f'Saved to ./results/{storage}/{storage}_rot.png')
-    plt.show()
- # trans error
- plt.figure(figsize=(8, 7))
- ax = sns.lineplot(x=timestamps, y=trans_means, label='Mean Translation Error', color='blue')
- ax.fill_between(timestamps, trans_lowers, trans_uppers, alpha=0.6, color=sns.xkcd_rgb['baby blue'], label='95% Confidence Interval')
- ax.set_facecolor('lavender') # Setting the background to light blue
- plt.gca().set_facecolor('lavender')
- ax.set_xlabel('Timesteps', fontsize=18)
- ax.set_ylabel('Translational Error (m)', fontsize=18)
- ax.set_xlim(0, 98)
- ax.set_ylim(0, 0.25)
- ax.set_title('Mean Translation Error with 95% Confidence Interval', fontsize=18)
- ax.legend(fontsize=18)
- ax.tick_params(axis='x', labelsize=18) # Adjust x axis tick label font size
- ax.tick_params(axis='y', labelsize=18)
-    plt.tight_layout()
-    plt.savefig(f'./results/{storage}/{storage}_trans.png')
-    print(f'Saved to ./results/{storage}/{storage}_trans.png')
-    plt.show()
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--storage",
- type=str,
- required=True,
- )
- parser.add_argument(
- "--run",
- type=str,
- required=False,
- )
- parser.add_argument(
- "--toss_id",
- type=int,
- required=False,
- )
- args = parser.parse_args()
- storage = args.storage
- run = args.run
- toss_id = args.toss_id
-
- BAD_RUNS = set((6,8,10,11))
-
- # gt_traj = f'./results/{storage}/data/ground_truth/{toss_id}.pt'
- # est_traj = f'./results/{storage}/data/learning/{toss_id}.pt'
- # visualize_trajectory(gt_traj, f'./results/{run}/gt_{run}_{toss_id}.png')
- # visualize_trajectory(est_traj, f'./results/{run}/est_{run}_{toss_id}.png')
-
- # stats = f'./results/{storage}/runs/{run}/statistics.pkl'
- # load_pkl(stats)
-
- eval(storage)
- # plot()
-
- # import pickle
- # config_dir = "/home/cnets-vision/mengti_ws/dair_pll_latest/results/final_gt_mesh/runs/final_gt_mesh-10/config.pkl"
- # with open(config_dir, 'rb') as file:
- # data = pickle.load(file)
- # print(data)
diff --git a/dair_pll_old/helpers/explore_run_status.py b/dair_pll_old/helpers/explore_run_status.py
deleted file mode 100644
index 63fbfb8..0000000
--- a/dair_pll_old/helpers/explore_run_status.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""Explore the results of a failed experiment run.
-
-Note: File gather_hyperparam_results.py now gets all of this information
-individually.
-"""
-
-import pdb
-import torch
-import wandb
-import csv
-
-from dair_pll.file_utils import *
-
-
-WANDB_PROJECT_CLUSTER = 'dair_pll-cluster'
-WANDB_PROJECT_LOCAL = 'dair_pll-dev'
-
-VALID_MODEL_MSE = 'valid_model_trajectory_mse_mean'
-POS_MODEL_ERROR = 'valid_model_pos_int_traj_mean'
-ROT_MODEL_ERROR = 'valid_model_angle_int_traj_mean'
-PENETRATION_MODEL = 'valid_model_penetration_int_traj_mean'
-
-VALID_MSE = 'valid_trajectory_mse'
-POS_ERROR = 'valid_pos_int_traj'
-ROT_ERROR = 'valid_angle_int_traj'
-PENETRATION = 'valid_penetration_int_traj'
-
-storage_name = '/home/bibit/dair_pll/results/hpreal_elbow' #'test_elbow'
-
-
-lookup_by_run_name = {}
-lookup_by_wandb_id = {}
-
-
-with open('hyperparameter_real_lookup.csv', newline='') as csvfile:
- csv_reader = csv.DictReader(csvfile)
- for row in csv_reader:
- lookup_by_run_name[row['run name']] = row
- lookup_by_wandb_id[row['wandb_id']] = row
-
-
-for run_name in lookup_by_run_name.keys():
- run_dict = lookup_by_run_name[run_name]
- wandb_id = run_dict['wandb_id']
-
- try:
- statistics = load_evaluation(storage_name, run_name)
- run_dict['best valid MSE'] = statistics[VALID_MODEL_MSE]
- run_dict['best position MSE'] = statistics[POS_MODEL_ERROR]
- run_dict['best angular MSE'] = statistics[ROT_MODEL_ERROR]
- run_dict['best penetration'] = statistics[PENETRATION_MODEL]
- print('Found statistics file.')
-
- except FileNotFoundError:
- print('No statistics file found; searching wandb logs.')
-
- # config = load_configuration(storage_name, run_name)
- checkpoint_filename = get_model_filename(storage_name, run_name)
- checkpoint_dict = torch.load(checkpoint_filename)
-
- wandb_run_id = checkpoint_dict['wandb_run_id']
-
- api = wandb.Api()
- run = api.run(f'ebianchi/{WANDB_PROJECT_CLUSTER}/{wandb_run_id}')
- run_history = run.history(pandas=False)
-
- best_valid_mse = run_history[0][VALID_MSE]
- best_pos_error = run_history[0][POS_ERROR]
- best_rot_error = run_history[0][ROT_ERROR]
- best_penetration = run_history[0][PENETRATION]
-
- for epoch_dict in run_history:
- new_valid_mse = epoch_dict[VALID_MSE]
- if new_valid_mse < best_valid_mse:
- best_valid_mse = new_valid_mse
-
- new_pos_error = epoch_dict[POS_ERROR]
- if new_pos_error < best_pos_error:
- best_pos_error = new_pos_error
-
- new_rot_error = epoch_dict[ROT_ERROR]
- if new_rot_error < best_rot_error:
- best_rot_error = new_rot_error
-
- new_penetration = epoch_dict[PENETRATION]
- if new_penetration < best_penetration:
- best_penetration = new_penetration
-
- run_dict['best valid MSE'] = best_valid_mse
- run_dict['best position MSE'] = best_pos_error
- run_dict['best angular MSE'] = best_rot_error
- run_dict['best penetration'] = best_penetration
-
-
- lookup_by_run_name[run_name] = run_dict
- lookup_by_wandb_id[wandb_id] = run_dict
-
-
-with open('hyperparameter_real_performance.csv', 'w', newline='') as csvfile:
- writer = csv.DictWriter(csvfile, fieldnames=run_dict.keys())
-
- writer.writeheader()
- for run_info in lookup_by_wandb_id.keys():
- writer.writerow(lookup_by_wandb_id[run_info])
-
-pdb.set_trace()
-
-
diff --git a/dair_pll_old/helpers/find_wandb_group.py b/dair_pll_old/helpers/find_wandb_group.py
deleted file mode 100644
index 6fed918..0000000
--- a/dair_pll_old/helpers/find_wandb_group.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""Helper script to determine the run parameters from the W&B group ID.
-
-Note: File gather_hyperparam_results.py now gets all of this information
-individually.
-"""
-import os
-import os.path as op
-import git
-import fnmatch
-import pdb
-import csv
-
-
-ELBOW_HP_SCRIPT_PATTERN = 'startup_hpreal_elbow_ie????.bash'
-
-FIELDNAMES = ['wandb_id', 'run name', 'loss variation', 'w_comp', 'w_diss',
- 'w_pen']
-
-
-repo = git.Repo(search_parent_directories=True)
-git_folder = repo.git.rev_parse("--show-toplevel")
-git_folder = op.normpath(git_folder)
-
-startup_scripts_folder = op.join(git_folder, 'examples')
-
-startup_scripts_list = sorted(os.listdir(startup_scripts_folder))
-
-
-def get_params_from_bash_script(script_name):
- full_script_path = f'{startup_scripts_folder}/{script_name}'
- script = open(full_script_path, 'r').read()
- wandb_id = script.split('WANDB_RUN_GROUP=')[-1].split(';')[0]
- loss_variation = script.split('loss-variation=')[-1].split(' ')[0]
- w_comp = script.split('w-comp=')[-1].split(' ')[0]
- w_diss = script.split('w-diss=')[-1].split(' ')[0]
- w_pen = script.split('w-pen=')[-1].split('\n')[0].split(' ')[0]
-
- return {'wandb_id': wandb_id,
- 'loss variation': loss_variation,
- 'w_comp': w_comp,
- 'w_diss': w_diss,
- 'w_pen': w_pen}
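-
-# Illustrative sketch (hypothetical script contents): for a startup script
-# containing
-#
-#     export WANDB_RUN_GROUP=abc123;
-#     ... --loss-variation=1 --w-comp=0.1 --w-diss=0.5 --w-pen=10.0
-#
-# get_params_from_bash_script returns
-#     {'wandb_id': 'abc123', 'loss variation': '1', 'w_comp': '0.1',
-#      'w_diss': '0.5', 'w_pen': '10.0'}
-# (all values are kept as strings).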
-
-
-lookup_by_wandb_id = {}
-lookup_by_run_name = {}
-
-for script in startup_scripts_list:
- if fnmatch.fnmatch(script, ELBOW_HP_SCRIPT_PATTERN):
- run_name = script.split('_')[-1].split('.')[0]
- params_dict = get_params_from_bash_script(script)
- params_dict['run name'] = run_name
- wandb_id = params_dict['wandb_id']
-
- print(run_name, wandb_id)
-
- lookup_by_wandb_id[wandb_id] = params_dict
- lookup_by_run_name[run_name] = params_dict
-
-
-
-with open('hyperparameter_real_lookup.csv', 'w', newline='') as csvfile:
- writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)
-
- writer.writeheader()
- for run_info in lookup_by_wandb_id.keys():
- writer.writerow(lookup_by_wandb_id[run_info])
-
-
-pdb.set_trace()
diff --git a/dair_pll_old/helpers/gather_hyperparam_results.py b/dair_pll_old/helpers/gather_hyperparam_results.py
deleted file mode 100644
index 31f1dd0..0000000
--- a/dair_pll_old/helpers/gather_hyperparam_results.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""Helper script to determine the run parameters from the W&B group ID."""
-
-import os
-import os.path as op
-import git
-import fnmatch
-import pdb
-import csv
-import wandb
-import torch
-
-from dair_pll.file_utils import *
-
-
-# For results that generated hp_search_2.csv:
-# CSV_NAME = 'hp_search_2.csv'
-# HP_SCRIPT_PATTERN = 'startup_hpreal_elbow_ie????.bash'
-# storage_name = '/home/bibit/dair_pll/results/hpreal_elbow'
-# Set a minimum run number since multiple hyperparameter searches were conducted
-# in the same results folder.
-# MINIMUM_RUN_NUM = 372
-
-# For results that generated hp_search_3.csv:
-# CSV_NAME = 'hp_search_3.csv'
-# HP_SCRIPT_PATTERN = 'startup_elbow_real_re??-?.bash'
-# storage_name = '/home/bibit/dair_pll/results/elbow_real'
-# Set a minimum run number since multiple hyperparameter searches were conducted
-# in the same results folder.
-# MINIMUM_RUN_NUM = 13 # the minimum for hp_search_3.csv is 33
-
-# For results that generated hp_search_4.csv:
-# CSV_NAME = 'hp_search_4.csv'
-# HP_SCRIPT_PATTERN = 'startup_hpr_elbow_ie????.bash'
-# storage_name = '/home/bibit/dair_pll/results/hpr_elbow'
-# Set a minimum run number since multiple hyperparameter searches were conducted
-# in the same results folder.
-# MINIMUM_RUN_NUM = 0 # the minimum for hp_search_4.csv is 0
-
-# For results that generated viscous_hp_search.csv (superseded by the
-# viscous_elbow_hp.csv settings below):
-# CSV_NAME = 'viscous_hp_search.csv'
-# HP_SCRIPT_PATTERN = 'startup_shp_asymmetric_viscous_aa????.bash'
-# storage_name = '/home/bibit/dair_pll/results/shp_asymmetric_viscous'
-# Set a minimum run number since multiple hyperparameter searches were conducted
-# in the same results folder.
-# MINIMUM_RUN_NUM = 0 # the minimum for viscous_hp_search.csv is 0
-
-# For results that generated viscous_elbow_hp.csv:
-CSV_NAME = 'viscous_elbow_hp.csv'
-HP_SCRIPT_PATTERN = 'startup_shp_elbow_viscous_ae????.bash'
-storage_name = '/home/bibit/dair_pll/results/shp_elbow_viscous'
-# Set a minimum run number since multiple hyperparameter searches were conducted
-# in the same results folder.
-MINIMUM_RUN_NUM = 0 # the minimum for viscous_elbow_hp.csv is 0
-
-
-
-WANDB_PROJECT_CLUSTER = 'dair_pll-cluster'
-WANDB_PROJECT_LOCAL = 'dair_pll-dev'
-
-VALID_MODEL_MSE = 'valid_model_trajectory_mse_mean'
-POS_MODEL_ERROR = 'valid_model_pos_int_traj_mean'
-ROT_MODEL_ERROR = 'valid_model_angle_int_traj_mean'
-PENETRATION_MODEL = 'valid_model_penetration_int_traj_mean'
-
-VALID_MSE = 'valid_trajectory_mse'
-POS_ERROR = 'valid_pos_int_traj'
-ROT_ERROR = 'valid_angle_int_traj'
-PENETRATION = 'valid_penetration_int_traj'
-
-
-
-repo = git.Repo(search_parent_directories=True)
-git_folder = repo.git.rev_parse("--show-toplevel")
-git_folder = op.normpath(git_folder)
-
-startup_scripts_folder = op.join(git_folder, 'examples')
-
-startup_scripts_list = sorted(os.listdir(startup_scripts_folder))
-
-def get_parameter_from_string(param: str, long_string: str):
-    value = long_string.split(f'{param}=')[-1]
-    return value.split('\n')[0].split(' ')[0].split(';')[0]
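-
-# Illustrative sketch (hypothetical string, for illustration only):
-#
-#     get_parameter_from_string('w-pen', '... --w-pen=0.3 --w-res=0.01\n')
-#
-# returns '0.3': the helper takes everything after the last occurrence of
-# 'w-pen=' and truncates at the first newline, space, or semicolon.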
-
-
-def get_params_from_bash_script(script_name):
- full_script_path = f'{startup_scripts_folder}/{script_name}'
- script = open(full_script_path, 'r').read()
-
- wandb_id = get_parameter_from_string('WANDB_RUN_GROUP', script)
- loss_variation = get_parameter_from_string('loss-variation', script)
- w_pred = get_parameter_from_string('w-pred', script)
- w_comp = get_parameter_from_string('w-comp', script)
- w_diss = get_parameter_from_string('w-diss', script)
- w_pen = get_parameter_from_string('w-pen', script)
- w_res = get_parameter_from_string('w-res', script)
- w_res_w = get_parameter_from_string('w-res-w', script)
-
- return {'wandb_id': wandb_id,
- 'loss variation': loss_variation,
- 'w_pred': w_pred,
- 'w_comp': w_comp,
- 'w_diss': w_diss,
- 'w_pen': w_pen,
- 'w_res': w_res,
- 'w_res_w': w_res_w}
-
-
-lookup_by_wandb_id = {}
-lookup_by_run_name = {}
-
-params_dict = {}
-
-for script in startup_scripts_list:
- if fnmatch.fnmatch(script, HP_SCRIPT_PATTERN):
- run_name = script.split('_')[-1].split('.')[0]
- try:
- run_num = int(run_name.split('-')[0][2:])
- except:
- continue
-
- if run_num >= MINIMUM_RUN_NUM:
- params_dict = get_params_from_bash_script(script)
- params_dict['run name'] = run_name
- wandb_id = params_dict['wandb_id']
-
- print(f'{run_name}, {wandb_id}:', end=' ')
-
- try:
- statistics = load_evaluation(storage_name, run_name)
- params_dict['best valid MSE'] = statistics[VALID_MODEL_MSE]
- params_dict['best position MSE'] = statistics[POS_MODEL_ERROR]
- params_dict['best angular MSE'] = statistics[ROT_MODEL_ERROR]
- params_dict['best penetration'] = statistics[PENETRATION_MODEL]
- print('Found statistics file.')
-
- except FileNotFoundError:
- print('No statistics file found; searching wandb logs.', end='')
-
- checkpoint_filename = get_model_filename(storage_name, run_name)
- checkpoint_dict = torch.load(checkpoint_filename)
-
- wandb_run_id = checkpoint_dict['wandb_run_id']
-
- api = wandb.Api()
- try:
- run = api.run(
- f'ebianchi/{WANDB_PROJECT_CLUSTER}/{wandb_run_id}')
- print('')
- except:
- print(' --> Could not find W&B run, skipping.')
- continue
-
- run_history = run.history(pandas=False)
-
- best_valid_mse = run_history[0][VALID_MSE]
- best_pos_error = run_history[0][POS_ERROR]
- best_rot_error = run_history[0][ROT_ERROR]
- best_penetration = run_history[0][PENETRATION]
-
- for epoch_dict in run_history:
- new_valid_mse = epoch_dict[VALID_MSE]
- if new_valid_mse < best_valid_mse:
- best_valid_mse = new_valid_mse
-
- new_pos_error = epoch_dict[POS_ERROR]
- if new_pos_error < best_pos_error:
- best_pos_error = new_pos_error
-
- new_rot_error = epoch_dict[ROT_ERROR]
- if new_rot_error < best_rot_error:
- best_rot_error = new_rot_error
-
- new_penetration = epoch_dict[PENETRATION]
- if new_penetration < best_penetration:
- best_penetration = new_penetration
-
- params_dict['best valid MSE'] = best_valid_mse
- params_dict['best position MSE'] = best_pos_error
- params_dict['best angular MSE'] = best_rot_error
- params_dict['best penetration'] = best_penetration
-
-
- lookup_by_run_name[run_name] = params_dict
- lookup_by_wandb_id[wandb_id] = params_dict
-
-
-
-with open(CSV_NAME, 'w', newline='') as csvfile:
- writer = csv.DictWriter(csvfile, fieldnames=params_dict.keys())
-
- writer.writeheader()
- for run_info in lookup_by_run_name.keys():
- writer.writerow(lookup_by_run_name[run_info])
-
-
-pdb.set_trace()
-
diff --git a/dair_pll_old/helpers/gather_results.py b/dair_pll_old/helpers/gather_results.py
deleted file mode 100644
index c20f79a..0000000
--- a/dair_pll_old/helpers/gather_results.py
+++ /dev/null
@@ -1,356 +0,0 @@
-import json
-import os
-import os.path as op
-import pdb
-import pickle
-import torch
-from copy import deepcopy
-
-import numpy as np
-
-from dair_pll.deep_learnable_system import DeepLearnableSystemConfig
-from dair_pll.drake_experiment import MultibodyLosses
-from dair_pll.geometry import _NOMINAL_HALF_LENGTH
-from dair_pll.inertia import InertialParameterConverter
-
-# Directory management.
-RESULTS_DIR = op.join(op.dirname(__file__), '..', 'results')
-OUTPUT_DIR = op.join(op.dirname(__file__), '..', 'plots')
-JSON_OUTPUT_FILE = op.join(op.dirname(__file__), 'results_gt.json')
-
-ROLLOUT_LENGTHS = [4, 8, 16, 32, 64, 128, 256, 512]
-
-BODY_NAMES_BY_SYSTEM = {'cube': ['body']}
-BODY_PARAMETERS = {
- 'm': 'Mass',
- 'com_x': 'CoM x',
- 'com_y': 'CoM y',
- 'com_z': 'CoM z',
- 'I_xx': 'I_xx',
- 'I_yy': 'I_yy',
- 'I_zz': 'I_zz',
- 'I_xy': 'I_xy',
- 'I_xz': 'I_xz',
- 'I_yz': 'I_yz',
- 'mu': 'Friction coefficient',
- 'center_x': 'Geometry center x',
- 'center_y': 'Geometry center y',
- 'center_z': 'Geometry center z',
- 'diameter_x': 'Geometry diameter x',
-    'diameter_y': 'Geometry diameter y',
-    'diameter_z': 'Geometry diameter z'}
-POLYGON_GEOMETRY_PARAMETERS = ['center_x', 'center_y', 'center_z',
- 'diameter_x', 'diameter_y', 'diameter_z']
-
-INERTIA_KEY = 'multibody_terms.lagrangian_terms.inertial_parameters'
-FRICTION_KEY = 'multibody_terms.contact_terms.friction_params'
-GEOMETRY_PREFIX = 'multibody_terms.contact_terms.geometries'
-GEOMETRY_KEY_BODY_1 = f'{GEOMETRY_PREFIX}.2.vertices_parameter'
-GEOMETRY_KEY_BODY_2 = f'{GEOMETRY_PREFIX}.0.vertices_parameter'
-# GEOMETRY_KEY2 = 'multibody_terms.contact_terms.geometries.0.length_params'
-
-# Fixed rollout horizon used for the post-processed error metrics.
-FIXED_HORIZON = 16
-FIXED_HORIZON_METRICS = [f'pos_error_w_horizon_{FIXED_HORIZON}',
- f'rot_error_w_horizon_{FIXED_HORIZON}']
-
-FRICTION_INDEX_BY_BODY_NAME = {'body': 0, 'elbow_2': 0, 'elbow_1': 2}
-
-PERFORMANCE_METRICS = ['delta_v_squared_mean', 'v_plus_squared_mean',
- 'model_loss_mean', 'oracle_loss_mean',
- 'model_trajectory_mse_mean', 'oracle_trajectory_mse_mean',
- 'model_pos_int_traj', 'oracle_pos_int_traj',
- 'model_angle_int_traj', 'oracle_angle_int_traj',
- 'model_penetration_int_traj', 'oracle_penetration_int_traj']
-POST_PERFORMANCE_METRICS = \
- [f'pos_error_w_horizon_{i}' for i in ROLLOUT_LENGTHS] + \
- [f'rot_error_w_horizon_{i}' for i in ROLLOUT_LENGTHS]
-
-DATASET_EXPONENTS = [2, 3, 4, 5, 6, 7, 8, 9]
-SYSTEMS = ['cube']
-ORDERED_INERTIA_PARAMS = ['m', 'px', 'py', 'pz', 'I_xx', 'I_yy', 'I_zz',
- 'I_xy', 'I_xz', 'I_yz']
-TARGET_SAMPLE_KEY = 'model_target_sample'
-PREDICTION_SAMPLE_KEY = 'model_prediction_sample'
-
-ORIGINAL_KIND = 'original'
-POST_KIND = 'post'
-FIXED_HORIZON_KIND = 'fixed_horizon'
-KINDS = [ORIGINAL_KIND, POST_KIND, FIXED_HORIZON_KIND]
-
-# Template dictionaries, from low- to high-level.
-RUN_DICT = {'structured': None, 'contactnets': None, 'loss_variation': None,
- 'residual': None, 'result_set': None, 'results': None,
- 'learned_params': None, 'post_results': None,
- 'fixed_horizon_post_results': None}
-EXPERIMENT_DICT = {'system': None, 'prefix': None, 'data_sweep': None}
-
-# BAD_RUN_NUMBERS = {
-# 'cube': [i for i in range(24)] + [i for i in range(25, 32)]
-# }
-
-# Prepend the below with 'sweep_' and postpend with '-#' to get the folders.
-EXPERIMENTS = {'cube': {'system': 'cube', 'prefix': 'bundlesdf'},
- }
- # 'elbow': {'system': 'elbow', 'prefix': 'se'},
- # 'asymmetric_vortex': {'system': 'asymmetric', 'prefix': 'va'},}
- #'elbow_vortex': {'system': 'elbow', 'prefix': 've'},
- #'asymmetric_viscous': {'system': 'asymmetric', 'prefix': 'ba'},
- #'elbow_viscous': {'system': 'elbow', 'prefix': 'be'}}
-
-
-# ============================= Helper functions ============================= #
-# Return an empty data sweep dictionary, to prevent unintended data retention.
-def make_empty_data_sweep_dict():
- new_dict = {}
- for exp in DATASET_EXPONENTS: new_dict.update({exp: {}})
- return new_dict
-
-# Extract information out of a configuration object.
-def get_run_info_from_config(config):
- run_dict = deepcopy(RUN_DICT)
-
-    run_dict['structured'] = not isinstance(config.learnable_config,
-                                            DeepLearnableSystemConfig)
-    run_dict['contactnets'] = run_dict['structured'] and \
-        config.learnable_config.loss == MultibodyLosses.CONTACTNETS_LOSS
-    run_dict['loss_variation'] = config.learnable_config.loss_variation \
-        if run_dict['structured'] else 0
-    run_dict['residual'] = run_dict['structured'] and \
-        config.learnable_config.do_residual
- run_dict['result_set'] = 'test'
- run_name = config.run_name
-
- return run_name, run_dict
-
-# Calculate geometry measurements from a set of polygon vertices.
-def get_geometry_metrics_from_params(geom_params):
- # First, convert the parameters to meters.
- vertices = geom_params * _NOMINAL_HALF_LENGTH
-
- # Extract diameters and centers.
- mins = vertices.min(axis=0).values
- maxs = vertices.max(axis=0).values
-
- diameters = maxs - mins
- centers = (maxs + mins)/2
-
- geom_dict = {'diameter_x': diameters[0].item(),
- 'diameter_y': diameters[1].item(),
- 'diameter_z': diameters[2].item(),
- 'center_x': centers[0].item(),
- 'center_y': centers[1].item(),
- 'center_z': centers[2].item(),
- 'vertices': vertices.tolist()}
- return geom_dict
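-
-# Illustrative example for get_geometry_metrics_from_params (not part of the
-# original script): normalized unit-cube corners, i.e. geom_params with rows
-# (+/-1, +/-1, +/-1), would yield diameters of 2 * _NOMINAL_HALF_LENGTH on
-# every axis and centers at the origin.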
-
-def geometry_keys_by_sys_and_bodies(system, body_name):
- if system == 'cube' or system == 'asymmetric':
- return {'body': GEOMETRY_KEY_BODY_2}
- return {'elbow_1': GEOMETRY_KEY_BODY_1, 'elbow_2': GEOMETRY_KEY_BODY_2}
-
-# Get individual physical parameters from best learned system state.
-def get_physical_parameters(system, body_names, best_system_state):
- physical_params_dict = {}
-
- theta = best_system_state[INERTIA_KEY]
- friction_params = best_system_state[FRICTION_KEY]
- if GEOMETRY_KEY_BODY_2 in best_system_state.keys():
- geometry_keys = geometry_keys_by_sys_and_bodies(system, body_names)
- else:
- geometry_keys = {}
- print(f'\t\tFound non-polygon; won\'t gather geometry results.')
-
- inertia_pi_cm_params = InertialParameterConverter.theta_to_pi_cm(theta)
-
- # Loop over each body.
- for i in range(len(body_names)):
- body = body_names[i]
-
- # First, get the inertial parameters.
- i_params = inertia_pi_cm_params[i, :]
- i_params[1:4] /= i_params[0].item() # Divide out the mass.
-
- body_params = {}
-
- for j in range(10):
- body_params.update({ORDERED_INERTIA_PARAMS[j]: i_params[j].item()})
-
- # Second, get the friction parameters.
- mu_index = FRICTION_INDEX_BY_BODY_NAME[body]
- body_params.update({'mu': friction_params[mu_index].item()})
-
- # Third, get the geometry parameters.
- try:
- geometry_params = best_system_state[geometry_keys[body]]
- geom_dict = get_geometry_metrics_from_params(geometry_params)
- body_params.update(geom_dict)
-        except KeyError:
-            # Non-polygon geometry for this body; skip the geometry metrics.
-            pass
-
- # Store the results.
- physical_params_dict.update({body: body_params})
-
- return physical_params_dict
-
-# Extract the desired statistics from the larger stats file. Will convert
-# numpy arrays into averages.
-def get_performance_from_stats(stats, set_name, kind=ORIGINAL_KIND):
- metrics = PERFORMANCE_METRICS if kind==ORIGINAL_KIND else \
- POST_PERFORMANCE_METRICS if kind==POST_KIND else \
- FIXED_HORIZON_METRICS
-
- performance_dict = {}
- for metric in metrics:
- key = f'{set_name}_{metric}'
-        try:
-            if isinstance(stats[key], np.ndarray):
-                performance_dict.update({key: np.average(stats[key])})
-            else:
-                performance_dict.update({key: stats[key]})
-        except KeyError:
-            print(f'\t\tDidn\'t find {key} in stats...')
- return performance_dict
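-
-# Example usage of get_performance_from_stats (illustrative):
-#   test_metrics = get_performance_from_stats(stats, 'test', kind=ORIGINAL_KIND)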
-
-# Get run configuration, statistics, and checkpoint objects. Returns None for
-# any that don't exist.
-def get_config_stats_checkpoint(runs_path, run):
- config, stats, checkpoint = None, None, None
-
- config_file = op.join(runs_path, run, 'config.pkl')
- if op.exists(config_file):
- with open(config_file, 'rb') as file:
- config = pickle.load(file)
-
- stats_file = op.join(runs_path, run, 'statistics.pkl')
- if op.exists(stats_file):
- with open(stats_file, 'rb') as file:
- stats = pickle.load(file)
-
- checkpoint_file = op.join(runs_path, run, 'checkpoint.pt')
- if op.exists(checkpoint_file):
- checkpoint = torch.load(checkpoint_file)
-
- return config, stats, checkpoint
-
-def get_post_processed_stats_file(runs_path, run):
- stats = None
- stats_file = op.join(runs_path, run, 'post_processing',
- 'post_statistics.pkl')
- print(f'Looking for stats file at {stats_file}')
- if op.exists(stats_file):
- with open(stats_file, 'rb') as file:
- stats = pickle.load(file)
- return stats
-
-def get_post_processed_fixed_horizon_stats_file(runs_path, run):
- stats = None
- stats_file = op.join(runs_path, run, 'traj_sweep_statistics.pkl')
- print(f'Looking for fixed horizon stats file at {stats_file}')
- if op.exists(stats_file):
- with open(stats_file, 'rb') as file:
- stats = pickle.load(file)
- return stats
-
-# =============================== Gather data ================================ #
-# Loop over dataset categories, then dataset size, then individual runs.
-runs_needing_statistics = []
-finished_runs_needing_post_statistics = []
-results = {}
-
-sent_warning = {'elbow': False, 'cube': False}
-
-for experiment in EXPERIMENTS.keys():
- print(f'\n\n============== Starting {experiment} ==============')
- exp_dict = deepcopy(EXPERIMENT_DICT)
- system = EXPERIMENTS[experiment]['system']
- exp_dict['system'] = system
- exp_dict['prefix'] = EXPERIMENTS[experiment]['prefix']
- exp_dict['data_sweep'] = make_empty_data_sweep_dict()
-
- body_names = BODY_NAMES_BY_SYSTEM[system]
-
- for exponent in DATASET_EXPONENTS:
- data_size = 2 ** exponent
- results_folder_name = f'gt_cube_{data_size}'
- runs_path = op.join(RESULTS_DIR, results_folder_name, 'runs')
- if not op.isdir(runs_path):
- print(f'Could not find {runs_path}; skipping.')
- continue
-
- print(f'\nFound {runs_path}.')
-
- for run in os.listdir(runs_path):
- # if int(run[2:4]) in BAD_RUN_NUMBERS[experiment]:
- # continue
- # if not sent_warning[experiment]:
- # print(f'WARNING: Skipping run numbers ' + \
- # f'{BAD_RUN_NUMBERS[experiment]}')
- # sent_warning[experiment] = True
-
- config, stats, checkpoint = \
- get_config_stats_checkpoint(runs_path, run)
-
-            if stats is None:
- print(f'\tNo stats file for {run}; skipping.')
- runs_needing_statistics.append(
- op.join(runs_path, run).split('results/')[-1])
- continue
-
-            assert config is not None and checkpoint is not None
- print(f'\tFound statistics for {run}.', end='')
-
- run_key, run_dict = get_run_info_from_config(config)
-
- performance_dict = \
- get_performance_from_stats(stats, run_dict['result_set'])
- run_dict['results'] = performance_dict
-
- # Check for post-processed statistics.
- post_stats = get_post_processed_stats_file(runs_path, run)
-            if post_stats is None:
- print(f' No post-processed statistics found.', end='')
- finished_runs_needing_post_statistics.append(
- op.join(runs_path, run).split('results/')[-1])
-
- else:
- print(f' Found post-processed stats, too.', end='')
- post_performance_dict = get_performance_from_stats(
- post_stats, 'test', kind=POST_KIND)
- run_dict['post_results'] = post_performance_dict
-
- # Check for post-processed fixed horizon statistics.
- fixed_horizon_stats = get_post_processed_fixed_horizon_stats_file(
- runs_path, run)
-            if fixed_horizon_stats is None:
- print(f' No fixed horizon stats.')
- else:
- print(f' Also fixed horizon stats!')
- fixed_horizon_dict = get_performance_from_stats(
- fixed_horizon_stats, 'test', kind=FIXED_HORIZON_KIND)
- run_dict['fixed_horizon_post_results'] = fixed_horizon_dict
-
- # If structured, save learned physical parameters.
- if run_dict['structured']:
- best_system_state = checkpoint['best_learned_system_state']
- params_dict = get_physical_parameters(system, body_names,
- best_system_state)
- run_dict['learned_params'] = params_dict
-
- # Store everything in larger dictionary.
- exp_dict['data_sweep'][exponent].update({run_key: run_dict})
-
- results.update({experiment: exp_dict})
-
-print(f'\n\nSaving results to json file at {JSON_OUTPUT_FILE}.')
-with open(JSON_OUTPUT_FILE, 'w') as file:
- json.dump(results, file, indent=2)
-
-# pdb.set_trace()
-
-print(f'\n\nRuns needing statistics: {runs_needing_statistics}')
diff --git a/dair_pll_old/helpers/generate_toss_predictions.bash b/dair_pll_old/helpers/generate_toss_predictions.bash
deleted file mode 100644
index 3e1a4fc..0000000
--- a/dair_pll_old/helpers/generate_toss_predictions.bash
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-#SBATCH --gpus=0
-#SBATCH --mem-per-cpu=10G
-##SBATCH --qos=mp-med
-##SBATCH --partition=posa-compute
-##SBATCH --account mp-account
-#SBATCH --time=12:00:00
-#SBATCH --output=/home/bibit/dair_pll/logs/generate_toss_predictions.txt
-
-source /mnt/kostas-graid/sw/envs/bibit/pll_env/bin/activate;
-export PYTHONPATH=/mnt/kostas-graid/sw/envs/bibit:/home/bibit/dair_pll;
-
-cd /home/bibit/dair_pll/helpers
-PYTHONUNBUFFERED=1 python generate_toss_predictions.py
-
diff --git a/dair_pll_old/helpers/generate_toss_predictions.py b/dair_pll_old/helpers/generate_toss_predictions.py
deleted file mode 100644
index 072d5c0..0000000
--- a/dair_pll_old/helpers/generate_toss_predictions.py
+++ /dev/null
@@ -1,973 +0,0 @@
-"""Script to load a trained system then generate rollouts from the elbow toss
-dataset. Also saves a post-processed statistics file that has the position and
-angle trajectory errors."""
-
-import git
-import math
-import os
-import os.path as op
-import pickle
-import pdb
-import torch
-from torch import Tensor
-
-from dair_pll import file_utils
-from dair_pll.dataset_management import ExperimentDataManager
-from dair_pll.deep_learnable_system import DeepLearnableSystemConfig
-from dair_pll.drake_experiment import DrakeMultibodyLearnableExperiment, \
- DrakeDeepLearnableExperiment, MultibodyLearnableSystemConfig
-from dair_pll.experiment import default_epoch_callback, TrainingState
-from dair_pll.state_space import FloatingBaseSpace
-
-
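-# NOTE: several PLL toss numbers below appear more than once (e.g. 15, 24, 53);
-# Python keeps only the last value for a repeated dictionary key, so the
-# earlier bag numbers listed for those tosses are silently dropped.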
-PLL_TO_BAG_NUMBERS = {
- 0: 0,
- 1: 1,
- 2: 2,
- 3: 3,
- 4: 4,
- 5: 5,
- 6: 6,
- 7: 7,
- 8: 8,
- 9: 9,
- 10: 10,
- 11: 11,
- 12: 13,
- 13: 14,
- 14: 15,
- 15: 16,
- 15: 17,
- 16: 18,
- 17: 19,
- 18: 20,
- 19: 21,
- 20: 22,
- 21: 23,
- 22: 24,
- 23: 25,
- 24: 26,
- 24: 27,
- 25: 28,
- 26: 29,
- 27: 30,
- 28: 31,
- 29: 32,
- 30: 33,
- 30: 34,
- 31: 35,
- 32: 36,
- 33: 37,
- 34: 38,
- 35: 39,
- 36: 40,
- 37: 41,
- 38: 42,
- 39: 43,
- 40: 44,
- 41: 45,
- 42: 46,
- 43: 47,
- 44: 48,
- 45: 49,
- 46: 50,
- 47: 51,
- 48: 52,
- 49: 53,
- 49: 54,
- 50: 55,
- 51: 56,
- 52: 57,
- 53: 58,
- 53: 59,
- 53: 60,
- 53: 61,
- 54: 62,
- 54: 63,
- 54: 64,
- 55: 65,
- 56: 66,
- 57: 67,
- 58: 68,
- 58: 69,
- 59: 70,
- 60: 71,
- 61: 72,
- 62: 73,
- 63: 74,
- 64: 75,
- 64: 76,
- 65: 77,
- 66: 78,
- 67: 79,
- 68: 80,
- 69: 81,
- 70: 82,
- 71: 83,
- 72: 84,
- 73: 85,
- 74: 86,
- 75: 87,
- 76: 88,
- 77: 89,
- 78: 90,
- 79: 91,
- 80: 92,
- 81: 93,
- 82: 94,
- 82: 95,
- 82: 96,
- 82: 97,
- 83: 98,
- 84: 99,
- 85: 100,
- 86: 101,
- 87: 102,
- 88: 103,
- 89: 104,
- 90: 105,
- 91: 106,
- 92: 107,
- 93: 108,
- 94: 109,
- 95: 110,
- 96: 111,
- 97: 112,
- 98: 113,
- 99: 114,
- 100: 115,
- 101: 116,
- 102: 117,
- 103: 118,
- 104: 119,
- 105: 121,
- 106: 122,
- 107: 123,
- 108: 124,
- 109: 125,
- 110: 126,
- 111: 127,
- 112: 128,
- 113: 129,
- 114: 130,
- 115: 131,
- 116: 132,
- 117: 133,
- 118: 134,
- 119: 135,
- 120: 136,
- 121: 137,
- 122: 138,
- 123: 139,
- 124: 140,
- 125: 141,
- 126: 142,
- 126: 143,
- 126: 144,
- 126: 145,
- 126: 146,
- 127: 147,
- 128: 148,
- 129: 149,
- 130: 150,
- 131: 151,
- 132: 152,
- 133: 153,
- 134: 154,
- 135: 155,
- 136: 156,
- 137: 157,
- 138: 158,
- 139: 159,
- 140: 160,
- 141: 161,
- 142: 162,
- 143: 163,
- 144: 164,
- 145: 165,
- 146: 166,
- 147: 167,
- 148: 168,
- 148: 169,
- 149: 170,
- 150: 171,
- 150: 172,
- 151: 173,
- 152: 174,
- 153: 175,
- 154: 176,
- 154: 177,
- 155: 178,
- 156: 179,
- 157: 180,
- 158: 181,
- 159: 182,
- 160: 183,
- 161: 184,
- 162: 185,
- 163: 186,
- 164: 187,
- 165: 188,
- 166: 189,
- 167: 190,
- 168: 191,
- 169: 192,
- 170: 193,
- 171: 194,
- 172: 195,
- 173: 196,
- 174: 197,
- 174: 198,
- 175: 199,
- 176: 200,
- 177: 201,
- 178: 202,
- 179: 203,
- 180: 204,
- 181: 205,
- 181: 206,
- 181: 207,
- 182: 208,
- 183: 209,
- 184: 210,
- 185: 211,
- 186: 212,
- 187: 213,
- 188: 214,
- 189: 215,
- 190: 216,
- 190: 217,
- 191: 218,
- 192: 219,
- 193: 220,
- 194: 221,
- 195: 222,
- 196: 223,
- 197: 224,
- 198: 225,
- 199: 226,
- 200: 227,
- 201: 228,
- 202: 229,
- 203: 230,
- 203: 231,
- 204: 232,
- 205: 233,
- 206: 234,
- 207: 235,
- 208: 236,
- 209: 237,
- 210: 238,
- 211: 239,
- 212: 240,
- 213: 241,
- 214: 242,
- 215: 243,
- 216: 244,
- 217: 245,
- 217: 246,
- 218: 247,
- 219: 248,
- 220: 249,
- 221: 250,
- 222: 251,
- 223: 252,
- 224: 253,
- 224: 254,
- 224: 255,
- 225: 256,
- 226: 257,
- 227: 258,
- 228: 259,
- 229: 260,
- 229: 261,
- 230: 262,
- 231: 263,
- 232: 265,
- 233: 266,
- 234: 267,
- 235: 268,
- 236: 269,
- 237: 270,
- 238: 271,
- 239: 272,
- 240: 273,
- 241: 274,
- 242: 275,
- 242: 276,
- 243: 277,
- 244: 278,
- 244: 279,
- 244: 280,
- 245: 281,
- 246: 282,
- 247: 283,
- 248: 284,
- 249: 285,
- 250: 286,
- 251: 287,
- 252: 288,
- 253: 289,
- 253: 290,
- 254: 291,
- 254: 292,
- 255: 293,
- 256: 294,
- 257: 295,
- 258: 296,
- 258: 297,
- 259: 298,
- 260: 299,
- 261: 300,
- 262: 301,
- 263: 302,
- 264: 303,
- 265: 304,
- 266: 305,
- 266: 306,
- 267: 307,
- 268: 308,
- 269: 309,
- 270: 310,
- 271: 311,
- 272: 312,
- 273: 313,
- 274: 314,
- 275: 315,
- 276: 316,
- 277: 317,
- 277: 318,
- 278: 319,
- 279: 320,
- 280: 321,
- 281: 322,
- 282: 323,
- 283: 324,
- 283: 325,
- 284: 326,
- 285: 327,
- 286: 328,
- 287: 329,
- 288: 330,
- 289: 331,
- 290: 333,
- 291: 334,
- 292: 335,
- 293: 336,
- 294: 337,
- 295: 338,
- 296: 339,
- 297: 340,
- 298: 341,
- 299: 342,
- 300: 343,
- 301: 344,
- 302: 345,
- 303: 346,
- 304: 347,
- 305: 348,
- 306: 349,
- 307: 350,
- 308: 351,
- 309: 352,
- 310: 353,
- 311: 354,
- 312: 355,
- 313: 356,
- 314: 357,
- 315: 358,
- 316: 359,
- 317: 360,
- 318: 361,
- 319: 362,
- 320: 363,
- 321: 364,
- 322: 365,
- 323: 366,
- 324: 367,
- 324: 368,
- 325: 369,
- 326: 370,
- 327: 371,
- 328: 372,
- 329: 373,
- 330: 374,
- 331: 375,
- 332: 376,
- 332: 377,
- 333: 378,
- 334: 379,
- 335: 380,
- 336: 381,
- 337: 382,
- 338: 383,
- 339: 384,
- 339: 385,
- 340: 386,
- 341: 387,
- 341: 388,
- 342: 389,
- 343: 390,
- 344: 391,
- 345: 392,
- 346: 393,
- 347: 394,
- 347: 395,
- 348: 396,
- 349: 397,
- 350: 398,
- 351: 399,
- 352: 400,
- 353: 401,
- 354: 402,
- 355: 403,
- 356: 404,
- 357: 405,
- 358: 406,
- 359: 407,
- 360: 408,
- 361: 409,
- 362: 410,
- 363: 411,
- 364: 412,
- 365: 413,
- 366: 414,
- 367: 415,
- 368: 416,
- 369: 417,
- 370: 418,
- 371: 419,
- 372: 420,
- 373: 421,
- 374: 422,
- 375: 423,
- 376: 424,
- 377: 425,
- 378: 426,
- 379: 427,
- 379: 428,
- 380: 429,
- 381: 430,
- 382: 431,
- 383: 432,
- 384: 433,
- 385: 434,
- 386: 435,
- 387: 436,
- 388: 437,
- 389: 438,
- 390: 439,
- 391: 440,
- 392: 441,
- 392: 442,
- 393: 443,
- 394: 444,
- 395: 445,
- 396: 446,
- 397: 447,
- 398: 448,
- 399: 449,
- 400: 450,
- 401: 451,
- 402: 452,
- 403: 453,
- 403: 454,
- 404: 455,
- 405: 456,
- 406: 457,
- 407: 458,
- 408: 459,
- 409: 460,
- 410: 461,
- 411: 462,
- 412: 463,
- 413: 464,
- 414: 465,
- 415: 466,
- 416: 467,
- 417: 468,
- 418: 469,
- 419: 470,
- 420: 471,
- 421: 472,
- 422: 473,
- 423: 474,
- 424: 475,
- 425: 476,
- 426: 477,
- 427: 478,
- 428: 479,
- 429: 480,
- 430: 481,
- 431: 482,
- 432: 483,
- 433: 484,
- 433: 485,
- 433: 486,
- 434: 487,
- 435: 488,
- 436: 489,
- 437: 490,
- 438: 491,
- 439: 492,
- 440: 493,
- 441: 494,
- 442: 495,
- 443: 496,
- 444: 497,
- 445: 498,
- 446: 499,
- 447: 500,
- 447: 501,
- 448: 502,
- 449: 503,
- 450: 504,
- 451: 505,
- 452: 506,
- 453: 507,
- 454: 508,
- 454: 509,
- 455: 510,
- 456: 511,
- 457: 512,
- 458: 513,
- 459: 514,
- 460: 515,
- 461: 516,
- 462: 517,
- 463: 518,
- 464: 519,
- 465: 520,
- 466: 521,
- 467: 522,
- 468: 523,
- 469: 524,
- 470: 525,
- 471: 526,
- 472: 527,
- 473: 528,
- 474: 529,
- 475: 530,
- 476: 531,
- 477: 532,
- 478: 533,
- 479: 534,
- 480: 535,
- 481: 536,
- 482: 537,
- 483: 538,
- 484: 539,
- 485: 540,
- 486: 541,
- 487: 542,
- 487: 543,
- 488: 544,
- 489: 545,
- 490: 546,
- 491: 547,
- 492: 548,
- 492: 549,
- 493: 550,
- 494: 551,
- 495: 552,
- 496: 553,
- 497: 554,
- 497: 555,
- 498: 556,
- 499: 557,
- 500: 558,
- 500: 559,
- 501: 560,
- 502: 561,
- 503: 562,
- 504: 563,
- 505: 564,
- 506: 565,
- 507: 566,
- 508: 567,
- 509: 568,
- 510: 569,
- 511: 570,
- 512: 571,
- 513: 572,
- 514: 573,
- 515: 574,
- 516: 575,
- 517: 576,
- 518: 577,
- 519: 578,
- 520: 579,
- 521: 580,
- 522: 581,
- 523: 582,
- 524: 583,
- 525: 584,
- 526: 585,
- 527: 586,
- 528: 587,
- 529: 588,
- 530: 589,
- 531: 590,
- 532: 591,
- 533: 592,
- 533: 593,
- 534: 594,
- 535: 595,
- 536: 596,
- 537: 597,
- 538: 598,
- 539: 599,
- 540: 600
-}
-REPO_DIR = op.normpath(
- git.Repo(search_parent_directories=True).git.rev_parse("--show-toplevel"))
-RESULTS_DIR = op.join(REPO_DIR, 'results')
-ELBOW_ASSET_DIR = op.join(REPO_DIR, 'assets', 'contactnets_elbow')
-
-EXPERIMENT_TYPE_BY_PREFIX = {'sc': 'cube_real', 'se': 'elbow_real',
- 'va': 'vortex_asymmetric',}
- # 've': 'vortex_elbow',
- # 'ba': 'viscous_asymmetric',
- # 'be': 'viscous_elbow',
- #'gc': 'gravity_cube', 'ge': 'gravity_elbow'}
-SYSTEM_BY_PREFIX = {'sc': 'cube', 'se': 'elbow', 'va': 'asymmetric'}
-
-BAD_REAL_RUN_NUMBERS = [i for i in range(24)] + [i for i in range(25, 32)] + \
- [35, 36, 37, 38, 39, 40, 41, 42]
-BAD_SIM_RUN_NUMBERS = [i for i in range(24)] + [i for i in range(25, 30)] + \
- [31, 33, 35]
-FOLDERS_TO_LOAD = [f'sweep_elbow-{i}' for i in range(2, 10)] #+ \
- #[f'sweep_cube-{i}' for i in range(2, 10)] + \
- #[f'sweep_asymmetric_vortex-{i}' for i in range(2, 10)] #+ \
- # [f'sweep_elbow_vortex-{i}' for i in range(2, 10)] + \
- # [f'sweep_elbow_viscous-{i}' for i in range(2, 10)] + \
- # [f'sweep_cube_viscous-{i}' for i in range(2, 10)]
-
-N_STATE = {'cube': 13, 'elbow': 15, 'asymmetric': 13}
-
-# RUNS_TO_LOAD = ['se30-9-0', 'se31-9-0', 'se32-9-0', 'se33-9-0', 'se34-9-0',
-# 'se35-9-0', 'se24-9-0']
-
-PLL_TOSS_NUMS_TO_GENERATE = [0]
-
-# If the below is true, then the generated predictions will be of the first
-# trajectory of the experiment's own test set instead of a specified PLL toss
-# number. When this is the case, since the test set prediction is already
-# provided in the statistics file, we will iterate over rollout horizon and
-# generate many per trajectory.
-MODE_TEST_SET = 'test set'
-MODE_HORIZON_SWEEP = 'horizon sweep'
-MODE_FIXED_HORIZON_TRAJECTORY_SWEEP = 'fixed horizon trajectory sweep'
-MODE = MODE_FIXED_HORIZON_TRAJECTORY_SWEEP
-
-FIXED_HORIZON = 16
-STEP_SIZE = 5
-ROLLOUT_LENGTHS = [1, 2, 4, 8, 16, 32, 64, 120]
-
-
-# ============================= Helper functions ============================= #
-def run_name_to_run_dir(run_name):
- run_prefix = run_name[:2]
- experiment_type = EXPERIMENT_TYPE_BY_PREFIX[run_prefix]
-
- assert 'real' in experiment_type, \
- f'run_name: {run_name}, experiment_type: {experiment_type}'
-
- system = experiment_type.split('_real')[0]
- try:
- sub_number = run_name.split('-')[1]
- subfolder = f'sweep_{system}-{sub_number}'
-
- assert run_name in os.listdir(op.join(RESULTS_DIR, subfolder, 'runs'))
-
- return op.join(RESULTS_DIR, subfolder, 'runs', run_name)
- except:
- return None
-
-def experiment_finished(run_name):
- run_dir = run_name_to_run_dir(run_name)
-    if run_dir is None:
- return False
- return os.path.isfile(op.join(run_dir, 'statistics.pkl'))
-
-def post_processing_done(run_name):
- run_dir = run_name_to_run_dir(run_name)
- return os.path.isfile(
- op.join(run_dir, 'post_processing', 'post_statistics.pkl'))
-
-def post_processing_traj_sweep_done(run_name):
- run_dir = run_name_to_run_dir(run_name)
- return os.path.isfile(
- op.join(run_dir, 'traj_sweep_statistics.pkl'))
-
-def load_experiment(run_name):
- run_path = run_name_to_run_dir(run_name)
- storage_name = op.abspath(op.join(run_path, '..', '..'))
-
- experiment_config = file_utils.load_configuration(storage_name, run_name)
-
- if isinstance(experiment_config.learnable_config,
- MultibodyLearnableSystemConfig):
- experiment_config.learnable_config.randomize_initialization = False
- return DrakeMultibodyLearnableExperiment(experiment_config)
- elif isinstance(experiment_config.learnable_config,
- DeepLearnableSystemConfig):
- return DrakeDeepLearnableExperiment(experiment_config)
- raise RuntimeError(f'Cannot recognize learnable type ' + \
- f'{experiment_config.learnable_config}')
-
-def get_best_system_from_experiment(exp):
- checkpoint_filename = file_utils.get_model_filename(exp.config.storage,
- exp.config.run_name)
- checkpoint_dict = torch.load(checkpoint_filename)
- training_state = TrainingState(**checkpoint_dict)
-
- assert training_state.finished_training
-
- exp.learning_data_manager = ExperimentDataManager(
- exp.config.storage, exp.config.data_config,
- training_state.trajectory_set_split_indices)
- train_set, _, test_set = \
- exp.learning_data_manager.get_updated_trajectory_sets()
- learned_system = exp.get_learned_system(torch.cat(train_set.trajectories))
- learned_system.load_state_dict(training_state.best_learned_system_state)
-
- return learned_system
-
-def load_ground_truth_toss_trajectory(system_name, toss_num):
- assert system_name == 'elbow'
- toss_filename = op.join(ELBOW_ASSET_DIR, f'{toss_num}.pt')
- return torch.load(toss_filename)
-
-def compute_predicted_trajectory(experiment, learned_system, target_traj, system_name):
- state_n = N_STATE[system_name]
- assert target_traj.ndim == 2
- assert target_traj.shape[1] == state_n
-
- target_traj_list = [target_traj.reshape(1, -1, state_n)]
-
- predictions, targets = experiment.trajectory_predict(target_traj_list,
- learned_system, do_detach=True)
-
- first_state = target_traj[0].reshape(1, state_n)
- pred_traj = torch.cat((first_state,
- predictions[0].reshape(-1, state_n)), dim=0)
-
- return pred_traj
-
-def make_and_get_post_processing_dir(run_name):
- run_dir = run_name_to_run_dir(run_name)
- post_dir = op.join(run_dir, 'post_processing')
- file_utils.assure_created(post_dir)
- return post_dir
-
-def save_predicted_bag_trajectory(predicted_traj, run_name, pll_toss_num):
- bag_toss_num = PLL_TO_BAG_NUMBERS[pll_toss_num]
- post_dir = make_and_get_post_processing_dir(run_name)
- torch.save(predicted_traj, op.join(post_dir, f'predicted_{bag_toss_num}.pt'))
-
-def get_test_set_traj_target_and_prediction(experiment):
- stats = file_utils.load_evaluation(experiment.config.storage,
- experiment.config.run_name)
- test_traj_target = stats['test_model_target_sample'][0]
- test_traj_prediction = stats['test_model_prediction_sample'][0]
-
- return Tensor(test_traj_target), Tensor(test_traj_prediction)
-
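-# Replace the last `rollout_len` steps of `traj` with a model rollout seeded
-# from the ground-truth state `rollout_len` steps before the end, keeping the
-# earlier portion of the trajectory untouched.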
-def get_traj_with_rollout_of_len(traj, rollout_len, experiment, learned_system, system_name):
- state_n = N_STATE[system_name]
- assert traj.ndim == 2
- assert traj.shape[1] == state_n
-
- gt_traj_target = traj[(-rollout_len-1):].reshape(rollout_len+1, state_n)
-
- partial_pred_traj = compute_predicted_trajectory(
- experiment, learned_system, gt_traj_target, system_name)
- first_portion = traj[:(-rollout_len-1)]
- pred_traj = torch.cat((first_portion, partial_pred_traj), dim=0)
-
- return pred_traj
-
-def save_rollout_sweep_trajs(rollouts, run_name):
- post_dir = make_and_get_post_processing_dir(run_name)
- for horizon, traj in zip(ROLLOUT_LENGTHS, rollouts):
- torch.save(traj, op.join(post_dir, f'test_w_horizon_{horizon}.pt'))
-
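-# Accumulate base-position and quaternion errors between each predicted
-# trajectory and its target, summed over every floating-base subspace of the
-# experiment's state space.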
-def compute_pos_rot_trajectory_errors(target, predicteds, experiment, use_different_trajs=False):
- space = experiment.space
- pos_errors, rot_errors = [], []
-
- targets = target if use_different_trajs else [target] * len(predicteds)
-
- # Iterate over trajectories lengths.
- for tp, tt in zip(predicteds, targets):
-
- running_pos_mse = 0
- running_angle_mse = 0
-
- for space_i in space.spaces:
- if isinstance(space_i, FloatingBaseSpace):
- pos_mse = torch.stack([space_i.base_error(tp, tt)])
- angle_mse = torch.stack([space_i.quaternion_error(tp, tt)])
-
- running_pos_mse += pos_mse
- running_angle_mse += angle_mse
-
- pos_errors.append(running_pos_mse)
- rot_errors.append(running_angle_mse)
-
- return pos_errors, rot_errors
-
-def save_post_processing_stats(pos_errors, rot_errors, run_dir):
- # Make a dictionary.
- stats = {}
-
- for horizon, pos_error, rot_error in \
- zip(ROLLOUT_LENGTHS, pos_errors, rot_errors):
- pos_key = f'test_pos_error_w_horizon_{horizon}'
- rot_key = f'test_rot_error_w_horizon_{horizon}'
-
- stats[pos_key] = pos_error.item()
- stats[rot_key] = rot_error.item()
-
- filename = op.join(run_dir, 'post_processing', 'post_statistics.pkl')
- with open(filename, 'wb') as file:
- pickle.dump(stats, file)
-
-def save_post_processing_traj_sweep_stats(pos_errors, rot_errors, run_dir):
- # Make a dictionary.
- stats = {}
-
- pos_key = f'test_pos_error_w_horizon_{FIXED_HORIZON}'
- rot_key = f'test_rot_error_w_horizon_{FIXED_HORIZON}'
-
- stats[pos_key] = 0
- stats[rot_key] = 0
-
- n_trajs = len(pos_errors)
-
- for pos_error, rot_error in zip(pos_errors, rot_errors):
- stats[pos_key] += pos_error.item()
- stats[rot_key] += rot_error.item()
-
- stats[pos_key] /= n_trajs
- stats[rot_key] /= n_trajs
-
- filename = op.join(run_dir, 'traj_sweep_statistics.pkl')
- with open(filename, 'wb') as file:
- pickle.dump(stats, file)
-
-def get_runs_to_load(folder, real=True):
- bad_numbers = BAD_REAL_RUN_NUMBERS if real else BAD_SIM_RUN_NUMBERS
-
- runs_to_load = os.listdir(op.join(RESULTS_DIR, folder, 'runs'))
- i = 0
- while i < len(runs_to_load):
- if int(runs_to_load[i][2:4]) in bad_numbers:
- runs_to_load.remove(runs_to_load[i])
- else:
- i += 1
-
- return runs_to_load
-
-def load_experiment_run_dir_sys(run_name):
- experiment = load_experiment(run_name)
- run_dir = run_name_to_run_dir(run_name)
- print(f'Loading {run_dir}')
- learned_system = get_best_system_from_experiment(experiment)
- return experiment, run_dir, learned_system
-
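-# Slice the target trajectory into overlapping windows of `horizon + 1` states,
-# advancing STEP_SIZE states between windows, and roll the learned model out
-# from the first state of each window.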
-def all_rollouts_of_len(horizon, target_traj, experiment, learned_system, system_name):
- state_n = N_STATE[system_name]
- assert target_traj.ndim == 2
- assert target_traj.shape[1] == state_n
-
- traj_len = target_traj.shape[0]
- n_traj = math.floor((traj_len - horizon - 1)/STEP_SIZE)
-
- # Get all the possible targets contained in the trajectory.
- targets = [target_traj[i*STEP_SIZE:(i*STEP_SIZE)+horizon+1] for i in range(n_traj)]
-
- # Get all the predictions.
- predictions = [
- compute_predicted_trajectory(
- experiment, learned_system, targ, system_name
- ) for targ in targets
- ]
- return predictions, targets
-
-# ============================= Compute rollouts ============================= #
-for folder in FOLDERS_TO_LOAD:
-    real = 'viscous' not in folder and 'vortex' not in folder
- system = 'cube' if 'cube' in folder else 'elbow' if 'elbow' in folder else \
- 'asymmetric'
-
- runs_to_load = get_runs_to_load(folder, real=real)
-
- for run_name in runs_to_load:
- if not experiment_finished(run_name):
- print(f'Skipping unfinished {run_name}')
- continue
-
- # Use the first test set trajectory in the stats file, and predict all
- # possible rollouts of a fixed length within it.
- if MODE == MODE_FIXED_HORIZON_TRAJECTORY_SWEEP:
- if post_processing_traj_sweep_done(run_name):
- print(f'{run_name} already post-processed twice.')
- continue
- experiment, run_dir, learned_system = \
- load_experiment_run_dir_sys(run_name)
-
- gt_traj, pred_120 = get_test_set_traj_target_and_prediction(
- experiment)
-
- rollouts, ground_truths = all_rollouts_of_len(
- FIXED_HORIZON, gt_traj, experiment, learned_system, system)
- pos_errors, rot_errors = compute_pos_rot_trajectory_errors(
- ground_truths, rollouts, experiment, use_different_trajs=True)
- save_post_processing_traj_sweep_stats(pos_errors, rot_errors, run_dir)
-
- # Use the first test set trajectory in the stats file, and predict with
- # different rollout lengths.
- elif MODE == MODE_HORIZON_SWEEP:
- experiment, run_dir, learned_system = \
- load_experiment_run_dir_sys(run_name)
-
- gt_traj, pred_120 = get_test_set_traj_target_and_prediction(
- experiment)
-
- rollouts = []
- for rollout_len in ROLLOUT_LENGTHS[:-1]:
- rollouts.append(
- get_traj_with_rollout_of_len(
- gt_traj, rollout_len, experiment, learned_system,
- system))
-
- rollouts.append(pred_120)
-
- save_rollout_sweep_trajs(rollouts, run_name)
- pos_errors, rot_errors = compute_pos_rot_trajectory_errors(
- gt_traj, rollouts, experiment)
- save_post_processing_stats(pos_errors, rot_errors, run_dir)
-
- # Compute rollouts from original dataset trajectories.
- elif MODE == MODE_TEST_SET:
- if post_processing_done(run_name):
- print(f'{run_name} already post-processed.')
- continue
- experiment, run_dir, learned_system = \
- load_experiment_run_dir_sys(run_name)
-
- for pll_toss_num in PLL_TOSS_NUMS_TO_GENERATE:
- gt_traj = load_ground_truth_toss_trajectory(
- 'elbow', pll_toss_num)
- l_traj = compute_predicted_trajectory(
- experiment, learned_system, gt_traj, system)
-
- save_predicted_bag_trajectory(l_traj, run_name, pll_toss_num)
-
-# pdb.set_trace()
-# ======================= Compute metrics on rollouts ======================== #
-
-
diff --git a/dair_pll_old/helpers/make_asymmetric_mesh.py b/dair_pll_old/helpers/make_asymmetric_mesh.py
deleted file mode 100644
index c935e91..0000000
--- a/dair_pll_old/helpers/make_asymmetric_mesh.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Helper script to generate a mesh file from custom vertex locations."""
-
-import pdb
-
-from torch import Tensor
-from scipy.spatial import ConvexHull
-
-from dair_pll import file_utils
-from dair_pll.deep_support_function import extract_obj_from_mesh_summary, \
- get_mesh_summary_from_polygon
-from dair_pll.geometry import Polygon
-
-
-MESH_PATH = '/home/bibit/dair_pll/assets/contactnets_asymmetric.obj'
-VERTICES = Tensor([[0., -1, -2],
- [3, 0, 0],
- [0, 2, -1],
- [-1, 1, -1],
- [1, 1, 1],
- [2, -1, 1]]) * 0.025
-
-polygon = Polygon(VERTICES)
-
-mesh_summary = get_mesh_summary_from_polygon(polygon)
-obj = extract_obj_from_mesh_summary(mesh_summary)
-file_utils.save_string(MESH_PATH, obj)
-
-pdb.set_trace()
\ No newline at end of file
diff --git a/dair_pll_old/helpers/plot.py b/dair_pll_old/helpers/plot.py
deleted file mode 100644
index 036bfe7..0000000
--- a/dair_pll_old/helpers/plot.py
+++ /dev/null
@@ -1,286 +0,0 @@
-#!/usr/bin/env python
-#SBATCH --gpus=0
-#SBATCH --mem-per-cpu=10G
-#SBATCH --time=1:00:00
-#SBATCH --qos=posa-high
-#SBATCH --partition=posa-compute
-
-import json
-import math
-import os
-import os.path as op
-import pdb # noqa
-import re
-
-import numpy as np
-
-import matplotlib.pyplot as plt
-from matplotlib import rc, rcParams
-from matplotlib.ticker import FormatStrFormatter
-
-from dair_pll import file_utils
-
-
-rc('legend', fontsize=24)
-plt.rc('axes', titlesize=30) # fontsize of the axes title
-plt.rc('axes', labelsize=24) # fontsize of the x and y labels
-plt.rc('xtick', labelsize=20)
-plt.rc('ytick', labelsize=20)
-
-
-# NAME_TO_EXPERIMENT = {'s00-.+': 'Simulation Inertia Mode 0'} #,
- # 's01-.+': 'Simulation Inertia Mode 1',
- # 's02-.+': 'Simulation Inertia Mode 2',
- # 's03-.+': 'Simulation Inertia Mode 3',
- # 's04-.+': 'Simulation Inertia Mode 4'}
-
-SWEEP_NAMES = ['s05-.+', 's06-.+', 's07-.+', 's08-.+', 's09-.+']
-
-VALIDATION_LOSS = 'valid_model_loss_mean'
-
-RESULTS_FOLDER = file_utils.RESULTS_DIR
-PLOTS_FOLDER = file_utils.PLOTS_DIR
-LOG_FOLDER = file_utils.LOG_DIR
-
-SYSTEMS = ['elbow', 'cube']
-SOURCES = ['real', 'simulation']
-TRAIN_LOSSES = ['ContactNets', 'prediction']
-GEOMETRY_TYPE = ['box', 'mesh']
-INERTIA_MODES = ['none', 'masses', 'CoMs', 'CoMsandmasses', 'all']
-INITIAL_URDF = ['correct', 'wrong']
-DATASET_SIZES = [4, 8, 16, 32, 64, 128, 256, 512]
-CONFIG_KEYS = ['system', 'source', 'loss_type', 'geometry_type',
- 'inertia_mode', 'initial_urdf', 'dataset_size', 'timestep']
-RESULTS_KEYS = ['experiment_config', 'datasets', 'scalars_list', 'stats_list',
- 'success']
-
-"""Get the scalars and statistics per epoch, the experiment settings from the
-params.txt file of the experiment name, a dictionary of the dataset indices
-for each train/valid/test set, and whether the training process went to
-completion, returned as a dictionary of these five elements (exp_config,
-datasets, scalars_list, stats_list, success)."""
-def load_results_from_experiment(exp_name):
- txt_file = f'{RESULTS_FOLDER}/{exp_name}/params.txt'
-
- with open(txt_file, 'r') as f:
- lines = f.readlines()
-
- # do an initial test that the text file format is as expected.
- start_line = lines.index('Epoch 0:\n')
- assert lines[start_line+3] == 'Epoch 1:\n'
-
- def compile_experiment_config_dict(beginning_lines):
- lns = [item for item in beginning_lines if item != '\n']
-
- # first grab experiment configuration settings
- experiment_config = {}
- experiment_config['system'] = lns[1].split(': ')[1][:-1].replace(
- ' ', '').replace('.','')
- experiment_config['source'] = lns[2].split(': ')[1][:-1].replace(
- ' ', '').replace('.','')
- experiment_config['inertia_mode'] = lns[9].split(': ')[1][:-1].replace(
- ' ', '').replace('.','')
- experiment_config['loss_type'] = 'ContactNets' if \
- 'True' in lns[3] else 'prediction'
- experiment_config['geometry_type'] = 'box' if \
- 'True' in lns[4] else 'mesh'
- experiment_config['initial_urdf'] = 'correct' if \
- 'True' in lns[10] else 'wrong'
-
- assert experiment_config['system'] in SYSTEMS
- assert experiment_config['source'] in SOURCES
- assert experiment_config['inertia_mode'] in INERTIA_MODES
-
- config_line = 11
- while 'n_pop' not in lns[config_line]: config_line += 1
-
- # get the timestep
- experiment_config['timestep'] = float(lns[config_line].split(
- 'dt=')[1].split(',')[0])
-
- if experiment_config['source'] == 'simulation':
- # get the dataset size from n_pop
- dataset_str = lns[config_line].split('n_pop=')[1].split(',')[0]
- experiment_config['dataset_size'] = int(dataset_str)
- else:
- # get the dataset size from n_import
- while 'n_import' not in lns[config_line]: config_line += 1
- dataset_str = lns[config_line].split('n_import=')[1].split(',')[0]
- experiment_config['dataset_size'] = int(dataset_str)
-
- assert experiment_config['dataset_size'] in DATASET_SIZES
-
- return experiment_config
-
- def convert_scalars_line_to_dict(line):
- dict_str = line.split('\tscalars: ')[1][:-1]
- dict_str = dict_str.replace('\'', '\"')
- return json.loads(dict_str)
-
- def convert_stats_line_to_dict(line):
- dict_str = line.split('\tstatistics: ')[1][:-1]
- dict_str = dict_str.split(']), ')[-1]
- dict_str = '{' + dict_str.replace('\'', '\"')
- return json.loads(dict_str)
-
- def compile_datasets(beginning_lines):
- datasets = {}
-
- # Get the start and end of the training set indices.
- i = 11
- while 'indices' not in beginning_lines[i]: i += 1
- line = beginning_lines[i]
- j = i+1
- while 'indices' not in beginning_lines[j]:
- line += beginning_lines[j]
- j += 1
-
- train_indices = line.split('[')[1].split(']')[0]
- datasets['train'] = [int(k) for k in train_indices.split(',') if k != '\n']
-
- # Get the start and end of the validation set indices.
- i = j
- j = i+1
- line = beginning_lines[i]
- while 'indices' not in beginning_lines[j]:
- line += beginning_lines[j]
- j += 1
-
- valid_indices = line.split('[')[1].split(']')[0]
- datasets['valid'] = [int(k) for k in valid_indices.split(',') if k != '\n']
-
- # Get the start and end of the test set indices.
- i = j
- j = i+1
- line = beginning_lines[i]
- while j < len(beginning_lines):
- line += beginning_lines[j]
- j += 1
-
- test_indices = line.split('[')[1].split(']')[0]
- datasets['test'] = [int(k) for k in test_indices.split(',') if k != '\n']
-
- return datasets
-
- experiment_config = compile_experiment_config_dict(lines[:start_line])
-
- datasets = compile_datasets(lines[:start_line])
-
- scalars_list = []
- stats_list = [None]
-
- scalars_list.append(convert_scalars_line_to_dict(lines[start_line+1]))
-
- # Collect all the other epoch scalars and statistics.
- # The text file should have epoch numbers listed on a line followed
- # by one line of scalars, possibly multiple lines of statistics,
- # one line of the training loss, then a blank line.
- i = start_line + 4
- while i+1 < len(lines):
- if 'scalars' not in lines[i]:
- i += 1
- continue
-
- scalars_list.append(convert_scalars_line_to_dict(lines[i]))
-
- i += 1
- stats_line = lines[i]
- while 'train_loss' not in lines[i+1]:
- stats_line += lines[i+1]
- i += 1
-
- stats_list.append(convert_stats_line_to_dict(stats_line))
-
- log_file = f'{LOG_FOLDER}/train_{exp_name}.txt'
- with open(log_file, 'r') as f:
- lines = f.readlines()
-
- success = True if 'Saving the final' in lines[-1] else False
-
- return {'experiment_config': experiment_config,
- 'datasets': datasets,
- 'scalars_list': scalars_list,
- 'stats_list': stats_list,
- 'success': success}
-
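-# Example (illustrative; 's05-0' is a hypothetical run name): check whether a
-# run finished and pull its final validation loss.
-#   res = load_results_from_experiment('s05-0')
-#   final_valid = res['stats_list'][-1][VALIDATION_LOSS] if res['success'] else None
-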
-"""Make a dictionary with keys for experiment names and entries as another
-dictionary of experiment configs, dataset indices, scalar list, statistics list,
-and training completion success."""
-def load_results(instance_regex):
-    pattern = re.compile(instance_regex + r'\Z')
- results = {}
- for instance_name in os.listdir(RESULTS_FOLDER):
- if pattern.match(instance_name):
- print(f'Found {instance_name}...')
-
- # get the results
- results[instance_name] = load_results_from_experiment(instance_name)
-
- return results
-
-"""Get the scalars and statistics corresponding to the best validation
-loss and the initial scalars upon initialization, returned as a 3-tuple
-of (initial_scalars, best_scalars, best_stats)."""
-def get_initial_and_learned_model(scalars_list, stats_list):
- lowest_valid_loss = 1e6
- lowest_valid_idx = 1
-
- for i in range(1, len(scalars_list)):
- valid_loss = stats_list[i][VALIDATION_LOSS]
- if valid_loss < lowest_valid_loss:
- lowest_valid_loss = valid_loss
- lowest_valid_idx = i
-
- return (scalars_list[0], scalars_list[lowest_valid_idx],
- stats_list[lowest_valid_idx])
-
-"""Make a plot of some statistic given the results dictionary."""
-def plot_statistics_key_over_epochs(results_dict, key):
- fig = plt.figure()
- ax = plt.gca()
-
- # for every experiment
- for exp in results_dict.keys():
- # check if went to completion or not
- if not results_dict[exp]['success']:
- print(f'Experiment {exp} did not complete -- skipping')
- continue
-
- # grab the right data
- stats_list = results_dict[exp]['stats_list']
-
- n_epochs = len(stats_list)
- stats = []
-
- for i in range(1, n_epochs):
- stats.append(stats_list[i][key])
-
- ax.plot(range(1, n_epochs), stats, linewidth=3, label=exp)
-
- ax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
- ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1f"))
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
-
- plt.xlabel('Epochs')
- plt.ylabel(key)
- plt.legend(prop=dict(weight='bold'))
- fig.set_size_inches(13, 13)
- fig.savefig(f'{PLOTS_FOLDER}/{key}.png', dpi=100)
- print(f'Saved {PLOTS_FOLDER}/{key}.png')
-
-
-if __name__ == "__main__":
- all_results = {}
- for sweep_name in SWEEP_NAMES:
- results = load_results(sweep_name)
- all_results = {**all_results, **results}
-
- plot_statistics_key_over_epochs(all_results, VALIDATION_LOSS)
-
-    t19_results = load_results_from_experiment('t19')
-    initial_scalars, scalars, stats = get_initial_and_learned_model(
-        t19_results['scalars_list'], t19_results['stats_list'])
-    pdb.set_trace()
-
-
-
diff --git a/dair_pll_old/helpers/plot_reference.py b/dair_pll_old/helpers/plot_reference.py
deleted file mode 100644
index 8908bcc..0000000
--- a/dair_pll_old/helpers/plot_reference.py
+++ /dev/null
@@ -1,258 +0,0 @@
-from collections import defaultdict
-import sys
-
-import json
-import math
-import os
-import os.path as op
-import pdb # noqa
-import re
-from typing import Any, DefaultDict, List, Tuple
-
-from matplotlib import rc, rcParams
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FormatStrFormatter, NullFormatter
-import numpy as np
-
-
-
-RESULTS_DIR = os.path.join(os.path.dirname(__file__), '..', 'results')
-OUTPUT_DIR = os.path.join(os.path.dirname(__file__), '..', 'plots')
-
-
-# Some settings on the plot generation.
-rc('legend', fontsize=30)
-plt.rc('axes', titlesize=40) # fontsize of the axes title
-plt.rc('axes', labelsize=40) # fontsize of the x and y labels
-
-
-YSCALE = 1
-USE_LOGS = [True, True, True, False, False, False, False]
-PLOT_POINTS = False
-
-YFIELDS = ['train_model_trajectory_mse_mean',
- 'valid_model_trajectory_mse_mean',
- 'train_loss',
- 'cube_body_len_x',
- 'cube_body_len_y',
- 'cube_body_len_z',
- 'cube_body_mu']
-YLABELS = ['Trajectory state space error (training)',
- 'Trajectory state space error',
- 'Training loss',
- 'Cube x length (normalized)',
- 'Cube y length (normalized)',
- 'Cube z length (normalized)',
- 'Friction coefficient (normalized)']
-VAL_SCALES = [1.0, 1.0, 1.0, 1.0/0.1048, 1.0/0.1048, 1.0/0.1048, 1.0/0.15]
-
-MODELS = {'ContactNets, L': ['cn_.+-0', 'cn_.+-2', 'cn_.+-4', 'cn_.+-6'],
- 'ContactNets, S': ['cn_.+-1', 'cn_.+-3', 'cn_.+-5', 'cn_.+-7'],
- 'DiffSim, L': ['ds_.+-0', 'ds_.+-2', 'ds_.+-4', 'ds_.+-6'],
- 'DiffSim, S': ['ds_.+-1', 'ds_.+-3', 'ds_.+-5', 'ds_.+-7']}
-LABEL_LOOKUP = {'cn_.+-0': 'ContactNets, L',
- 'cn_.+-1': 'ContactNets, S',
- 'cn_.+-2': 'ContactNets, L',
- 'cn_.+-3': 'ContactNets, S',
- 'cn_.+-4': 'ContactNets, L',
- 'cn_.+-5': 'ContactNets, S',
- 'cn_.+-6': 'ContactNets, L',
- 'cn_.+-7': 'ContactNets, S',
- 'ds_.+-0': 'DiffSim, L',
- 'ds_.+-1': 'DiffSim, S',
- 'ds_.+-2': 'DiffSim, L',
- 'ds_.+-3': 'DiffSim, S',
- 'ds_.+-4': 'DiffSim, L',
- 'ds_.+-5': 'DiffSim, S',
- 'ds_.+-6': 'DiffSim, L',
- 'ds_.+-7': 'DiffSim, S'}
-COLOR_LOOKUP = {'DiffSim, L': '#95001a', 'ContactNets, L': '#01256e',
- 'DiffSim, S': '#92668d', 'ContactNets, S': '#398537'} #4a0042
-
-for (yfield, ylabel, val_scale, use_log) in \
-zip(YFIELDS, YLABELS, VAL_SCALES, USE_LOGS):
-
- print(f'\n\n========== Starting {yfield} ==========')
-
- def num(s: str):
- try:
- return int(s)
- except ValueError:
- return float(s)
-
-    def load_results(instance_regex: str) -> DefaultDict[int, List[Any]]:
-        pattern = re.compile(instance_regex + r'\Z')
- results = defaultdict(list)
-
- # load results from previous tests
- for instance_name in os.listdir(RESULTS_DIR):
- if (pattern.match(instance_name)) and '64' not in instance_name:
- # print(f'\tFound {instance_name} folder...')
-
- params_file = op.join(RESULTS_DIR, instance_name, 'params.txt')
-
- if not os.path.isfile(params_file):
- print(f'\t\t--> did not find params_file in {instance_name}')
- continue
-
- data_size = int(instance_name.split('_')[-1].split('-')[0])
-
- stats = read_params_file(params_file)
- results[int(data_size)].append(stats)
-
- return results
-
-    def read_params_file(file_name):
-        with open(file_name, "r") as file:
-            filestr = file.read().replace('\'', '')
-
- stats = {}
- for key in YFIELDS:
- stats[key] = float(filestr.split(f'{key}: ')[-1].split(',')[0].split('}')[0])
-
- return stats
-
- def extract_xys(results, y_field):
- extracted = defaultdict(list)
- for i in results.keys():
- for result in results[i]:
- extracted[i].append(float(result[y_field] * val_scale))
- return extracted
-
- def extract_points(results, y_field):
- extracted = extract_xys(results, y_field)
- xs, ys = [], []
- for x in extracted.keys():
- for y in extracted[x]:
- xs.append(x)
- ys.append(y)
- return xs, ys
-
- def scatter_to_t_conf_int_plot(extracted):
- # the following are t values for 95% confidence interval
- t_per_dof = {1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776,
- 5: 2.571, 6: 2.447, 7: 2.365, 8: 2.306,
- 9: 2.262, 10: 2.228, 0: 0.5}
-
- means, lowers, uppers = {}, {}, {}
-
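-        # 95% confidence interval half-width: t_{dof} * np.std(v) / sqrt(n),
-        # with n = dof + 1 runs observed at each dataset size.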
- for k, v in extracted.items():
- dof = len(v) - 1
- means[k] = np.mean(v)
- lowers[k] = np.mean(v) - t_per_dof[dof]*np.std(v)/np.sqrt(dof+1)
- uppers[k] = np.mean(v) + t_per_dof[dof]*np.std(v)/np.sqrt(dof+1)
-
- xs = list(means.keys())
- ys, y_lowers, y_uppers = [], [], []
-
- for x in xs:
- ys.append(means[x])
- y_lowers.append(lowers[x])
- y_uppers.append(uppers[x])
-
- xs, ys, y_lowers, y_uppers = zip(*sorted(zip(xs, ys, y_lowers, y_uppers)))
-
- return xs, ys, y_lowers, y_uppers
-
- def get_data_counts(extracted):
- return {k: len(v) for k, v in extracted.items()}
-
- fig = plt.figure()
- ax = plt.gca()
-
- for model in MODELS.keys():
- print(f'Working on {model}:', end='')
-
- dicts = []
- for mod in MODELS[model]:
- results = load_results(mod)
- dicts.append(results)
-
- combined_results = {}
- for k in dicts[0].keys():
- combined_results[k] = []
- for d in dicts:
- for item in d[k]:
- combined_results[k].append(item)
-
- results = combined_results
- prefix = ''
-
- if PLOT_POINTS:
- xs, ys = extract_points(results, prefix + yfield)
- xs = [x / 2 for x in xs]
-            plt.scatter(xs, ys, s=200, c=COLOR_LOOKUP[model],
-                        label=model, alpha=0.5)
- else:
- extracted = extract_xys(results, prefix + yfield)
- print(f' with counts {get_data_counts(extracted)}')
- xs, ys, y_lowers, y_uppers = scatter_to_t_conf_int_plot(extracted)
- xs = [x / 2 for x in xs]
- ax.plot(xs, ys, label=model, linewidth=5, color=COLOR_LOOKUP[model])
- ax.fill_between(xs, y_lowers, y_uppers, alpha=0.3, color=COLOR_LOOKUP[model])
-
- ax.set_xscale('log')
- if use_log:
-        ax.set_yscale('log')
- elif yfield == 'cube_body_mu':
- ax.set_ylim(0, 3.5)
- else:
- ax.set_ylim(0, 1.5)
-
- xs = [2 * 2**j for j in range(0, 4)]
- ax.set_xlim(min(xs), max(xs))
-
- ax.xaxis.set_major_formatter(NullFormatter())
- ax.xaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_major_formatter(NullFormatter())
-
- xs_rounded = [round(x, 1) for x in xs]
- ax.set_xticks([])
- ax.set_xticklabels([])
- ax.set_xticks(xs_rounded)
- ax.set_xticklabels(xs_rounded)
-
- ax.tick_params(axis='x', which='minor', bottom=False, labelsize=20)
- ax.tick_params(axis='x', which='major', bottom=False, labelsize=20)
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
-
- ax.tick_params(axis='y', which='minor', labelsize=20)
- ax.tick_params(axis='y', which='major', labelsize=20)
- if ('body_len' in yfield) or ('body_mu' in yfield):
- ax.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
- ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1f"))
- else:
- ax.yaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- ax.yaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
-
- plt.xlabel('Training tosses')
- plt.ylabel(ylabel)
-
- ax.yaxis.grid(True, which='both')
- ax.xaxis.grid(True, which='major')
-
- lines = ax.get_lines()
-
- handles, labels = plt.gca().get_legend_handles_labels()
-
- plt.legend(handles, labels)
- plt.legend(loc=1, prop=dict(weight='bold'))
-
- fig.set_size_inches(13, 13)
-
- fig.savefig(f'{OUTPUT_DIR}/{yfield}.png', dpi=100)
- # fig.savefig(f'{OUTPUT_DIR}/tp_{yfield}.png', transparent=True, dpi=100)
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/dair_pll_old/helpers/plot_results.py b/dair_pll_old/helpers/plot_results.py
deleted file mode 100644
index b93f14e..0000000
--- a/dair_pll_old/helpers/plot_results.py
+++ /dev/null
@@ -1,919 +0,0 @@
-from collections import defaultdict
-from copy import deepcopy
-import sys
-
-import json
-import math
-import os
-import os.path as op
-import pdb # noqa
-import re
-from typing import Any, DefaultDict, List, Tuple
-
-from matplotlib import rc, rcParams
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FormatStrFormatter, NullFormatter
-import numpy as np
-from scipy.optimize import linprog
-from scipy.spatial import ConvexHull, HalfspaceIntersection
-from helpers.gather_results import FIXED_HORIZON_METRICS
-import torch
-from torch import Tensor
-from dair_pll.system import MeshSummary
-from dair_pll.deep_support_function import extract_outward_normal_hyperplanes
-
-STATS_FILE = os.path.join(os.path.dirname(__file__), '..', 'results', 'storage', 'runs', 'bundlesdf', 'statistics.pkl')
-RESULTS_DIR = os.path.join(os.path.dirname(__file__), '..', 'results', 'storage', 'runs')
-OUTPUT_DIR = os.path.join(os.path.dirname(__file__), '..', 'plots')
-JSON_OUTPUT_FILE = op.join(os.path.dirname(__file__), 'results.json')
-TITLE_BY_EXPERIMENT = {
- 'cube': 'Cube with Real Data',
- 'elbow': 'Articulated Object with Real Data',
- 'asymmetric_vortex': 'Asymmetric in Vortex Sim',
- 'asymmetric_viscous': 'Asymmetric in Viscous Sim',
- 'elbow_vortex': 'Articulated Object in Vortex Sim',
-    'elbow_viscous': 'Articulated Object in Viscous Sim',
- 'elbow_gravity': 'Articulated Object in Gravity Sim',
- 'cube_gravity': 'Cube in Gravity Sim'}
-
-DATASET_SIZES = [4, 8, 16, 32, 64, 128, 256, 512]
-METHOD_RESULTS = {#'VimpI': '#01256e',
- #'VimpI RP': '#398537',
- 'CCN': '#01256e', #'#1111ff',
- # 'CCN-R': '#398537', #'#11ff11',
- # 'DiffSim': '#95001a',
- # 'DiffSim-R': '#92668d',
- # 'End-to-End': '#4a0042',
- }
-
-CUBE_VERTICES = Tensor([
- [ -0.052400, -0.052400, 0.052400],
- [ 0.052400, -0.052400, 0.052400],
- [ -0.052400, 0.052400, 0.052400],
- [ 0.052400, 0.052400, 0.052400],
- [ -0.052400, 0.052400, -0.052400],
- [ 0.052400, 0.052400, -0.052400],
- [ -0.052400, -0.052400, -0.052400],
- [ 0.052400, -0.052400, -0.052400]])
-
-## TODO
-BOTTLE_VERTICES = Tensor([
-
-])
-
-CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY = {
- 'cube': {
- 'body': {
- 'diameter_x': 0.1048, 'diameter_y': 0.1048, 'diameter_z': 0.1048,
- 'center_x': 0., 'center_y': 0., 'center_z': 0.,
- 'mu': 0.15, 'm': 0.37, 'px': 0.0, 'py': 0.0, 'pz': 0.0,
- 'I_xx': 0.00081, 'I_yy': 0.00081, 'I_zz': 0.00081,
- 'I_xy': 0.0, 'I_xz': 0.0, 'I_yz': 0.0,
- 'scaling_vector': 1.0 / np.array([
- 0.37, 0.035, 0.035, 0.035, 0.00081, 0.00081, 0.00081, 0.00081,
- 0.00081, 0.00081]), 'vertices': CUBE_VERTICES
- }
- }
-}
-SYSTEM_BY_EXPERIMENT = {
- 'cube': 'cube'}
-FIXED_HORIZON_METRICS_BY_EXPERIMENT = {
- 'cube': [],
-}
-GEOMETRY_PARAMETER_ERROR = 'geometry_parameter_error'
-VERTEX_ERROR = 'vertex_error'
-VOLUME_ERROR = 'volume_error'
-FRICTION_PARAMETER_ERROR = 'friction_error'
-INERTIA_PARAMETER_ERROR = 'inertia_error'
-PARAMETER_METRICS_BY_EXPERIMENT = {
- 'cube': [GEOMETRY_PARAMETER_ERROR, VERTEX_ERROR, VOLUME_ERROR]}
-METRICS = {'model_loss_mean': {
- 'label': 'Loss', 'scaling': 1.0,
- 'yformat': {'elbow': "%.0f", 'cube': "%.0f",
- 'asymmetric': "%.0f"},
- 'ylims': {'elbow': [None, None], 'cube': [None, None],
- 'asymmetric': [None, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- # 'oracle_loss_mean': {
- # 'label': 'Loss',
- # 'yformat': "%.0f", 'scaling': 1.0,
- # 'ylims': {'elbow': [None, None], 'cube': [None, None],
- # 'asymmetric': [None, None]},
- # 'legend_loc': 'best'},
- 'model_trajectory_mse_mean': {
- 'label': 'Accumulated trajectory error', 'scaling': 1.0,
- 'yformat': {'elbow': "%.0f", 'cube': "%.0f",
- 'asymmetric': "%.0f"},
- 'ylims': {'elbow': [None, None], 'cube': [None, None],
- 'asymmetric': [None, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- 'model_pos_int_traj': {
- 'label': 'Trajectory positional error [m]', 'scaling': 1.0,
- 'yformat': {'elbow': "%.2f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [-0.01, 0.4], 'cube': [-0.01, 0.4],
- 'asymmetric': [-0.01, 0.4]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- 'model_angle_int_traj': {
- 'label': 'Trajectory rotational error [deg]',
- 'scaling': 180/np.pi,
- 'yformat': {'elbow': "%.0f", 'cube': "%.0f",
- 'asymmetric': "%.0f"},
- 'ylims': {'elbow': [0.0, 140], 'cube': [0.0, 140],
- 'asymmetric': [0.0, 140]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- 'model_penetration_int_traj': {
- 'label': 'Trajectory penetration [m]', 'scaling': 1.0,
- 'yformat': {'elbow': "%.3f", 'cube': "%.3f",
- 'asymmetric': "%.3f"},
- 'ylims': {'elbow': [-0.005, 0.03], 'cube': [None, None],
- 'asymmetric': [None, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False}
- }
-PARAMETER_ERRORS = {
- GEOMETRY_PARAMETER_ERROR: {'label': 'Geometry parameter error [m]',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.3f", 'cube': "%.3f",
- 'asymmetric': "%.3f"},
- 'ylims': {'elbow': [0.0, None],
- 'cube': [0.0, None],
- 'asymmetric': [0.0, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- VERTEX_ERROR: {'label': 'Average vertex location error [m]',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.2f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.0, None],
- 'cube': [0.0, None],
- 'asymmetric': [0.0, None]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- VOLUME_ERROR: {'label': 'Relative volume error',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.2f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.0, 0.52],
- 'cube': [0.0, 0.52],
- 'asymmetric': [0.0, 0.52]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- FRICTION_PARAMETER_ERROR: {'label': 'Friction error',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.1f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.0, 0.85],
- 'cube': [0.0, 0.85],
- 'asymmetric': [0.0, 0.85]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': False},
- INERTIA_PARAMETER_ERROR: {'label': 'Inertia parameter error',
- 'scaling': 1.0,
- 'yformat': {'elbow': "%.0f", 'cube': "%.2f",
- 'asymmetric': "%.2f"},
- 'ylims': {'elbow': [0.1, 300],
- 'cube': [0.1, 300],
- 'asymmetric': [0.1, 300]},
- 'legend_loc': {'elbow': 'best', 'cube': 'best',
- 'asymmetric': 'best'},
- 'log': True},
-}
-# The following are t values for 95% confidence interval.
-T_SCORE_PER_DOF = {1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776,
- 5: 2.571, 6: 2.447, 7: 2.365, 8: 2.306,
- 9: 2.262, 10: 2.228, 11: 2.201, 12: 2.179,
- 13: 2.160, 14: 2.145, 15: 2.131, 16: 2.120,
- 17: 2.110, 18: 2.101, 19: 2.093, 20: 2.086,
- 21: 2.080, 22: 2.074, 23: 2.069, 24: 2.064,
- 25: 2.060, 26: 2.056, 27: 2.052, 28: 2.048,
- 29: 2.045, 30: 2.042}
-N_RUNS = 'n_runs'
-DATASET_SIZE_DICT = {2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: []}
-XS = [2**(key-1) for key in DATASET_SIZE_DICT.keys()]
-
-# Some settings on the plot generation.
-rc('legend', fontsize=30)
-plt.rc('axes', titlesize=40) # fontsize of the axes title
-plt.rc('axes', labelsize=40) # fontsize of the x and y labels
-
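-# Find a point strictly inside the intersection of the given halfspaces by
-# solving a small linear program that maximizes the margin to every halfspace
-# (a Chebyshev-center-style construction); the returned gap is positive only
-# when the intersection has a nonempty interior.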
-def _get_mesh_interior_point(halfspaces: np.ndarray) -> Tuple[np.ndarray, float]:
- norm_vector = np.reshape(np.linalg.norm(halfspaces[:, :-1], axis=1),
- (halfspaces.shape[0], 1))
- objective_coefficients = np.zeros((halfspaces.shape[1],))
- objective_coefficients[-1] = -1
- A = np.hstack((halfspaces[:, :-1], norm_vector))
- b = -halfspaces[:, -1:]
- res = linprog(objective_coefficients, A_ub=A, b_ub=b, bounds=(None, None))
- interior_point = res.x[:-1]
- interior_point_gap = res.x[-1]
- return interior_point, interior_point_gap
-
-def extract_mesh_from_support_points(support_points: Tensor) -> MeshSummary:
- """Given a set of convex polytope vertices, extracts a vertex/face mesh.
-
- Args:
- support_points: ``(*, 3)`` polytope vertices.
-
- Returns:
- Object vertices and face indices.
- """
- support_point_hashes = set()
- unique_support_points = []
-
- # remove duplicate vertices
- for vertex in support_points:
- vertex_hash = hash(vertex.numpy().tobytes())
- if vertex_hash in support_point_hashes:
- continue
- support_point_hashes.add(vertex_hash)
- unique_support_points.append(vertex)
-
- vertices = torch.stack(unique_support_points)
- hull = ConvexHull(vertices.numpy())
- faces = Tensor(hull.simplices).to(torch.long) # type: ignore
-
- _, backwards, _ = extract_outward_normal_hyperplanes(
- vertices.unsqueeze(0), faces.unsqueeze(0))
- backwards = backwards.squeeze(0)
- faces[backwards] = faces[backwards].flip(-1)
-
-    # Return the deduplicated vertices so the face indices, which were
-    # computed on the deduplicated set, stay consistent.
-    return MeshSummary(vertices=vertices, faces=faces)
-
-def calculate_error_vertices(vertices_learned: Tensor,
- vertices_true: Tensor) -> Tensor:
- """Relative error between two convex hulls of provided vertices.
-
-    Uses the identity that the volume of the non-overlapping region is the
-    sum of the volumes of the two convex hulls minus twice the volume of
-    their intersection.
-
-    Args:
-        vertices_learned: (N, 3) tensor of vertices of the learned geometry.
-        vertices_true: (N, 3) tensor of vertices of the true geometry.
-
-    Returns:
-        (1,) tensor: non-overlapping volume divided by the true hull volume.
-    """
- # pylint: disable=too-many-locals
- true_volume = ConvexHull(vertices_true.numpy()).volume
- sum_volume = ConvexHull(vertices_learned.numpy()).volume + true_volume
-
- mesh_learned = extract_mesh_from_support_points(vertices_learned)
- mesh_true = extract_mesh_from_support_points(vertices_true)
-
- normal_learned, _, extent_learned = extract_outward_normal_hyperplanes(
- mesh_learned.vertices.unsqueeze(0), mesh_learned.faces.unsqueeze(0))
- normal_true, _, extent_true = extract_outward_normal_hyperplanes(
- mesh_true.vertices.unsqueeze(0), mesh_true.faces.unsqueeze(0))
-
- halfspaces_true = torch.cat(
- [normal_true.squeeze(), -extent_true.squeeze().unsqueeze(-1)],
- dim=1)
-
- halfspaces_learned = torch.cat(
- [normal_learned.squeeze(), -extent_learned.squeeze().unsqueeze(-1)],
- dim=1)
-
- intersection_halfspaces = torch.cat(
- [halfspaces_true, halfspaces_learned], dim=0).numpy()
-
- # find interior point of intersection
- interior_point, interior_point_gap = _get_mesh_interior_point(
- intersection_halfspaces)
-
- intersection_volume = 0.
-
- if interior_point_gap > 0.:
- # intersection is non-empty
- intersection_halfspace_convex = HalfspaceIntersection(
- intersection_halfspaces, interior_point)
-
- intersection_volume = ConvexHull(
- intersection_halfspace_convex.intersections).volume
-
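-    # Sanity check: for identical hulls the intersection volume equals the
-    # true volume, so the result is |2V - 2V| / V = 0; for disjoint hulls the
-    # intersection is empty and the result is (V_learned + V_true) / V_true.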
- return Tensor([sum_volume - 2 * intersection_volume
- ]).abs() / true_volume
-
-def calculate_vertex_position_error(true_vertices, learned_vertices):
- true_vertices = Tensor(true_vertices)
- learned_vertices = Tensor(learned_vertices)
- assert true_vertices.shape == learned_vertices.shape
- assert true_vertices.shape[1] == 3
-
- vert_displacement = true_vertices - learned_vertices
- vert_dists = torch.linalg.norm(vert_displacement, dim=1)
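-    # Note this returns the *sum* of per-vertex distances; the caller
-    # (calculate_geometry_error) divides by n_bodies * n_verts to report the
-    # average vertex location error.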
-
- return vert_dists.sum().item()
-
-def get_single_body_correct_geometry_array(system, body):
- # In order of diameters then centers x y z, get the correct parameters.
- params = CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]
- ground_truth = np.array([params['diameter_x'], params['diameter_y'],
- params['diameter_z'], params['center_x'], params['center_y'],
- params['center_z']])
- return ground_truth
-
-def get_single_body_correct_inertia_array(system, body):
- # In order of mass, CoM xyz, inertia xx yy zz xy xz yz, get the correct
- # parameters.
- params = CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]
- ground_truth = np.array([params['m'], params['px'], params['py'],
- params['pz'], params['I_xx'], params['I_yy'], params['I_zz'],
- params['I_xy'], params['I_xz'], params['I_yz']])
- return ground_truth
-
-def calculate_geometry_error(run_dict, experiment):
- system = SYSTEM_BY_EXPERIMENT[experiment]
-
- # Start an empty numpy array to store true and learned values.
- true_vals = np.array([])
- learned_vals = np.array([])
-
- vertex_err = 0.
- volume_err = 0.
-
- # Iterate over bodies in the system.
- for body in CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys():
- body_dict = run_dict['learned_params'][body]
-
- ground_truth_verts = \
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['vertices']
- learned_verts = Tensor(body_dict['vertices'])
-
-        # calculate_error_vertices() gives the hull-volume-based error and
-        # calculate_vertex_position_error() the summed vertex distance, so
-        # accumulate them into volume_err and vertex_err respectively.
-        volume_err += calculate_error_vertices(
-            learned_verts, ground_truth_verts).item()
-        vertex_err += calculate_vertex_position_error(
-            ground_truth_verts, learned_verts)
-
- ground_truth = get_single_body_correct_geometry_array(system, body)
-
- learned = np.array([body_dict['diameter_x'], body_dict['diameter_y'],
- body_dict['diameter_z'], body_dict['center_x'],
- body_dict['center_y'], body_dict['center_z']])
-
- true_vals = np.concatenate((true_vals, ground_truth))
- learned_vals = np.concatenate((learned_vals, learned))
-
- # Calculate geometry error as norm of the difference between learned and
- # true values.
- geometry_error = np.linalg.norm(true_vals - learned_vals)
-
- n_bodies = len(CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys())
- n_verts = len(CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['vertices'])
-
- vertex_error = vertex_err / (n_bodies * n_verts)
- volume_error = volume_err / n_bodies
-
- # Insert this error into the results dictionary.
- run_dict['results'].update({GEOMETRY_PARAMETER_ERROR: geometry_error})
- run_dict['results'].update({VERTEX_ERROR: vertex_error})
- run_dict['results'].update({VOLUME_ERROR: volume_error})
- return run_dict
-
-def calculate_inertia_error(run_dict, experiment):
- system = SYSTEM_BY_EXPERIMENT[experiment]
-
- # Start an empty numpy array to store true and learned values.
- true_vals = np.array([])
- learned_vals = np.array([])
-
- # Iterate over bodies in the system.
- for body in CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys():
- body_dict = run_dict['learned_params'][body]
-
- ground_truth = get_single_body_correct_inertia_array(system, body)
-
- learned = np.array([body_dict['m'], body_dict['px'], body_dict['py'],
- body_dict['pz'], body_dict['I_xx'],
- body_dict['I_yy'], body_dict['I_zz'],
- body_dict['I_xy'], body_dict['I_xz'],
- body_dict['I_yz']])
-
- # Since inertia parameters can be such different sizes, scale all of
- # them to get on similar scale.
- ground_truth = np.multiply(
- ground_truth,
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['scaling_vector']
- )
- learned = np.multiply(
- learned,
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['scaling_vector']
- )
-
- true_vals = np.concatenate((true_vals, ground_truth))
- learned_vals = np.concatenate((learned_vals, learned))
-
- # Calculate inertia error as norm of the scaled difference between learned
- # and true values.
- inertia_error = np.linalg.norm(true_vals - learned_vals)
-
- # Insert this error into the results dictionary.
- run_dict['results'].update({INERTIA_PARAMETER_ERROR: inertia_error})
- return run_dict
-
-def calculate_friction_error(run_dict, experiment):
- system = SYSTEM_BY_EXPERIMENT[experiment]
-
- # Start an empty numpy array to store true and learned values.
- true_vals = np.array([])
- learned_vals = np.array([])
-
- # Iterate over bodies in the system.
- for body in CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys():
- body_dict = run_dict['learned_params'][body]
-
- ground_truth = np.array([
- CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system][body]['mu']])
- learned = np.array([body_dict['mu']])
-
- true_vals = np.concatenate((true_vals, ground_truth))
- learned_vals = np.concatenate((learned_vals, learned))
-
- # Calculate friction error as norm of the difference between learned and
- # true values.
- friction_error = np.linalg.norm(true_vals - learned_vals)
-
- # Insert this error into the results dictionary.
- run_dict['results'].update({FRICTION_PARAMETER_ERROR: friction_error})
- return run_dict
-
-def convert_parameters_to_errors(run_dict, experiment, gravity=False):
- params_dict = run_dict['learned_params']
-    if params_dict is None:
- return run_dict
-
- exp_key = f'{experiment}_gravity' if gravity else experiment
-
- for param_metric in PARAMETER_METRICS_BY_EXPERIMENT[exp_key]:
- if param_metric == GEOMETRY_PARAMETER_ERROR:
- run_dict = calculate_geometry_error(run_dict, experiment)
- elif param_metric == FRICTION_PARAMETER_ERROR:
- run_dict = calculate_friction_error(run_dict, experiment)
- elif param_metric == INERTIA_PARAMETER_ERROR:
- run_dict = calculate_inertia_error(run_dict, experiment)
- elif param_metric in [VERTEX_ERROR, VOLUME_ERROR]:
- # These are already calculated in the geometry error function.
- pass
- else:
- raise RuntimeError(f"Can't handle {param_metric} type.")
-
- return run_dict
-
-def set_of_vals_to_t_confidence_interval(ys):
-    if len(ys) <= 1:
-        return None, None, None
-
-    dof = len(ys) - 1
-
-    mean = np.mean(ys)
-    # Use the sample standard deviation (ddof=1) for the t-interval.
-    half_width = T_SCORE_PER_DOF[dof]*np.std(ys, ddof=1)/np.sqrt(dof + 1)
-    lower = mean - half_width
-    upper = mean + half_width
-
-    return mean, lower, upper
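-# For example, set_of_vals_to_t_confidence_interval([1.0, 2.0, 3.0]) has
-# dof = 2 and sample standard deviation 1.0, so it returns roughly
-# (2.0, 2.0 - 4.303 / sqrt(3), 2.0 + 4.303 / sqrt(3)) = (2.0, -0.48, 4.48).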
-
-def convert_lists_to_t_conf_dict(exp_dict, sweep_instance):
- # Iterate over methods then metrics and parameters.
- for method in METHOD_RESULTS.keys():
- # Here "quantity" can be a metric or parameter.
- for quantity in exp_dict[method].keys():
- if quantity == N_RUNS:
- continue
-
- vals = exp_dict[method][quantity][sweep_instance]
-
- mean, lower, upper = set_of_vals_to_t_confidence_interval(vals)
-
- exp_dict[method][quantity][sweep_instance] = {
- 'mean': mean, 'lower': lower, 'upper': upper
- }
- exp_dict[method][N_RUNS][sweep_instance] = \
- max(len(vals), exp_dict[method][N_RUNS][sweep_instance])
-
- return exp_dict
-
-def get_plottable_run_counts(exp_dict, method, gravity=False):
- data_dict = exp_dict[method][N_RUNS]
-
- xs, ys = [], []
-
- for x in data_dict.keys():
- if not gravity:
- xs.append(2**(x-1))
- else:
- xs.append(x)
- ys.append(data_dict[x])
-
- return xs, ys
-
-def get_empty_experiment_dict_by_experiment(experiment):
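-    # Builds the nested accumulator
-    #   exp_dict[method][metric or parameter][dataset exponent] -> list,
-    #   exp_dict[method][N_RUNS][dataset exponent] -> int,
-    # which fill_exp_dict_with_single_run_data later appends into.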
- # First get a list of bodies in the system.
- system = SYSTEM_BY_EXPERIMENT[experiment]
- bodies = CORRECT_PARAMETERS_BY_SYSTEM_AND_BODY[system].keys()
-
- # Then build structure.
- empty_dict_per_experiment = deepcopy(METHOD_RESULTS)
- for method in empty_dict_per_experiment.keys():
- empty_dict_per_experiment[method] = deepcopy(METRICS)
- empty_dict_per_experiment[method].update(
- {N_RUNS: deepcopy(DATASET_SIZE_DICT)})
- for metric in METRICS.keys():
- empty_dict_per_experiment[method][metric] = \
- deepcopy(DATASET_SIZE_DICT)
- for param_metric in PARAMETER_METRICS_BY_EXPERIMENT[experiment]:
- empty_dict_per_experiment[method].update(
- {param_metric: deepcopy(DATASET_SIZE_DICT)})
- for post_metric in FIXED_HORIZON_METRICS_BY_EXPERIMENT[experiment]:
- empty_dict_per_experiment[method].update(
- {post_metric: deepcopy(DATASET_SIZE_DICT)})
- for exponent in DATASET_SIZE_DICT.keys():
- empty_dict_per_experiment[method][N_RUNS][exponent] = 0
-
- return empty_dict_per_experiment
-
-def do_run_num_plot(exp_dict, experiment, gravity=False):
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- xs, ys = get_plottable_run_counts(exp_dict, method, gravity=gravity)
-
- # Plot the run numbers.
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
-
- if not gravity:
- ax.set_xscale('log')
- ax.set_xlim(min(XS), max(XS))
- x_markers = [round(x, 1) for x in XS]
- else:
- ax.set_xlim(0, 2)
- x_markers = [0, 0.5, 1, 1.5, 2]
-
- ax.set_ylim(0, None)
-
- ax.xaxis.set_major_formatter(NullFormatter())
- ax.xaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_major_formatter(NullFormatter())
-
- ax.set_xticks([])
- ax.set_xticklabels([])
- ax.set_xticks(x_markers)
- ax.set_xticklabels(x_markers)
-
- ax.tick_params(axis='x', which='minor', bottom=False, labelsize=20)
- ax.tick_params(axis='x', which='major', bottom=False, labelsize=20)
-
- ax.tick_params(axis='y', which='minor', labelsize=20)
- ax.tick_params(axis='y', which='major', labelsize=20)
-
- ax.yaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- ax.yaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
-
- if not gravity:
- plt.xlabel('Training tosses')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- else:
- plt.xlabel('Gravity fraction')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.1f"))
-
- plt.ylabel('Number of runs')
-
- ax.yaxis.grid(True, which='both')
- ax.xaxis.grid(True, which='major')
-
-    handles, labels = ax.get_legend_handles_labels()
-    plt.legend(handles, labels, prop=dict(weight='bold'))
-
- fig.set_size_inches(13, 13)
-
- plt.title(experiment)
- fig_name = 'gravity_' if gravity else ''
- fig_name += f'{experiment}_run_nums.png'
- fig_path = op.join(OUTPUT_DIR, fig_name)
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
-def include_fixed_horizon_post_stats(run_dict, experiment):
- params_dict = run_dict['fixed_horizon_post_results']
-    if params_dict is None:
- print("Don't have fixed horizon data")
- return run_dict
-
- for post_metric in FIXED_HORIZON_METRICS_BY_EXPERIMENT[experiment]:
- run_dict['results'].update({
- post_metric: params_dict[f'test_{post_metric}']})
- return run_dict
-
-def format_plot(ax, fig, metric, metric_lookup, experiment, gravity=False):
- # system = SYSTEM_BY_EXPERIMENT[experiment.split('_gravity')[0]]
-    system = SYSTEM_BY_EXPERIMENT['cube']  # TODO: Only supports the cube system for now.
- if not gravity:
- ax.set_xscale('log')
- ax.set_xlim(min(XS), max(XS))
- x_markers = [round(x, 1) for x in XS]
- else:
- ax.set_xlim(0, 2*9.81)
- x_markers = [0, 0.5*9.81, 1*9.81, 1.5*9.81, 2*9.81]
-
- if metric_lookup[metric]['log']:
- ax.set_yscale('log')
-
- ax.set_ylim(bottom=metric_lookup[metric]['ylims'][system][0],
- top=metric_lookup[metric]['ylims'][system][1])
-
- ax.xaxis.set_major_formatter(NullFormatter())
- ax.xaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_minor_formatter(NullFormatter())
- ax.yaxis.set_major_formatter(NullFormatter())
-
- ax.set_xticks([])
- ax.set_xticklabels([])
- ax.set_xticks(x_markers)
- if metric == "volume_error":
- ax.set_xticklabels(x_markers)
-
- ax.tick_params(axis='x', which='minor', bottom=False, labelsize=20)
- ax.tick_params(axis='x', which='major', bottom=False, labelsize=20)
-
- ax.tick_params(axis='y', which='minor', labelsize=20)
- ax.tick_params(axis='y', which='major', labelsize=20)
-
- ax.yaxis.set_major_formatter(
- FormatStrFormatter(metric_lookup[metric]['yformat'][system]))
- # ax.yaxis.set_minor_formatter(
- # FormatStrFormatter(metric_lookup[metric]['yformat'][system]))
- print(f'=============================metric: {metric}')
- if metric in ["volume_error", FRICTION_PARAMETER_ERROR,
- INERTIA_PARAMETER_ERROR]:
- if not gravity:
- plt.xlabel('Training tosses')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
- else:
- plt.xlabel('Modeled Gravity Acceleration [$m/s^2$]')
- ax.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
-
-
- if (experiment == 'elbow' and not gravity) or \
- metric in [INERTIA_PARAMETER_ERROR, FRICTION_PARAMETER_ERROR]:
- plt.ylabel(metric_lookup[metric]['label'])
- else:
- ax.set_yticklabels([])
-
- ax.yaxis.grid(True, which='both')
- ax.xaxis.grid(True, which='major')
-
- if metric in ['model_pos_int_traj', FRICTION_PARAMETER_ERROR,
- INERTIA_PARAMETER_ERROR]:
- plt.title(TITLE_BY_EXPERIMENT[experiment], fontsize=40)
-
- fig.set_size_inches(13, 13)
-
-def format_plot_(ax, fig, metric, metric_lookup, experiment, gravity=False):
-    system = SYSTEM_BY_EXPERIMENT['cube']  # TODO: Only supports the cube system for now.
- if not gravity:
- ax.set_xscale('log')
- ax.set_xlim(min(XS), max(XS))
- x_markers = [round(x, 1) for x in XS]
- if metric_lookup[metric]['log']:
- ax.set_yscale('log')
-
- ax.set_ylim(bottom=metric_lookup[metric]['ylims'][system][0],
- top=metric_lookup[metric]['ylims'][system][1])
- ax.set_xticks(x_markers)
- ax.set_xticklabels(x_markers)
- ax.yaxis.grid(True, which='both')
- ax.xaxis.grid(True, which='major')
- plt.xlabel('Training tosses')
- plt.ylabel(metric_lookup[metric]['label'])
- plt.title(TITLE_BY_EXPERIMENT[experiment], fontsize=40)
- fig.set_size_inches(13, 13)
-
-def get_method_name_by_run_dict(run_dict):
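-    # Maps run configuration flags to plot labels: unstructured runs are
-    # 'End-to-End'; structured runs without ContactNets are 'DiffSim'
-    # ('DiffSim-R' with a residual); ContactNets runs with loss_variation 1
-    # are 'CCN' ('CCN-R' with a residual); loss_variation 3 maps to the
-    # placeholder 'dummy'.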
- if not run_dict['structured']:
- return 'End-to-End'
- elif not run_dict['contactnets'] and run_dict['residual']:
- return 'DiffSim-R'
- elif not run_dict['contactnets'] and not run_dict['residual']:
- return 'DiffSim'
- elif run_dict['loss_variation'] == 3:
- return 'dummy'
- # if run_dict['contactnets'] and run_dict['residual']:
- # return 'VimpI RP'
- # elif run_dict['contactnets'] and not run_dict['residual']:
- # return 'VimpI'
- elif run_dict['loss_variation'] == 1:
- if run_dict['contactnets'] and run_dict['residual']:
- return 'CCN-R'
- elif run_dict['contactnets'] and not run_dict['residual']:
- return 'CCN'
-
- raise RuntimeError(f"Unknown method with run_dict: {run_dict}")
-
-def fill_exp_dict_with_single_run_data(run_dict, sweep_instance, exp_dict,
-                                       experiment, gravity=False):
-    method = get_method_name_by_run_dict(run_dict)
-    exp_key = f'{experiment}_gravity' if gravity else experiment
-
- for result_metric in run_dict['results'].keys():
- new_key = result_metric[5:] if result_metric[:5] == 'test_' else \
- result_metric
-
-        if (new_key in METRICS or
-                new_key in PARAMETER_METRICS_BY_EXPERIMENT[exp_key] or
-                new_key in FIXED_HORIZON_METRICS_BY_EXPERIMENT[exp_key]):
-            exp_dict[method][new_key][sweep_instance].append(
-                run_dict['results'][result_metric])
- return exp_dict
-
-def get_plottable_values(exp_dict, metric, method, metric_lookup, gravity=False):
- try:
- data_dict = exp_dict[method][metric]
-    except KeyError:
- return [None], [None], [None], [None]
-
- xs, ys, lowers, uppers = [], [], [], []
-
- scaling = metric_lookup[metric]['scaling']
-
- for x in data_dict.keys():
- if gravity:
- xs.append(x*9.81)
- else:
- xs.append(2**(x-1))
- ys.append(data_dict[x]['mean'])
- lowers.append(data_dict[x]['lower'])
- uppers.append(data_dict[x]['upper'])
-
- if None not in ys:
- ys = [y*scaling for y in ys]
- lowers = [lower*scaling for lower in lowers]
- uppers = [upper*scaling for upper in uppers]
-
- return xs, ys, lowers, uppers
-
-def get_plottable_values_no_conf(exp_dict, metric, method, metric_lookup, gravity=False):
- try:
- data_dict = exp_dict[method][metric]
-    except KeyError:
-        return [None], [None]
-
- xs, ys = [], []
- scaling = metric_lookup[metric]['scaling']
- for x in data_dict.keys():
- if gravity:
- xs.append(x*9.81)
- else:
- xs.append(2**(x-1))
- ys.append(data_dict[x][0])
- if None not in ys:
- ys = [y*scaling for y in ys]
- return xs, ys
-
-if __name__ == '__main__':
- with open(JSON_OUTPUT_FILE) as file:
- results = json.load(file)
- sent_warning = False
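-    # The loaded JSON is nested as
-    #   results[experiment]['data_sweep'][dataset exponent][run name] ->
-    #       {'structured', 'contactnets', 'loss_variation', 'residual',
-    #        'results', 'learned_params', ...},
-    # which the loops below consume.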
-
- for experiment in results.keys():
- system = SYSTEM_BY_EXPERIMENT[experiment]
- exp_dict = get_empty_experiment_dict_by_experiment(experiment)
- data_sweep = results[experiment]['data_sweep']
- # Iterate over dataset sizes to collect all the data.
- for exponent_str in data_sweep.keys():
- exponent = int(exponent_str)
-
- # Iterate over runs.
- for run_name, run_dict in data_sweep[exponent_str].items():
- # if run_name[2:4] in RUN_NUMBERS_TO_SKIP:
- # if not sent_warning:
- # print(f'WARNING: Skipping any run numbers in ' + \
- # f'{RUN_NUMBERS_TO_SKIP}.')
- # sent_warning = True
- # continue
- run_dict = convert_parameters_to_errors(run_dict, experiment)
- # print(f'run_dict: {run_dict}')
- # if get_method_name_by_run_dict(run_dict) == 'dummy': continue
- # TODO: Currently don't have fixed horizon stats
- run_dict = include_fixed_horizon_post_stats(run_dict, experiment)
-                exp_dict = fill_exp_dict_with_single_run_data(
-                    run_dict, exponent, exp_dict, experiment)
- # print(exp_dict)
- # print(exp_dict['CCN'].keys())
- # print(exp_dict['CCN']['model_loss_mean'])
- # Convert lists to dictionary with keys average, upper, and lower.
- # if use_t_conf:
- # exp_dict = convert_lists_to_t_conf_dict(exp_dict, exponent)
-
- # Iterate over the metrics to do plots of each.
- for metric in METRICS.keys():
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- # if use_t_conf:
- # xs, ys, lowers, uppers = get_plottable_values(exp_dict, metric,
- # method, METRICS)
- xs, ys = get_plottable_values_no_conf(exp_dict, metric,
- method, METRICS)
- # Plot the method unless there are any None objects.
- # if None in ys or None in lowers or None in lowers:
- # continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- # ax.fill_between(xs, lowers, uppers, alpha=0.3,
- # color=METHOD_RESULTS[method])
-
- format_plot_(ax, fig, metric, METRICS, experiment)
-
- fig_path = op.join(OUTPUT_DIR, f'{experiment}_{metric}.png')
- print(f'Saving fig at: {fig_path}')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Iterate over parameter metrics to do plots of each.
- for parameter_metric in PARAMETER_METRICS_BY_EXPERIMENT[experiment]:
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- # if use_conf:
- # xs, ys, lowers, uppers = get_plottable_values(
- # exp_dict, parameter_metric, method, PARAMETER_ERRORS)
-            xs, ys = get_plottable_values_no_conf(exp_dict, parameter_metric,
-                                                  method, PARAMETER_ERRORS)
- # Plot the method unless there are any None objects.
- # if None in ys or None in lowers or None in lowers:
- # continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- # ax.fill_between(xs, lowers, uppers, alpha=0.3,
- # color=METHOD_RESULTS[method])
-
- format_plot_(ax, fig, parameter_metric, PARAMETER_ERRORS, experiment)
-
- fig_path = op.join(OUTPUT_DIR, f'{experiment}_{parameter_metric}.png')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Iterate over fixed horizon post-processing metrics to do plots of each.
- for fixed_horizon_metric in FIXED_HORIZON_METRICS_BY_EXPERIMENT[experiment]:
- # Start a plot.
- fig = plt.figure()
- ax = plt.gca()
-
- for method in METHOD_RESULTS.keys():
- # if use_conf:
- # xs, ys, lowers, uppers = get_plottable_values(
- # exp_dict, fixed_horizon_metric, method, FIXED_HORIZON_METRICS)
-            xs, ys = get_plottable_values_no_conf(
-                exp_dict, fixed_horizon_metric, method, FIXED_HORIZON_METRICS)
- # Plot the method unless there are any None objects.
- # if None in ys or None in lowers or None in lowers:
- # continue
-
- ax.plot(xs, ys, label=method, linewidth=5,
- color=METHOD_RESULTS[method])
- # ax.fill_between(xs, lowers, uppers, alpha=0.3,
- # color=METHOD_RESULTS[method])
-
- format_plot_(ax, fig, fixed_horizon_metric, FIXED_HORIZON_METRICS, experiment)
- plt.title(experiment)
- fig_path = op.join(OUTPUT_DIR, f'{experiment}_{fixed_horizon_metric}.png')
- fig.savefig(fig_path, dpi=100)
- plt.close()
-
- # Add in a test plot of the number of experiments.
- do_run_num_plot(exp_dict, experiment)
-
\ No newline at end of file
diff --git a/dair_pll_old/helpers/results.json b/dair_pll_old/helpers/results.json
deleted file mode 100644
index b40bf61..0000000
--- a/dair_pll_old/helpers/results.json
+++ /dev/null
@@ -1,712 +0,0 @@
-{
- "cube": {
- "system": "cube",
- "prefix": "bundlesdf",
- "data_sweep": {
- "2": {
- "bundlesdf_polygon_4": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 0.9217517960997864,
- "test_v_plus_squared_mean": 8.008196548025536,
- "test_model_loss_mean": 0.7081652938934796,
- "test_oracle_loss_mean": 0.33701947943817606,
- "test_model_trajectory_mse_mean": 3670.349808575899,
- "test_oracle_trajectory_mse_mean": 2995.834490915543,
- "test_model_pos_int_traj": 0.1028059276477552,
- "test_oracle_pos_int_traj": 0.05766853105277331,
- "test_model_angle_int_traj": 1.0214570107090484,
- "test_oracle_angle_int_traj": 1.1123231188454517,
- "test_model_penetration_int_traj": 0.002903569226001484,
- "test_oracle_penetration_int_traj": 0.007287028718230018
- },
- "learned_params": {
- "body": {
- "m": 0.36999999999999555,
- "px": -0.013140669792183535,
- "py": 0.004364866809541152,
- "pz": -0.0021506723292677487,
- "I_xx": 0.0005219529588016391,
- "I_yy": 0.000523675174764287,
- "I_zz": 0.0005006515052464978,
- "I_xy": -2.1008465028073733e-05,
- "I_xz": 4.064642398588234e-05,
- "I_yz": 6.63522707016624e-05,
- "mu": 0.19929445561595777,
- "diameter_x": 0.15139629963900703,
- "diameter_y": 0.09875254968337287,
- "diameter_z": 0.11748405762530519,
- "center_x": 0.0018236990414054952,
- "center_y": -0.0024931259629540695,
- "center_z": 0.0072400803002872285,
- "vertices": [
- [
- -0.07311975750421848,
- -0.0509557924191007,
- 0.05219477202031128
- ],
- [
- 0.04989426586206843,
- -0.0518694008046405,
- 0.043625466908864555
- ],
- [
- -0.07387445077809802,
- 0.04025565630147194,
- 0.0478391432841565
- ],
- [
- 0.05206141149378202,
- 0.03033904794796238,
- 0.06598210911293982
- ],
- [
- -0.04880451534909203,
- 0.046883148878732364,
- -0.05007573879345093
- ],
- [
- 0.07752184886090902,
- 0.024895567219154452,
- -0.051501948512365364
- ],
- [
- -0.05080602646548413,
- -0.051362432644149816,
- -0.0506744434946042
- ],
- [
- 0.052689830659608854,
- -0.05143277016443818,
- -0.048459048445270964
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- },
- "3": {
- "bundlesdf_polygon_8": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 1.9506098374868246,
- "test_v_plus_squared_mean": 17.084460801050408,
- "test_model_loss_mean": 7.017050773361515,
- "test_oracle_loss_mean": 0.24987527312608288,
- "test_model_trajectory_mse_mean": 965.3571692553128,
- "test_oracle_trajectory_mse_mean": 1000.3285764351244,
- "test_model_pos_int_traj": 0.08580439278349268,
- "test_oracle_pos_int_traj": 0.05440579373355096,
- "test_model_angle_int_traj": 0.2999113558570343,
- "test_oracle_angle_int_traj": 0.20452762995569082,
- "test_model_penetration_int_traj": 0.019200226112491333,
- "test_oracle_penetration_int_traj": 0.0072011407251909324
- },
- "learned_params": {
- "body": {
- "m": 0.3699999999999115,
- "px": 0.004224793931305089,
- "py": 0.00638075244758463,
- "pz": -0.002926523714627936,
- "I_xx": 0.0004927213083489744,
- "I_yy": 0.0005018638441412061,
- "I_zz": 0.00046036083941130337,
- "I_xy": -2.035257190454881e-05,
- "I_xz": 7.214500503584634e-06,
- "I_yz": -1.6302802681673354e-05,
- "mu": 0.18405650845589325,
- "diameter_x": 0.10472433895377911,
- "diameter_y": 0.11701880871944007,
- "diameter_z": 0.09821906387625684,
- "center_x": 0.00019902204979467888,
- "center_y": -0.006833358777633112,
- "center_z": -0.0007374021724949921,
- "vertices": [
- [
- -0.05216314742709487,
- -0.06534276313735314,
- 0.002978712106701904
- ],
- [
- 0.05149008999410646,
- -0.06179060514052133,
- 0.041945693171425234
- ],
- [
- -0.05163758581687623,
- 0.05167604558208692,
- 0.04837212976563343
- ],
- [
- 0.05256119152668423,
- 0.05051081599555027,
- 0.04726552263081582
- ],
- [
- -0.050815847917757374,
- 0.051196573946073166,
- -0.049846934110623416
- ],
- [
- 0.05234743387347766,
- 0.05066796201380315,
- -0.048226028089776746
- ],
- [
- -0.05125370727469417,
- -0.03639142610955008,
- -0.049724348090072375
- ],
- [
- 0.050489656747156025,
- -0.049351947850741706,
- -0.048342426403207586
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- },
- "4": {
- "bundlesdf_polygon_16": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 1.7941709159787567,
- "test_v_plus_squared_mean": 25.397730055324523,
- "test_model_loss_mean": 0.5375105954188135,
- "test_oracle_loss_mean": 0.34202959805313793,
- "test_model_trajectory_mse_mean": 1741.9393706421793,
- "test_oracle_trajectory_mse_mean": 1958.9507549659356,
- "test_model_pos_int_traj": 0.05948039587342914,
- "test_oracle_pos_int_traj": 0.05608240179903316,
- "test_model_angle_int_traj": 0.5253689945425871,
- "test_oracle_angle_int_traj": 0.572014963057567,
- "test_model_penetration_int_traj": 0.005764027111940869,
- "test_oracle_penetration_int_traj": 0.006216662596893576
- },
- "learned_params": {
- "body": {
- "m": 0.3699999999999763,
- "px": 0.0009375132800615605,
- "py": -0.0022555377808982286,
- "pz": 0.00652014678838196,
- "I_xx": 0.0006186908834514132,
- "I_yy": 0.0006159468428939615,
- "I_zz": 0.0006284906427183698,
- "I_xy": 9.917560229585376e-05,
- "I_xz": 8.587712086840097e-05,
- "I_yz": 3.036171731487472e-05,
- "mu": 0.14223023364590773,
- "diameter_x": 0.10376141735285016,
- "diameter_y": 0.10387330303122118,
- "diameter_z": 0.10156060307632625,
- "center_x": 8.637966251999127e-05,
- "center_y": 7.943129805482174e-05,
- "center_z": 0.0013126975915799845,
- "vertices": [
- [
- -0.051794329013905085,
- -0.050457758881840946,
- 0.04939665450988748
- ],
- [
- 0.05151026159592347,
- -0.05037121223043065,
- 0.05209299912974311
- ],
- [
- -0.05002127017486822,
- 0.05135037328058622,
- 0.05076732954414618
- ],
- [
- 0.051895905507368015,
- 0.04997260731836747,
- 0.051818229450499464
- ],
- [
- -0.05047969196983385,
- 0.051746231163523865,
- -0.04650615968783861
- ],
- [
- 0.05196708833894507,
- 0.05201608281366541,
- -0.043670716434917344
- ],
- [
- -0.05059389925667265,
- -0.051758510602067966,
- -0.049467603946583144
- ],
- [
- 0.050232848682787934,
- -0.05185722021755577,
- -0.03944785648144373
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- },
- "5": {
- "bundlesdf_polygon_32": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 1.7250608293318408,
- "test_v_plus_squared_mean": 16.832799940751162,
- "test_model_loss_mean": 0.5516578310225213,
- "test_oracle_loss_mean": 0.35220639799547404,
- "test_model_trajectory_mse_mean": 938.8179112864713,
- "test_oracle_trajectory_mse_mean": 1510.9288157470187,
- "test_model_pos_int_traj": 0.057122637051206474,
- "test_oracle_pos_int_traj": 0.05003985709760879,
- "test_model_angle_int_traj": 0.2064346025033596,
- "test_oracle_angle_int_traj": 0.2651287184040619,
- "test_model_penetration_int_traj": 0.005626874519316625,
- "test_oracle_penetration_int_traj": 0.006424403341914204
- },
- "learned_params": {
- "body": {
- "m": 0.3699999999999622,
- "px": 0.002679741393578073,
- "py": -0.0025300115547340655,
- "pz": 0.0015291528227625354,
- "I_xx": 0.0005826425366477232,
- "I_yy": 0.0005717244937245672,
- "I_zz": 0.0005781250124419109,
- "I_xy": -3.4076741530378696e-05,
- "I_xz": -9.398716417704297e-06,
- "I_yz": -9.129427513664469e-06,
- "mu": 0.151429681858099,
- "diameter_x": 0.09999964604067349,
- "diameter_y": 0.10369323879347418,
- "diameter_z": 0.10357809803126117,
- "center_x": -0.001439962412607479,
- "center_y": 9.625399734557866e-05,
- "center_z": -0.0001250694802597159,
- "vertices": [
- [
- -0.051439785432944224,
- -0.050818757231946156,
- 0.051613964550211666
- ],
- [
- 0.02843836621737213,
- -0.05175036539939151,
- 0.051663979535370864
- ],
- [
- -0.05050307753185057,
- 0.0502730212792881,
- 0.050856315663829026
- ],
- [
- 0.0427646293090361,
- 0.05194287339408267,
- 0.05106240120018823
- ],
- [
- -0.05127817199004499,
- 0.0510384886215607,
- -0.05108752135769287
- ],
- [
- 0.048454623019185544,
- 0.05174985227750724,
- -0.050523137694804855
- ],
- [
- -0.05107129890509469,
- -0.04984849028263197,
- -0.051288222608318605
- ],
- [
- 0.048559860607729266,
- -0.04949431164512397,
- -0.051914118495890296
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- },
- "6": {
- "bundlesdf_polygon_64": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 1.7627334951032145,
- "test_v_plus_squared_mean": 19.744389981848173,
- "test_model_loss_mean": 0.7164805735609984,
- "test_oracle_loss_mean": 0.33754994897731566,
- "test_model_trajectory_mse_mean": 1713.8802620935562,
- "test_oracle_trajectory_mse_mean": 1846.1685174543486,
- "test_model_pos_int_traj": 0.05630226592390562,
- "test_oracle_pos_int_traj": 0.054470632199272645,
- "test_model_angle_int_traj": 0.5847852280096151,
- "test_oracle_angle_int_traj": 0.4422725759988579,
- "test_model_penetration_int_traj": 0.0012013199522117124,
- "test_oracle_penetration_int_traj": 0.006459323198607108
- },
- "learned_params": {
- "body": {
- "m": 0.3700000000000206,
- "px": 0.003331662273964109,
- "py": 0.0037125966067250945,
- "pz": 0.001961659690576871,
- "I_xx": 0.0006531459164144626,
- "I_yy": 0.0006524503777696288,
- "I_zz": 0.0006568098014782165,
- "I_xy": 1.2568455287512219e-05,
- "I_xz": -1.0103855663377873e-05,
- "I_yz": -4.402680751527439e-06,
- "mu": 0.14296159597311908,
- "diameter_x": 0.10285046454189323,
- "diameter_y": 0.10268317515657936,
- "diameter_z": 0.10255063716891391,
- "center_x": 6.893470521728354e-05,
- "center_y": 9.248335176247763e-05,
- "center_z": -0.000194612436269892,
- "vertices": [
- [
- -0.051225633618305656,
- -0.0512466187290075,
- 0.049585732886648054
- ],
- [
- 0.05121908229076181,
- -0.04982588207417974,
- 0.05108070614818706
- ],
- [
- -0.0508901689300684,
- 0.05072202502850164,
- 0.05101200501007527
- ],
- [
- 0.051494166976163894,
- 0.05095737884031742,
- 0.05066387695849375
- ],
- [
- -0.05064831672691559,
- 0.05143407093005216,
- -0.05049315486750525
- ],
- [
- 0.05069270231570977,
- 0.051212924561548426,
- -0.05042798993393957
- ],
- [
- -0.05135629756572933,
- -0.05057612659266798,
- -0.05146857231803114
- ],
- [
- 0.05111380083153531,
- -0.0512491042265272,
- -0.05146993102072685
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- },
- "7": {
- "bundlesdf_polygon_128": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 1.674740907084207,
- "test_v_plus_squared_mean": 16.12461107994049,
- "test_model_loss_mean": 0.6397378407749176,
- "test_oracle_loss_mean": 0.362757191391614,
- "test_model_trajectory_mse_mean": 1349.7632518403645,
- "test_oracle_trajectory_mse_mean": 1538.3382727420708,
- "test_model_pos_int_traj": 0.05607829320999889,
- "test_oracle_pos_int_traj": 0.052048364600423745,
- "test_model_angle_int_traj": 0.3912481953837623,
- "test_oracle_angle_int_traj": 0.3457484583946435,
- "test_model_penetration_int_traj": 0.0014026904971144246,
- "test_oracle_penetration_int_traj": 0.006612637156043849
- },
- "learned_params": {
- "body": {
- "m": 0.36999999999999506,
- "px": -0.004000887743770452,
- "py": 0.0015933077825294464,
- "pz": 0.0005212188532708687,
- "I_xx": 0.0006404806248526154,
- "I_yy": 0.0006386667077971535,
- "I_zz": 0.0006382946056762977,
- "I_xy": -7.584347930714781e-06,
- "I_xz": 2.3501529533752057e-05,
- "I_yz": 1.7256328946758455e-05,
- "mu": 0.15094031982083236,
- "diameter_x": 0.10241615682902633,
- "diameter_y": 0.10278596444160358,
- "diameter_z": 0.10246119303112285,
- "center_x": 0.00023148590180046072,
- "center_y": -7.494736117476947e-05,
- "center_z": -0.0003424690174790093,
- "vertices": [
- [
- -0.050931766317179133,
- -0.05105821327610534,
- 0.050888127498082414
- ],
- [
- 0.05143956431631363,
- -0.05146792958197656,
- 0.049593035141372824
- ],
- [
- -0.05061210328403809,
- 0.05131803485962702,
- 0.049955321109450265
- ],
- [
- 0.05119046106917727,
- 0.05079138367725209,
- 0.04986299966360887
- ],
- [
- -0.05097659251271271,
- 0.05097367420315141,
- -0.050721079530892665
- ],
- [
- 0.05051968652945197,
- 0.050946152866357355,
- -0.05095260024258356
- ],
- [
- -0.04950236862343668,
- -0.050729461935993515,
- -0.05097544369130357
- ],
- [
- 0.051052410411677365,
- -0.05016835331681574,
- -0.05157306553304043
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- },
- "8": {
- "bundlesdf_polygon_256": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 1.694782236532321,
- "test_v_plus_squared_mean": 17.914083212891263,
- "test_model_loss_mean": 0.6662193824692064,
- "test_oracle_loss_mean": 0.32753416861432244,
- "test_model_trajectory_mse_mean": 1534.8985719803773,
- "test_oracle_trajectory_mse_mean": 1752.987279884503,
- "test_model_pos_int_traj": 0.05728602316678977,
- "test_oracle_pos_int_traj": 0.050776540749978116,
- "test_model_angle_int_traj": 0.4278209486202247,
- "test_oracle_angle_int_traj": 0.3838806317420259,
- "test_model_penetration_int_traj": 0.0010188387745625714,
- "test_oracle_penetration_int_traj": 0.006537836989890044
- },
- "learned_params": {
- "body": {
- "m": 0.36999999999999394,
- "px": -0.0001259266303703479,
- "py": 0.007191888253203249,
- "pz": -0.0028556895922017644,
- "I_xx": 0.000668667649281082,
- "I_yy": 0.0006688793968911133,
- "I_zz": 0.0006682621257496545,
- "I_xy": -1.2948608450065287e-05,
- "I_xz": 1.4836105725834432e-05,
- "I_yz": -3.836567435694041e-06,
- "mu": 0.15205102762862766,
- "diameter_x": 0.10241696152830077,
- "diameter_y": 0.10273675844915178,
- "diameter_z": 0.10319119437261232,
- "center_x": -4.780579883603697e-05,
- "center_y": 5.1786459527820156e-05,
- "center_z": 9.133872765774292e-06,
- "vertices": [
- [
- -0.05125628656298642,
- -0.05116577991032455,
- 0.050906729807011455
- ],
- [
- 0.05045952955534723,
- -0.05100295299419966,
- 0.0505848742818439
- ],
- [
- -0.0509371469416208,
- 0.05114683116070482,
- 0.051033670027073125
- ],
- [
- 0.05078045843447287,
- 0.05101281717052189,
- 0.05160473105907193
- ],
- [
- -0.05098147838852546,
- 0.05118249043676415,
- -0.05086624200034732
- ],
- [
- 0.0509581735285212,
- 0.05142016568410371,
- -0.050836075270032324
- ],
- [
- -0.05078303685282537,
- -0.05131659276504807,
- -0.05128639919915526
- ],
- [
- 0.051160674965314346,
- -0.05062555156538773,
- -0.05158646331354038
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- },
- "9": {
- "bundlesdf_512": {
- "structured": true,
- "contactnets": true,
- "loss_variation": 0,
- "residual": false,
- "result_set": "test",
- "results": {
- "test_delta_v_squared_mean": 1.8421861954735241,
- "test_v_plus_squared_mean": 17.90235838967406,
- "test_model_loss_mean": 0.5858928862158066,
- "test_oracle_loss_mean": 0.3395149067097729,
- "test_model_trajectory_mse_mean": 1116.002209412494,
- "test_oracle_trajectory_mse_mean": 1398.2771235703817,
- "test_model_pos_int_traj": 0.05197079182018731,
- "test_oracle_pos_int_traj": 0.048667603509305005,
- "test_model_angle_int_traj": 0.3012779896940418,
- "test_oracle_angle_int_traj": 0.3091159522426972,
- "test_model_penetration_int_traj": 0.0009934945778009312,
- "test_oracle_penetration_int_traj": 0.006627991634598714
- },
- "learned_params": {
- "body": {
- "m": 0.3699999999999889,
- "px": -0.0023150953271525924,
- "py": -0.00047466882684452094,
- "pz": 0.0008684598150368096,
- "I_xx": 0.0006784441116416432,
- "I_yy": 0.0006767197438011258,
- "I_zz": 0.0006778573503708249,
- "I_xy": 7.823480097367016e-07,
- "I_xz": -6.137074854679443e-06,
- "I_yz": 3.934006764263276e-06,
- "mu": 0.14643497625170504,
- "diameter_x": 0.10215854516608186,
- "diameter_y": 0.10247648187006037,
- "diameter_z": 0.10244930790596048,
- "center_x": 5.876941215519557e-05,
- "center_y": -0.00010699839762408694,
- "center_z": 6.537311863917539e-06,
- "vertices": [
- [
- -0.05093432959588452,
- -0.05134523933265427,
- 0.05074422774817064
- ],
- [
- 0.05113804199519613,
- -0.05112967192569806,
- 0.051231191264844156
- ],
- [
- -0.05081966815603261,
- 0.05097683690845231,
- 0.051009948567673595
- ],
- [
- 0.05088432617407388,
- 0.0511312425374061,
- 0.05112917503487126
- ],
- [
- -0.05102050317088574,
- 0.05111858796999205,
- -0.05048040035416967
- ],
- [
- 0.051093145844060175,
- 0.051128136923914616,
- -0.05114704491813991
- ],
- [
- -0.05070304589321202,
- -0.05084764444435395,
- -0.05116353849095671
- ],
- [
- 0.05075027329059087,
- -0.05102093305198592,
- -0.05121811664111632
- ]
- ]
- }
- },
- "post_results": null,
- "fixed_horizon_post_results": null
- }
- }
- }
- }
-}
\ No newline at end of file
diff --git a/dair_pll_old/pylintrc b/dair_pll_old/pylintrc
deleted file mode 100644
index ff867f8..0000000
--- a/dair_pll_old/pylintrc
+++ /dev/null
@@ -1,9 +0,0 @@
-[BASIC]
-good-names-rgxs=^(J(q|qdot|v)_|Dt([A-Z](([a-z]|\d+))?)?)?((t|F)_[A-Z](([a-z]|\d+))?|f_[A-Z](([a-z]|\d+))?(([a-z]|cm|\d+))?|(R|X|w|V|alpha|A)_[A-Z](([a-z]|\d+))?[A-Z](([a-z]|\d+))?|T_[A-Z](([a-z]|\d+))?[A-Z](([a-z]|\d+))?|p_[A-Z](([a-z]|\d+))?(([a-z]|cm|\d+))?[A-Z](([a-z]|\d+))?(([a-z]|cm|\d+))?|(v|a|I|M|L)_[A-Z](([a-z]|\d+))?[A-Z](([a-z]|\d+))?(([a-z]|cm|\d+))?|K_[A-Z](([a-z]|\d+))?[A-Z](([a-z]|\d+))?|(v|a)_[A-Z](([a-z]|\d+))?_[A-Z](([a-z]|\d+))?(([a-z]|cm|\d+))?[A-Z](([a-z]|\d+))?(([a-z]|cm|\d+))?|(V|A)_[A-Z](([a-z]|\d+))?_[A-Z](([a-z]|\d+))?[A-Z](([a-z]|\d+))?|L_[A-Z](([a-z]|\d+))?[A-Z](([a-z]|\d+))?[A-Z](([a-z]|\d+))?(([a-z]|cm|\d+))?)(_[A-Z](([a-z]|\d+))?)?$
-
-good-names=q,v,x,u,dq,dv,dx,du,dt,t,qs,vs,xs,us,pi,M,J,J_x,J_y,J_z,J_n,J_t,Q,q,E,Q_full,lr,wd,mu,r,p
-
-method-rgx=(?:(?P<snake>[a-z_]+)|(?P<camel>([A-Z][a-z]*)*))$
-
-[MESSAGES CONTROL]
-disable=E0611
diff --git a/dair_pll_old/setup.py b/dair_pll_old/setup.py
deleted file mode 100644
index 652cf66..0000000
--- a/dair_pll_old/setup.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from setuptools import setup
-
-install_reqs = [
- # library
- 'torch',
- 'moviepy',
- 'Pillow',
- 'wandb',
- 'mujoco-py',
- 'optuna',
- 'numpy',
- 'scipy',
- 'typing_extensions',
- 'matplotlib',
- 'threadpoolctl',
- 'click',
- 'pywavefront',
- 'python-fcl',
- 'gitpython',
- 'protobuf==3.20.*',
- 'cvxpylayers',
- # documentation
- 'networkx',
- 'protobuf==3.20.*',
- 'pydeps==1.10.12',
- 'Sphinx',
- 'sphinx-autodoc-typehints',
- 'sphinx-rtd-theme',
- 'sphinx-toolbox',
- 'sphinxcontrib-bibtex',
- # development
- 'yapf',
- 'pylint',
- 'mypy',
-]
-
-try:
- import pydrake
-
- print('USING FOUND DRAKE VERSION')
-except ModuleNotFoundError:
- install_reqs += ['drake']
-
-dependency_links = [
- 'git+https://github.com/DAIRLab/drake-pytorch.git',
- 'git+https://github.com/mshalm/sappy.git'
-]
-
-setup(
- name='dair_pll',
- version='0.0.1',
- packages=['dair_pll'],
- install_requires=install_reqs,
- dependency_links=dependency_links
-)
diff --git a/dair_pll_old/test/inertia_conversions.py b/dair_pll_old/test/inertia_conversions.py
deleted file mode 100644
index 0ee8260..0000000
--- a/dair_pll_old/test/inertia_conversions.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""Test script for InertialParameterConverter.
-"""
-import torch
-import pdb
-
-from dair_pll.inertia import InertialParameterConverter as ipc
-
-
-# torch.set_default_dtype(torch.float64) <-- moved into inertia.py
-
-N_TESTS = 20
-VERBOSE = False
-THETA_SCALE = 1e1
-N_BODIES_UP_TO = 6
-
-
-# Check that theta_to_pi_o and pi_o_to_theta are inverses of each other.
-def do_theta_pi_o_test():
- n_bodies = int(torch.rand(1).item()*(N_BODIES_UP_TO-1)+1)
-
- # Make a random set of theta parameters.
- theta = (torch.rand(n_bodies, 10) - 0.5) * THETA_SCALE
-
- # Calculate pi_o from this random theta.
- pi_o = ipc.theta_to_pi_o(theta)
-
- # Return the norm difference between theta and theta>pi_o>theta.
- return torch.norm(theta - ipc.pi_o_to_theta(pi_o))
-
-
-# Check that pi_o_to_pi_cm and pi_cm_to_pi_o are inverses of each other.
-def do_pi_o_cm_test(use_thetas = True):
- n_bodies = int(torch.rand(1).item()*(N_BODIES_UP_TO-1)+1)
-
- if use_thetas:
- # Make a random set of theta parameters.
- theta = (torch.rand(n_bodies, 10) - 0.5) * THETA_SCALE
-
- # Make pi_o parameters from this random theta.
- pi_o = ipc.theta_to_pi_o(theta)
-
- else:
- pi_o = torch.rand(n_bodies, 10) - 0.5
- pi_o[:, 0] = torch.abs(pi_o[:, 0])
-
- # Calculate pi_cm from this set.
- pi_cm = ipc.pi_o_to_pi_cm(pi_o)
-
- # Return the norm difference between theta and pi_o>pi_cm>pi_o.
- return torch.norm(pi_o - ipc.pi_cm_to_pi_o(pi_cm))
-
-
-# Check that theta_to_pi_cm and pi_cm_to_theta are inverses of each other.
-def do_theta_pi_cm_test():
- n_bodies = int(torch.rand(1).item()*(N_BODIES_UP_TO-1)+1)
-
- # Make a random set of theta parameters.
- theta = torch.rand(n_bodies, 10) * THETA_SCALE
-
- # Calculate pi_cm from this random theta.
- pi_cm = ipc.theta_to_pi_cm(theta)
-
- # Return the norm difference between theta and theta>pi_cm>theta.
- return torch.norm(theta - ipc.pi_cm_to_theta(pi_cm))
-
-
-print(f'\n=== Doing theta to pi_o test ===')
-cost = 0
-for _ in range(N_TESTS):
- new_cost = do_theta_pi_o_test().item()
- cost += new_cost
- if VERBOSE:
- print(new_cost)
-print(f'\tAverage difference: {cost/N_TESTS}')
-
-print(f'\n=== Doing pi_o to pi_cm test, starting from thetas ===')
-cost = 0
-for _ in range(N_TESTS):
- new_cost = do_pi_o_cm_test(use_thetas = True).item()
- cost += new_cost
- if VERBOSE:
- print(new_cost)
-print(f'\tAverage difference: {cost/N_TESTS}')
-
-print(f'\n=== Doing pi_o to pi_cm test, not using thetas ===')
-cost = 0
-for _ in range(N_TESTS):
- new_cost = do_pi_o_cm_test(use_thetas = False).item()
- cost += new_cost
- if VERBOSE:
- print(new_cost)
-print(f'\tAverage difference: {cost/N_TESTS}')
-
-print(f'\n=== Doing theta to pi_cm test ===')
-cost = 0
-for _ in range(N_TESTS):
- new_cost = do_theta_pi_cm_test().item()
- cost += new_cost
- if VERBOSE:
- print(new_cost)
-print(f'\tAverage difference: {cost/N_TESTS}')
\ No newline at end of file
diff --git a/dair_pll_old/test/inertia_randomization.py b/dair_pll_old/test/inertia_randomization.py
deleted file mode 100644
index 9c9e5d0..0000000
--- a/dair_pll_old/test/inertia_randomization.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""Test script to help with inertia randomizations."""
-import pdb
-
-import torch
-from torch import Tensor
-from scipy.spatial.transform import Rotation
-
-from dair_pll.geometry import _NOMINAL_HALF_LENGTH
-from dair_pll.inertia import InertialParameterConverter
-
-
-N_TESTS = 1000
-
-bad_runs = 0
-
-for _ in range(N_TESTS):
- pi_cm_params = torch.ones((2,10))
- theta_params = torch.ones((2,10))
-
- # Randomize the inertia.
- for idx in range(theta_params.shape[0]):
- pi_cm = pi_cm_params[idx]
-
- # Let the center of mass be anywhere within the inner half of a
- # nominal geometry.
- mass = pi_cm[0].item()
- pi_cm[1:4] = mass * (torch.rand(3) - 0.5) * _NOMINAL_HALF_LENGTH
-
- # Define the moments of inertia along principal axes assuming a solid
- # block of homogeneous density with random mass and random lengths.
- rand_mass = mass * (torch.rand(1) + 0.5)
- rand_lengths = (torch.rand(3) + 0.5) * _NOMINAL_HALF_LENGTH
- Ixx_pa = (rand_mass/12) * (rand_lengths[1]**2 + rand_lengths[2]**2)
- Iyy_pa = (rand_mass/12) * (rand_lengths[0]**2 + rand_lengths[2]**2)
- Izz_pa = (rand_mass/12) * (rand_lengths[0]**2 + rand_lengths[1]**2)
-
- # Randomly rotate the principal axes.
- rot_mat = Tensor(Rotation.random().as_matrix())
- I_mat_pa = Tensor([[Ixx_pa, 0., 0.],
- [0., Iyy_pa, 0.],
- [0., 0., Izz_pa]])
- I_mat_rand = rot_mat.T @ I_mat_pa @ rot_mat
-
- # Grab the moments and products of inertia from this result.
- Ixx, Iyy, Izz = I_mat_rand[0,0], I_mat_rand[1,1], I_mat_rand[2,2]
-        # Off-diagonal products of inertia: (0,1) -> Ixy, (0,2) -> Ixz,
-        # (1,2) -> Iyz.
-        Ixy, Ixz, Iyz = I_mat_rand[0,1], I_mat_rand[0,2], I_mat_rand[1,2]
-
- pi_cm[4:7] = Tensor([Ixx, Iyy, Izz])
- pi_cm[7:10] = Tensor([Ixy, Ixz, Iyz])
-
- pi_cm_params[idx] = pi_cm
-
- theta_params = InertialParameterConverter.pi_cm_to_theta(pi_cm_params)
-
- if torch.any(torch.isnan(theta_params)):
- bad_runs += 1
-
-
-print(f'Failure rate: {bad_runs/N_TESTS*100:.2f}% \nFailures: {bad_runs}')
diff --git a/dair_pll_old/test/loss_calculations.py b/dair_pll_old/test/loss_calculations.py
deleted file mode 100644
index 2ffd2b8..0000000
--- a/dair_pll_old/test/loss_calculations.py
+++ /dev/null
@@ -1,453 +0,0 @@
-"""Test script for comparing loss calculations from PLL and SoPhTER."""
-import torch
-from torch import Tensor
-
-from dair_pll import file_utils
-from dair_pll.dataset_management import DataConfig, DataGenerationConfig
-from dair_pll.drake_experiment import DrakeMultibodyLearnableExperiment, \
- DrakeSystemConfig, \
- MultibodyLearnableSystemConfig, \
- MultibodyLosses
-from dair_pll.experiment import default_epoch_callback
-from dair_pll.experiment_config import SupervisedLearningExperimentConfig, \
- OptimizerConfig
-from dair_pll.multibody_learnable_system import MultibodyLearnableSystem
-from dair_pll.state_space import UniformSampler
-
-
-NQ = 7
-TRUE_CUBE_URDF = 'contactnets_cube.urdf'
-LARGE_CUBE_URDF = 'contactnets_cube_large.urdf'
-SYSTEM_NAME = 'cube'
-
-
-class SoPhTERTensorUtils:
- """Tensor utilities from SoPhTER."""
-
- @staticmethod
- def matrix_diag(diagonal: Tensor) -> Tensor:
- """
- Written by fmassa at: https://github.com/pytorch/pytorch/issues/12160
- """
- N = diagonal.shape[-1]
- shape = diagonal.shape[:-1] + (N, N)
- device, dtype = diagonal.device, diagonal.dtype
- result = torch.zeros(shape, dtype=dtype, device=device)
- indices = torch.arange(result.numel(), device=device).reshape(shape)
- indices = indices.diagonal(dim1=-2, dim2=-1)
- result.view(-1)[indices] = diagonal
- return result
-
- @staticmethod
- def veceye(n: int, veclen: int) -> Tensor:
- """Compute a block diagonal matrix with column vectors of ones as blocks.
- Example:
- veceye(3, 2) =
- tensor([[1., 0., 0.],
- [1., 0., 0.],
- [0., 1., 0.],
- [0., 1., 0.],
- [0., 0., 1.],
- [0., 0., 1.]])
- Args:
- n: number of columns.
- veclen: number of ones in each matrix diagonal block.
- Returns:
- A (n * veclen) x n matrix.
- """
-
- return torch.eye(n).repeat(1, veclen).reshape(n * veclen, n)
-
- @staticmethod
- def pad_right(x: Tensor, elem: float, num: int) -> Tensor:
- """Right pad a batched tensor with an element.
- Args:
- x: batch_n x n x m.
- elem: element to pad with.
- num: how many columns filled with elem to add.
- Returns:
- A batch_n x n x (m + num) tensor. The new elements are all filled with elem.
- """
- pad = torch.ones(x.shape[0], x.shape[1], num) * elem
- return torch.cat((x, pad), dim=2)
-
- @staticmethod
- def pad_left(x: Tensor, elem: float, num: int) -> Tensor:
- """Left pad a batched tensor with an element.
- Args:
- x: batch_n x n x m.
- elem: element to pad with.
- num: how many columns filled with elem to add.
- Returns:
- A batch_n x n x (num + m) tensor. The new elements are all filled with elem.
- """
- pad = torch.ones(x.shape[0], x.shape[1], num) * elem
- return torch.cat((pad, x), dim=2)
-
- @staticmethod
- def pad_top(x: Tensor, elem: float, num: int) -> Tensor:
- """Top pad a batched tensor with an element.
- Args:
- x: batch_n x n x m.
- elem: element to pad with.
- num: how many rows filled with elem to add.
- Returns:
- A batch_n x (num + n) x m tensor. The new elements are all filled with elem.
- """
- pad = torch.ones(x.shape[0], num, x.shape[2]) * elem
- return torch.cat((pad, x), dim=1)
-
- @staticmethod
- def pad_bottom(x: Tensor, elem: float, num: int) -> Tensor:
- """Bottom pad a batched tensor with an element.
- Args:
- x: batch_n x n x m.
- elem: element to pad with.
- num: how many rows filled with elem to add.
- Returns:
- A batch_n x (n + num) x m tensor. The new elements are all filled with elem.
- """
- pad = torch.ones(x.shape[0], num, x.shape[2]) * elem
- return torch.cat((x, pad), dim=1)
-
- @staticmethod
- def diag_append(x: Tensor, elem: float, num: int) -> Tensor:
- """Diagonally pad a batched tensor with the identity times an element after the orginal.
- For each batched matrix, make the new matrix block diagonal with the original matrix in the
- upper left corner and eye(num) * elem in the bottom right corner.
- Args:
- x: batch_n x n x m.
- elem: element to pad with.
- num: the size of the identity matrix.
- Returns:
- A batch_n x (n + num) x (m + num) tensor. The new elements are all filled with elem.
- """
-
- batch_n = x.shape[0]
-
- brblock = torch.eye(num).unsqueeze(0).repeat(batch_n, 1, 1) * elem
- bottom_zeros = torch.zeros(batch_n, num, x.shape[2])
- bottom_block = torch.cat((bottom_zeros, brblock), dim=2)
-
-        x = SoPhTERTensorUtils.pad_right(x, 0, num)
- x = torch.cat((x, bottom_block), dim=1)
- return x
-
- @staticmethod
- def diag_prepend(x: Tensor, elem: float, num: int) -> Tensor:
- """Diagonally pad a batched tensor with the identity times an element before the orginal.
- For each batched matrix, make the new matrix block diagonal with eye(num) * elem in the
- upper left corner and the original matrix in the bottom right corner.
- Args:
- x: batch_n x n x m.
- elem: element to pad with.
- num: the size of the identity matrix.
- Returns:
- A batch_n x (num + n) x (num + m) tensor. The new elements are all filled with elem.
- """
- batch_n = x.shape[0]
-
- tlblock = torch.eye(num).unsqueeze(0).repeat(batch_n, 1, 1) * elem
- top_zeros = torch.zeros(batch_n, num, x.shape[2])
- top_block = torch.cat((tlblock, top_zeros), dim=2)
-
-        x = SoPhTERTensorUtils.pad_left(x, 0, num)
- x = torch.cat((top_block, x), dim=1)
- return x
-
- @staticmethod
-    def robust_sqrt(out_squared: Tensor, eps = 1e-8) -> Tensor:
-        """Square root of ``out_squared``, linearized below ``eps ** 2`` so
-        that the gradient stays bounded near zero."""
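-        # For out_squared >= eps**2 this is an exact square root; below that,
-        # the linear branch out_squared / (2 * eps) + eps / 2 matches sqrt in
-        # both value (eps) and slope (1 / (2 * eps)) at the switch point, so
-        # gradients stay finite at zero.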
- out = torch.zeros(out_squared.shape)
- out_big = out_squared >= eps ** 2
- out_small = torch.logical_not(out_big)
- out[out_big] = torch.sqrt(out_squared[out_big])
- out[out_small] = out_squared[out_small] * 0.5 / eps + 0.5 * eps
- return out
-
- @staticmethod
- def block_diag(m):
- """
- Make a block diagonal matrix along dim=-3
- EXAMPLE:
- block_diag(torch.ones(4,3,2))
- should give a 12 x 8 matrix with blocks of 3 x 2 ones.
- Prepend batch dimensions if needed.
- You can also give a list of matrices.
- :type m: torch.Tensor, list
- :rtype: torch.Tensor
- """
-        if isinstance(m, list):
-            # Remove Nones from the list. Note ``utils`` here is SoPhTER's
-            # utility module, which this copied snippet still assumes is in
-            # scope.
-            m = utils.filter_none(m)
-
- m = torch.cat([m1.unsqueeze(-3) for m1 in m], -3)
-
- d = m.dim()
- n = m.shape[-3]
- siz0 = m.shape[:-3]
- siz1 = m.shape[-2:]
- m2 = m.unsqueeze(-2)
-        eye = SoPhTERTensorUtils.attach_dim(torch.eye(n).unsqueeze(-2), d - 3, 1)
- return (m2 * eye).reshape(siz0 + torch.Size(torch.tensor(siz1) * n))
-
- @staticmethod
- def attach_dim(v, n_dim_to_prepend=0, n_dim_to_append=0):
- return v.reshape(
- torch.Size([1] * n_dim_to_prepend) + v.shape + torch.Size([1] * n_dim_to_append))
-
-
-
-# NOTE: sophter_loss mirrors SoPhTER's loss implementation for comparison
-# with PLL. It still references ``self`` (the SoPhTER system wrapper), its
-# ``config``, and SoPhTER's ``utils`` module, so it is not runnable as a free
-# function without binding those names.
-def sophter_loss(x: Tensor, u: Tensor, x_plus: Tensor):
- sp = self.system.params
- config = self.config
- poly = self.interaction.poly
-
- # Get configuration / velocity for polygon
- configuration = x[..., :NQ] #poly.get_configuration_previous() # noqa
- configuration_plus = x_plus[..., :NQ] #poly.get_configuration() # noqa
- velocity = x[..., NQ:] #poly.get_velocity_previous()
- velocity_plus = x_plus[..., NQ:] #poly.get_velocity()
- control = u #poly.get_control_previous()
-
- batch_n = self.interaction.batch_n()
- bases_n = self.G_bases.shape[0]
- k = self.interaction.contact_n()
-
- G = self.compute_G()
-
- phi = self.interaction.compute_phi_previous()
- phi_plus = self.interaction.compute_phi_history()
-
- Jn = self.interaction.compute_Jn_previous()
- Jt_tilde = self.interaction.compute_Jt_tilde_previous()
- J_tilde = torch.cat((Jn, Jt_tilde), dim=1)
-
- E_2 = SoPhTERTensorUtils.veceye(k, 2).unsqueeze(0).repeat(batch_n, 1, 1)
-
- gamma = self.interaction.compute_gamma_previous()
- f = poly.compute_f_previous(sp)
-
- M = self.interaction.compute_M_previous()
- M_i = self.interaction.compute_M_i_previous()
-
- F_data = M.bmm(velocity_plus - f)
-
- # Optimization variables are lambda_n, lambda_t
- def normal_mat_pad(x): return SoPhTERTensorUtils.diag_prepend(x, 0, k)
- def normal_vec_pad(x): return SoPhTERTensorUtils.pad_left(x, 0, k)
-
- def tangent_mat_pad(x): return SoPhTERTensorUtils.diag_append(x, 0, 2 * k)
- def tangent_vec_pad(x): return SoPhTERTensorUtils.pad_right(x, 0, 2 * k)
-
-
- # lambda_n and phi complementarity
- comp_n_A = config.w_comp_n * SoPhTERTensorUtils.matrix_diag((phi_plus.squeeze(2) ** 2))
- comp_n_A = tangent_mat_pad(comp_n_A)
-
-
- # lambda_t and phi complementarity
- phi_expand = E_2.bmm(phi_plus)
- comp_t_A = config.w_comp_t * SoPhTERTensorUtils.matrix_diag((phi_expand.squeeze(2) ** 2))
- comp_t_A = normal_mat_pad(comp_t_A)
-
-
- # Match impulse data (multiply by M_i to get scaling)
- # Term of form (M_i gamma^T [Jn, Jt_tilde]^T lambda - M_i F)^2
- match_quad_A = M_i.bmm(gamma.transpose(1, 2)).bmm(J_tilde.transpose(1, 2))
- match_quad_b = M_i.bmm(F_data)
- match_A = config.w_match * match_quad_A.transpose(1, 2).bmm(match_quad_A)
- match_b = config.w_match * (-2) * match_quad_b.transpose(1, 2).bmm(match_quad_A)
- match_c = config.w_match * match_quad_b.transpose(1, 2).bmm(match_quad_b)
-
- # Friction cone boundary
- sliding_vels = Jt_tilde.bmm(gamma).bmm(velocity_plus)
- cone_normal_mat = SoPhTERTensorUtils.matrix_diag(sliding_vels.squeeze(2)).bmm(E_2)
-
- sliding_vel_norms = E_2.transpose(1, 2).bmm(sliding_vels.mul(sliding_vels))
-
- if config.robust_sqrt:
- sliding_vel_norms = E_2.bmm(SoPhTERTensorUtils.robust_sqrt(sliding_vel_norms))
- else:
- sliding_vel_norms = E_2.bmm(torch.sqrt(sliding_vel_norms))
-
- cone_tangent_mat = SoPhTERTensorUtils.matrix_diag(sliding_vel_norms.squeeze(2))
- cone_mat = torch.cat((cone_normal_mat, cone_tangent_mat), dim=2)
-
- cone_A = config.w_cone * cone_mat.transpose(1, 2).bmm(cone_mat)
-
-
- A = comp_n_A + comp_t_A + match_A + cone_A
- b = match_b
- c = match_c
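-    # A, b, c define one quadratic objective lambda^T A lambda + b lambda + c
-    # over the stacked impulses lambda (3k per batch element), minimized below
-    # by the QCQP solver from a random initial guess.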
-
-
- try:
- full_sol = self.qcqp(2 * A, b.transpose(1,2), torch.rand(b.transpose(1,2).size()), 1e-10, 10000)
- if torch.any(torch.isnan(full_sol)):
- return [torch.tensor([[[0.0]]])]
- except Exception:
- print('LCQP solve fail')
- return [torch.tensor([[[0.0]]])]
-
- # sum in case batch_n > 1
- qp_loss = utils.compute_quadratic_loss(A, b, c, full_sol).sum()
- contact_mask = torch.norm(F_data, 2, dim=1).unsqueeze(2) > config.w_contact_threshold
- qp_loss = qp_loss * contact_mask.int()
- qp_loss = qp_loss.sum()
- b_zero = torch.zeros(batch_n, 1, 3 * k)
- c_zero = torch.zeros(batch_n, 1, 1)
-
-
- loss_terms = [utils.compute_quadratic_loss(comp_n_A, b_zero, c_zero, full_sol).sum(),
- utils.compute_quadratic_loss(comp_t_A, b_zero, c_zero, full_sol).sum(),
- utils.compute_quadratic_loss(match_A, match_b, match_c, full_sol).sum(),
- utils.compute_quadratic_loss(cone_A, b_zero, c_zero, full_sol).sum()]
-
-
- regularizers = []
-
- ##### penalize penetration:
- def phi_penalizer(phi): return torch.sum(torch.clamp(-phi, min=0) ** 2)
- pen_loss = config.w_penetration * phi_penalizer(phi_plus)
- regularizers.append(pen_loss)
-
-
- ##### constrain config grad normal:
-    # Squared cost constraining the configuration-gradient norms of phi to be one
- pos_norms = torch.norm(Jn[:, :, 0:3], dim=2)
- grad_normal_loss = config.w_config_grad_normal * \
- ((pos_norms - torch.ones(pos_norms.shape)) ** 2).sum()
- regularizers.append(grad_normal_loss)
-
- ##### constrain config grad tangent:
- # L1 cost constraining phi_t norms w.r.t configuration to be one
- # NOTE: THIS IS BROKEN, DOESN'T NEED TO BE UNIT, NEEDS TO BE MU!!
- regularizers.append(torch.tensor(0.0))
-
-
- ##### constrain config grad perp:
-    # Squared cost penalizing non-orthogonality between phi_t and phi gradients
- if torch.norm(Jt_tilde) == 0.0:
- grad_perp_loss = torch.tensor(0.0)
- else:
- def normalize(vecs: Tensor) -> Tensor:
- norms = vecs.norm(dim=2).unsqueeze(2).repeat(1, 1, 3)
- return vecs / norms
- pos_normals = normalize(Jn[:, :, 0:3])
- pos_normals = pos_normals.repeat(1, 1, 2).reshape(batch_n, k * 2, 3)
- pos_tangents = normalize(Jt_tilde[:, :, 0:3])
-
- grad_perp_loss = config.w_config_grad_perp * \
- ((pos_normals * pos_tangents).sum(dim=2) ** 2).sum()
- regularizers.append(grad_perp_loss)
-
-
- ##### constrain st estimate normal:
- # L2 cost on phi_plus_hat deviating from phi_plus
- phi_plus_hat = phi + sp.dt * Jn.bmm(gamma).bmm(velocity_plus)
- st_pen_loss = config.w_st_estimate_pen * \
- torch.sum(torch.clamp(-phi_plus_hat, min=0) ** 2) # /batch_n
- regularizers.append(st_pen_loss)
-
- phi_norm = (torch.norm(phi_plus - phi_plus_hat, dim=1) ** 2).sum() # /batch_n
- st_normal_loss = config.w_st_estimate_normal * phi_norm
- regularizers.append(st_normal_loss)
-
-
- ##### constrain st estimate tangent:
- phi_t = self.interaction.compute_phi_t_previous()
- phi_t_plus = self.interaction.compute_phi_t_history()
- phi_t_plus_hat = phi_t + sp.dt * Jt_tilde.bmm(gamma).bmm(velocity_plus)
- phi_t_norm = (torch.norm(phi_t_plus - phi_t_plus_hat, dim=1) ** 2).sum()
- st_tangent_loss = config.w_st_estimate_tangent * phi_t_norm
- regularizers.append(st_tangent_loss)
-
- # Penalize second derivative of tangent jacobian
- Jt_tilde_plus = self.interaction.compute_Jt_tilde_history()
- delta_vc = (Jt_tilde_plus - Jt_tilde).bmm(gamma).bmm(velocity_plus)
- vc_norm = (torch.norm(delta_vc, dim=1) ** 2).sum()
- tangent_jac_d2_loss = config.w_tangent_jac_d2 * vc_norm
- regularizers.append(tangent_jac_d2_loss)
-
- total_loss = qp_loss + 0 # Make new variable by adding 0
- for regularizer in regularizers:
- total_loss = total_loss + regularizer
-
- return [total_loss, qp_loss] + loss_terms + regularizers
-
-
-def pll_loss(x: Tensor, u: Tensor, x_plus: Tensor
-             ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
- pass
-
-
-def get_terms(x_plus: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
- q_plus = x_plus[..., :NQ]
- v_plus = x_plus[..., NQ:]
-
- # Begin loss calculation.
- delassus, M, J, phi, non_contact_acceleration = self.multibody_terms(
- q_plus, v_plus, u)
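-    # delassus (J M^-1 J^T), mass matrix M, contact Jacobian J, signed
-    # distances phi, and the contact-free acceleration; this helper is an
-    # unfinished stub and currently returns nothing.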
-
-
-def create_pll_experiment():
- # Describes the optimizer settings.
- optimizer_config = OptimizerConfig()
- optimizer_config.batch_size.value = 1
-
- # Describes the ground truth system.
- urdfs = {SYSTEM_NAME: file_utils.get_asset(TRUE_CUBE_URDF)}
- base_config = DrakeSystemConfig(urdfs=urdfs)
-
- # Describes the learnable system.
- learnable_config = MultibodyLearnableSystemConfig(
- urdfs={SYSTEM_NAME: file_utils.get_asset(LARGE_CUBE_URDF)},
- loss=MultibodyLosses.CONTACTNETS_LOSS,
- inertia_mode=0)
-
- # Describe data source
- data_generation_config = None
- import_directory = None
- dynamic_updates_from = None
- x_0 = X_0S[system]
- if simulation:
- pass
- elif real:
- # otherwise, specify directory with [T, n_x] tensor files saved as
- # 0.pt, 1.pt, ...
- # See :mod:`dair_pll.state_space` for state format.
- import_directory = file_utils.get_asset(data_asset)
- print(f'Getting real trajectories from {import_directory}\n')
- else:
- dynamic_updates_from = DYNAMIC_UPDATES_FROM
-
- # Describes configuration of the data
- data_config = DataConfig(
- storage=storage_name,
- # where to store data
- dt=DT,
- train_fraction=1.0 if dynamic else 0.5,
- valid_fraction=0.0 if dynamic else 0.25,
- test_fraction=0.0 if dynamic else 0.25,
- generation_config=data_generation_config,
- import_directory=import_directory,
- dynamic_updates_from=dynamic_updates_from,
- t_prediction=1 if contactnets else T_PREDICTION,
- n_import=dataset_size if real else None)
-
- # Combines everything into config for entire experiment.
- experiment_config = SupervisedLearningExperimentConfig(
- base_config=base_config,
- learnable_config=learnable_config,
- optimizer_config=optimizer_config,
- data_config=data_config,
- full_evaluation_period=EPOCHS if dynamic else 1,
- # full_evaluation_samples=dataset_size, # use all available data for eval
- run_tensorboard=tb,
- gen_videos=videos,
- update_geometry_in_videos=True
- )
-
- # Makes experiment.
- experiment = DrakeMultibodyLearnableExperiment(experiment_config)
\ No newline at end of file
diff --git a/dair_pll_old/test/startup_test.bash b/dair_pll_old/test/startup_test.bash
deleted file mode 100644
index ee41c8c..0000000
--- a/dair_pll_old/test/startup_test.bash
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-#SBATCH --gpus=0
-#SBATCH --mem-per-cpu=10G
-#SBATCH --time=24:00:00
-#SBATCH --qos=low
-#SBATCH --job-name=pll_t0
-#SBATCH --output=/mnt/beegfs/scratch/bibit/pll_env/dair_pll/logs/slurm_t0.txt
-#SBATCH --exclude=node-3090-1,node-3090-2,node-3090-3,node-1080ti-0,node-2080ti-7,node-v100-0
-
-echo "display" >> /mnt/beegfs/scratch/bibit/pll_env/dair_pll/logs/start_t0.txt
-#Xvfb :6 -screen 0 800x600x24 &
-source /mnt/beegfs/scratch/bibit/pll_env/dair_pll/../bin/activate;
-export PYTHONPATH=/mnt/beegfs/scratch/bibit/pll_env/dair_pll;
-export DISPLAY=:5;
-export XDG_RUNTIME_DIR=/mnt/beegfs/scratch/bibit/tmp;
-
-
-echo "meshcat server"
-meshcat-server &
-
-echo "open meshcat browswer in screen"
-xvfb-run --server-num="$SLURM_JOBID" --server-args="-screen 0 800x600x24" /mnt/beegfs/scratch/bibit/firefox/firefox http://127.0.0.1:7000/static/ &
-
-
-echo "train"
-python /mnt/beegfs/scratch/bibit/pll_env/dair_pll/examples/contactnets_simple.py t0 --source real &> /mnt/beegfs/scratch/bibit/pll_env/dair_pll/logs/train_t0.txt
-
-echo "killing meshcat server and firefox"
-kill %%
diff --git a/dair_pll_old/test/tb_test.py b/dair_pll_old/test/tb_test.py
deleted file mode 100644
index 9bbbde7..0000000
--- a/dair_pll_old/test/tb_test.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import os
-import os.path as op
-import git
-import click
-import time
-import multiprocessing
-
-
-@click.command()
-@click.argument('name')
-@click.option('--local/--cluster',
- default=False,
- help="whether running script locally or on cluster.")
-def main_command(name: str, local: bool):
- # get the git repository folder
- repo = git.Repo(search_parent_directories=True)
- git_folder = repo.git.rev_parse("--show-toplevel")
- git_folder = op.normpath(git_folder)
-
- # get the tensorboard bash script
- tb_script = op.join(git_folder, 'examples', 'tensorboard.bash')
-
- # make a tensorboard log file
- tb_logfile = op.join(git_folder, 'logs', 'tensorboard_' + name + '.txt')
- os.system(f'rm {tb_logfile}')
- tb_folder = op.join(git_folder, 'results', name, 'tensorboard')
-
- # make and start tensorboard command
- if local:
- tboard_cmd = f'bash {tb_script} {tb_folder} {name} >> {tb_logfile}'
- else:
- tboard_cmd = f'sbatch --output={tb_logfile} --job-name=tb_{name} {tb_script} {tb_folder} {name}'
-
- print(f'\ntboard_cmd:\n{tboard_cmd}\n')
-
- thread = multiprocessing.Process(target=os.system, args=(tboard_cmd,))
- thread.start()
-
- # wait for tensorboard url
- print('Waiting on TensorBoard startup ...')
- lines = []
- while not op.exists(tb_logfile):
- time.sleep(0.1)
- while len(lines) < 1:
- with open(tb_logfile) as f:
- lines = f.readlines()
- time.sleep(1.0)
- print('')
- print(f'TensorBoard running on {lines[0]}')
- print('')
- print('Running training setup')
-
-
-
-if __name__ == '__main__':
- main_command()
diff --git a/dair_pll_old/test/test_drake_simulator.py b/dair_pll_old/test/test_drake_simulator.py
deleted file mode 100644
index b0792b2..0000000
--- a/dair_pll_old/test/test_drake_simulator.py
+++ /dev/null
@@ -1,602 +0,0 @@
-import argparse
-import datetime
-import matplotlib.pyplot as plt
-import numpy as np
-import os
-import random
-import time
-import yaml
-import sys
-import torch
-from scipy.spatial.transform import Rotation as R
-import pydrake
-from pydrake.all import StartMeshcat, RandomGenerator, BodyIndex, Parser
-from pydrake.common import FindResourceOrThrow
-from pydrake.common.eigen_geometry import Quaternion, AngleAxis, Isometry3
-from pydrake.geometry import (
- Box,
- HalfSpace,
- SceneGraph,
- Sphere,
-)
-from pydrake.math import (RollPitchYaw, RotationMatrix, RigidTransform)
-from pydrake.multibody.tree import (
- PrismaticJoint,
- SpatialInertia,
- UniformGravityFieldElement,
- UnitInertia,
- world_model_instance
-)
-from pydrake.multibody.math import SpatialVelocity
-from pydrake.multibody.plant import (
- AddMultibodyPlantSceneGraph,
- CoulombFriction,
- MultibodyPlant
-)
-
-from pydrake.forwarddiff import gradient
-from pydrake.multibody.parsing import Parser
-from pydrake.multibody.inverse_kinematics import InverseKinematics
-from pydrake.systems.analysis import Simulator
-from pydrake.systems.framework import DiagramBuilder
-from pydrake.geometry import MeshcatVisualizer, MeshcatVisualizerParams
-
-"""Simulate a cube toss using the trajectory from the learned model.
-"""
-
-urdf_file = "../assets/contactnets_cube.urdf"
-def AddShape(plant, shape, name, mass=1, mu=1, com=np.array([0.0, 0.0, 0.0]), inertia=UnitInertia(), color=[0.5, 0.5, 0.9, 1.0]):
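-    """Add a rigid body with the given shape to the plant, registering visual
-    and collision geometry; boxes also get tiny spheres at their corners so
-    contact happens at the vertices."""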
- instance = plant.AddModelInstance(name)
- body = plant.AddRigidBody(
- name,
- instance,
- SpatialInertia(
- mass=mass, p_PScm_E=com, G_SP_E=inertia
- ),
- )
- if plant.geometry_source_is_registered():
- if isinstance(shape, Box):
- plant.RegisterCollisionGeometry(
- body,
- RigidTransform(),
- Box(
- shape.width() - 0.001,
- shape.depth() - 0.001,
- shape.height() - 0.001,
- ),
- name,
- CoulombFriction(mu, mu),
- )
- i = 0
- for x in [-shape.width() / 2.0, shape.width() / 2.0]:
- for y in [-shape.depth() / 2.0, shape.depth() / 2.0]:
- for z in [-shape.height() / 2.0, shape.height() / 2.0]:
- plant.RegisterCollisionGeometry(
- body,
- RigidTransform([x, y, z]),
- Sphere(radius=1e-7),
- f"contact_sphere{i}",
- CoulombFriction(mu, mu),
- )
- i += 1
- else:
- plant.RegisterCollisionGeometry(
- body, RigidTransform(), shape, name, CoulombFriction(mu, mu)
- )
-
- plant.RegisterVisualGeometry(
- body, RigidTransform(), shape, name, color
- )
-
- return instance
-
-def AddGround(plant):
- ground_instance = plant.AddModelInstance("ground")
- world_body = plant.world_body()
- ground_shape = Box(10., 10., 10.)
- ground_body = plant.AddRigidBody("ground", ground_instance, SpatialInertia(
- mass=10.0, p_PScm_E=np.array([0., 0., 0.]),
- G_SP_E=UnitInertia(1.0, 1.0, 1.0)))
- plant.WeldFrames(world_body.body_frame(), ground_body.body_frame(),
- RigidTransform(Isometry3(rotation=np.eye(3), translation=[0, 0, -5])))
- plant.RegisterVisualGeometry(
- ground_body, RigidTransform.Identity(), ground_shape, "ground_vis",
- np.array([0.5, 0.5, 0.5, 1.]))
- plant.RegisterCollisionGeometry(
- ground_body, RigidTransform.Identity(), ground_shape, "ground_col",
- CoulombFriction(0.9, 0.8))
-
-def simulate_cube_toss(params, trajectory_dir):
- """Simulate based on physical params
-
- Args:
- params (Dict): _description_
- trajectory_dir (str): _description_
- """
- if params == None:
- cube_body_m = 0.37
- cube_body_com_x = 0
- cube_body_com_y = 0
- cube_body_com_z = 0
- cube_body_Ixx = 0.00081
- cube_body_Iyy = 0.00081
- cube_body_Izz = 0.00081
- cube_body_Ixy = 0.0
- cube_body_Ixz = 0.0
- cube_body_Iyz = 0.0
- cube_body_mu = 0.15
- cube_body_len_x = 0.1048
- cube_body_len_y = 0.1048
- cube_body_len_z = 0.1048
- else:
- cube_body_m = params['cube_body_m']
- cube_body_com_x = params['cube_body_com_x']
- cube_body_com_y = params['cube_body_com_y']
- cube_body_com_z = params['cube_body_com_z']
- cube_body_Ixx = params['cube_body_I_xx']
- cube_body_Iyy = params['cube_body_I_yy']
- cube_body_Izz = params['cube_body_I_zz']
- cube_body_Ixy = params['cube_body_I_xy']
- cube_body_Ixz = params['cube_body_I_xz']
- cube_body_Iyz = params['cube_body_I_yz']
- cube_body_mu = params['cube_body_mu']
- cube_body_len_x = params['cube_body_len_x']
- cube_body_len_y = params['cube_body_len_y']
- cube_body_len_z = params['cube_body_len_z']
-
- # np.random.seed(42)
- # random.seed(42)
- # rng = np.random.default_rng(135) # this is for python
- # generator = RandomGenerator(rng.integers(0, 1000))
- builder = DiagramBuilder()
- plant, scene_graph = AddMultibodyPlantSceneGraph(
- builder, MultibodyPlant(time_step=0.0001))
- shape = Box(cube_body_len_x, cube_body_len_y, cube_body_len_z)
- name = 'cube'
- com = np.array([cube_body_com_x, cube_body_com_y, cube_body_com_z])
- inertia = UnitInertia(cube_body_Ixx, cube_body_Iyy, cube_body_Izz, cube_body_Ixy, cube_body_Ixz, cube_body_Iyz)
- AddShape(plant, shape, name, mass=cube_body_m, mu=cube_body_mu, com=com, inertia=inertia, color=[0.5, 0.5, 0.9, 1.0])
- AddGround(plant)
- plant.Finalize()
-
- meshcat = StartMeshcat()
- params = MeshcatVisualizerParams()
- visualizer = MeshcatVisualizer.AddToBuilder(builder, scene_graph, meshcat, params)
-
- diagram = builder.Build()
- diagram_context = diagram.CreateDefaultContext()
- plant_context = diagram.GetMutableSubsystemContext(plant, diagram_context)
- sg_context = diagram.GetMutableSubsystemContext(scene_graph, diagram_context)
- q0 = plant.GetPositions(plant_context).copy()
- v0 = plant.GetVelocities(plant_context).copy()
- plant.SetPositions(plant_context, q0)
- plant.SetVelocities(plant_context, v0)
- traj = torch.load(trajectory_dir)[0, :] #q_t(wxyz), p_t, w_t, dp_t
- print(f'traj: {traj.size()}')
- # print(type(traj[4:7]))
- # print(traj[4:7].shape)
- p_t = traj[:3]
- q_t = traj[3:7]
- dp_t = traj[7:10]
- w_t = traj[10:]
- print(q_t.shape, p_t.shape, dp_t.shape, w_t.shape)
- rot = RotationMatrix(R.from_quat(traj[3:7]).as_matrix())
- pose = RigidTransform(rot, traj[:3].numpy())
- vel = SpatialVelocity(traj[10:13].numpy(), traj[7:10].numpy())
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel, plant_context)
- simulator = Simulator(diagram, diagram_context)
- simulator.set_target_realtime_rate(1.0)
- simulator.set_publish_every_time_step(False)
- q0_final = plant.GetPositions(plant_context).copy()
- print(q0, q0_final)
- while True:
- simulator.AdvanceTo(simulator.get_context().get_time() + 2.0)
- cube_position = plant.EvalBodyPoseInWorld(plant_context, plant.get_body(BodyIndex(0))).translation()
- print("Reinitializing...")
- if cube_position[2] <= 0.01:
- # The cube has landed, so reset the simulation
- simulator.get_mutable_context().SetTime(0.)
- plant.SetPositions(plant_context, q0)
- plant.SetVelocities(plant_context, v0)
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel, plant_context)
- simulator.Initialize()
-
-def simulate_cube_toss_with_traj(params, trajectory_dir):
- """Simulate a trajectory
-
- Args:
- params (Dict): _description_
- trajectory_dir (str): _description_
- """
- print(f'Reading traj from {trajectory_dir}')
- if params == None:
- cube_body_m = 0.37
- cube_body_com_x = 0
- cube_body_com_y = 0
- cube_body_com_z = 0
- cube_body_Ixx = 0.00081
- cube_body_Iyy = 0.00081
- cube_body_Izz = 0.00081
- cube_body_Ixy = 0.0
- cube_body_Ixz = 0.0
- cube_body_Iyz = 0.0
- cube_body_mu = 0.15
- cube_body_len_x = 0.1048
- cube_body_len_y = 0.1048
- cube_body_len_z = 0.1048
- else:
- cube_body_m = params['cube_body_m']
- cube_body_com_x = params['cube_body_com_x']
- cube_body_com_y = params['cube_body_com_y']
- cube_body_com_z = params['cube_body_com_z']
- cube_body_Ixx = params['cube_body_I_xx']
- cube_body_Iyy = params['cube_body_I_yy']
- cube_body_Izz = params['cube_body_I_zz']
- cube_body_Ixy = params['cube_body_I_xy']
- cube_body_Ixz = params['cube_body_I_xz']
- cube_body_Iyz = params['cube_body_I_yz']
- cube_body_mu = params['cube_body_mu']
- cube_body_len_x = params['cube_body_len_x']
- cube_body_len_y = params['cube_body_len_y']
- cube_body_len_z = params['cube_body_len_z']
- traj = torch.load(trajectory_dir) #q_t(wxyz), p_t, w_t, dp_t
- print(f'traj loaded: {traj.size()}') #N,13
- p_t = traj[:,4:7].numpy() #N,3
- q_t = traj[:,:4].numpy() #N,4, w,x,y,z
- q_t_shuffled = np.concatenate((q_t[:, 1:], q_t[:, 0].reshape(-1,1)), axis=1) ##N,4, x,y,z,w
- dp_t = traj[:,10:].numpy() #N,3
- w_t = traj[:,7:10].numpy() #N,3, in body frame
- w_t_world = np.zeros_like(w_t)
- for i in range(traj.shape[0]):
- rot = R.from_quat(q_t_shuffled[i]).as_matrix()
- w_t_i = w_t[i]
- w_t_world[i] = rot @ w_t_i.T
- # w_t_world = R.from_quat(q_t_shuffled).as_matrix() @ w_t.T
- print(p_t.shape, q_t_shuffled.shape, dp_t.shape, w_t_world.shape)
-
- builder = DiagramBuilder()
- plant, scene_graph = AddMultibodyPlantSceneGraph(
- builder, MultibodyPlant(time_step=0.0001))
- shape = Box(cube_body_len_x, cube_body_len_y, cube_body_len_z)
- name = 'cube'
- com = np.array([cube_body_com_x, cube_body_com_y, cube_body_com_z])
- inertia = UnitInertia(cube_body_Ixx, cube_body_Iyy, cube_body_Izz, cube_body_Ixy, cube_body_Ixz, cube_body_Iyz)
- AddShape(plant, shape, name, mass=cube_body_m, mu=cube_body_mu, com=com, inertia=inertia, color=[0.5, 0.5, 0.9, 1.0])
- # AddGround(plant)
- # add ground at z=0
- halfspace_transform = RigidTransform()
- friction = CoulombFriction(1.0, 1.0)
- plant.RegisterCollisionGeometry(plant.world_body(), halfspace_transform,
- HalfSpace(), "ground", friction)
- plant.Finalize()
-
- meshcat = StartMeshcat()
- params = MeshcatVisualizerParams()
- visualizer = MeshcatVisualizer.AddToBuilder(builder, scene_graph, meshcat, params)
-
- diagram = builder.Build()
- diagram_context = diagram.CreateDefaultContext()
- plant_context = diagram.GetMutableSubsystemContext(plant, diagram_context)
- sg_context = diagram.GetMutableSubsystemContext(scene_graph, diagram_context)
- q0 = plant.GetPositions(plant_context).copy()
- v0 = plant.GetVelocities(plant_context).copy()
- plant.SetPositions(plant_context, q0)
- plant.SetVelocities(plant_context, v0)
- simulator = Simulator(diagram, diagram_context)
- simulator.set_target_realtime_rate(1.0)
- simulator.set_publish_every_time_step(False)
- # Set initial system states
- p_t_0 = p_t[0]
- q_t_0 = q_t_shuffled[0]
- dp_t_0 = dp_t[0]
- w_t_0 = w_t_world[0]
- rot_0 = RotationMatrix(R.from_quat(q_t_0).as_matrix())
- pose_0 = RigidTransform(rot_0, p_t_0)
- vel_0 = SpatialVelocity(w_t_0, dp_t_0)
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose_0)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel_0, plant_context)
- simulator_time = 0.0
- time_step = 0.01
- trajectory_length = len(q_t)
- playback_speed = 1.0
- while True:
- for i in range(1, trajectory_length):
- print(f'z position: {p_t[i, -1]}')
- target_time = i * time_step
- if simulator_time < target_time:
- simulator.AdvanceTo(target_time)
- simulator_time = simulator.get_context().get_time()
- q_t_i = q_t_shuffled[i]
- p_t_i = p_t[i]
- dp_t_i = dp_t[i]
- w_t_i = w_t_world[i]
- rot_i = RotationMatrix(R.from_quat(q_t_i).as_matrix())
- pose_i = RigidTransform(rot_i, p_t_i)
- vel_i = SpatialVelocity(w_t_i, dp_t_i)
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose_i)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel_i, plant_context)
- time.sleep(time_step / playback_speed)
- print("Reinitializing...")
- simulator_time = 0.0
- simulator.get_mutable_context().SetTime(0.)
- plant.SetPositions(plant_context, q0)
- plant.SetVelocities(plant_context, v0)
- p_t_0 = p_t[0]
- q_t_0 = q_t_shuffled[0]
- dp_t_0 = dp_t[0]
- w_t_0 = w_t_world[0]
- rot_0 = RotationMatrix(R.from_quat(q_t_0).as_matrix())
- pose_0 = RigidTransform(rot_0, p_t_0)
- vel_0 = SpatialVelocity(w_t_0, dp_t_0)
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose_0)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel_0, plant_context)
- simulator.Initialize()
-
-def simulate_cube_and_franka(trajectory_dir):
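-    """Load a Franka arm and the cube model, then play back a saved cube
-    trajectory by directly setting the cube's positions and velocities."""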
-
- traj = torch.load(trajectory_dir) #q_t(wxyz), p_t, w_t, dp_t
- print(f'traj loaded: {traj.size()}') #N,13
- p_t = traj[:,4:7].numpy() #N,3
- q_t = traj[:,:4].numpy() #N,4, w,x,y,z
- # q_t_shuffled = np.concatenate((q_t[:, 1:], q_t[:, 0].reshape(-1,1)), axis=1) ##N,4, x,y,z,w
- dp_t = traj[:,10:].numpy() #N,3
- w_t = traj[:,7:10].numpy() #N,3, in body frame
- w_t_world = np.zeros_like(w_t)
- for i in range(traj.shape[0]):
- rot = R.from_quat(q_t[i]).as_matrix()
- w_t_i = w_t[i]
- w_t_world[i] = rot @ w_t_i.T
- # w_t_world = R.from_quat(q_t_shuffled).as_matrix() @ w_t.T
- print(p_t.shape, q_t.shape, dp_t.shape, w_t_world.shape)
- traj = np.concatenate((q_t, p_t, w_t, dp_t), axis=1) #N,13
- meshcat = StartMeshcat()
- builder = DiagramBuilder()
- plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=0.0)
- X_model = RigidTransform.Identity()
- parser = Parser(plant)
- model_file = FindResourceOrThrow(
- "drake/manipulation/models/franka_description/urdf/panda_arm_hand_wide_finger.urdf"
- )
- cube_model_file = FindResourceOrThrow("drake/../../../../../dair_pll_latest/assets/contactnets_cube_sim.urdf")
- model = parser.AddModelFromFile(model_file)
- cube_model = parser.AddModelFromFile(cube_model_file)
-
- # plant.WeldFrames(
- # plant.world_frame(),
- # plant.GetFrameByName("body", cube_model),
- # X_model,
- # )
-
- plant.WeldFrames(
- plant.world_frame(),
- plant.GetFrameByName("panda_link0", model),
- X_model,
- )
- plant.Finalize()
- params = MeshcatVisualizerParams()
- visualizer = MeshcatVisualizer.AddToBuilder(
- builder, scene_graph, meshcat, params
- )
- diagram = builder.Build()
- context = diagram.CreateDefaultContext()
- plant_context = plant.GetMyMutableContextFromRoot(context)
- q0 = plant.GetPositions(plant_context).copy()
- v0 = plant.GetVelocities(plant_context).copy()
- plant.get_actuation_input_port().FixValue(plant_context, np.zeros(9))
- simulator = Simulator(diagram, context)
- simulator.set_target_realtime_rate(1.0)
- simulator.set_publish_every_time_step(False)
- simulator_time = 0.0
- time_step = 0.01
- trajectory_length = len(q_t)
- playback_speed = 1.0
- while True:
- for i in range(1, trajectory_length):
- target_time = i * time_step
- if simulator_time < target_time:
- simulator.AdvanceTo(target_time)
- simulator_time = simulator.get_context().get_time()
-
- q_drake = traj[i, :7]
- v_drake = traj[i, 7:]
- print(plant.num_positions(cube_model))
- print(plant.num_velocities(cube_model))
- print(q_drake.shape, v_drake.shape)
- plant.SetPositions(plant_context, cube_model, q_drake)
- plant.SetVelocities(plant_context, cube_model, v_drake)
- time.sleep(time_step / playback_speed)
- print("Reinitializing...")
- simulator_time = 0.0
- simulator.get_mutable_context().SetTime(0.)
- plant.SetPositions(plant_context, q0)
- plant.SetVelocities(plant_context, v0)
- q_drake = traj[0, :7]
- v_drake = traj[0, 7:]
- plant.SetPositions(plant_context, cube_model, q_drake)
- plant.SetVelocities(plant_context, cube_model, v_drake)
- simulator.Initialize()
-
-def simulate_toss_with_traj(trajectory_dir, urdf_dir):
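-    """Play back a saved toss trajectory for an arbitrary object URDF by setting
-    its free-body pose and spatial velocity at every time step."""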
- builder = DiagramBuilder()
- plant, scene_graph = AddMultibodyPlantSceneGraph(
- builder, MultibodyPlant(time_step=0.0001))
- X_model = RigidTransform.Identity()
- parser_ = Parser(plant)
- model_file = FindResourceOrThrow(urdf_dir)
- model = parser_.AddModelFromFile(model_file)
- plant.Finalize()
-
- traj = torch.load(trajectory_dir) #q_t(wxyz), p_t, w_t, dp_t
- print(f'traj loaded: {traj.size()}') #N,13
- p_t = traj[:,4:7].numpy() #N,3
- q_t = traj[:,:4].numpy() #N,4, w,x,y,z
- q_t_shuffled = np.concatenate((q_t[:, 1:], q_t[:, 0].reshape(-1,1)), axis=1) ##N,4, x,y,z,w
- dp_t = traj[:,10:].numpy() #N,3
- w_t = traj[:,7:10].numpy() #N,3, in body frame
- w_t_world = np.zeros_like(w_t)
- for i in range(traj.shape[0]):
- rot = R.from_quat(q_t_shuffled[i]).as_matrix()
- w_t_i = w_t[i]
- w_t_world[i] = rot @ w_t_i.T
- # w_t_world = R.from_quat(q_t_shuffled).as_matrix() @ w_t.T
- print(p_t.shape, q_t_shuffled.shape, dp_t.shape, w_t_world.shape)
-
- meshcat = StartMeshcat()
- params = MeshcatVisualizerParams()
- visualizer = MeshcatVisualizer.AddToBuilder(builder, scene_graph, meshcat, params)
- diagram = builder.Build()
- diagram_context = diagram.CreateDefaultContext()
- plant_context = diagram.GetMutableSubsystemContext(plant, diagram_context)
- sg_context = diagram.GetMutableSubsystemContext(scene_graph, diagram_context)
- q0 = plant.GetPositions(plant_context).copy()
- v0 = plant.GetVelocities(plant_context).copy()
- plant.SetPositions(plant_context, q0)
- plant.SetVelocities(plant_context, v0)
- simulator = Simulator(diagram, diagram_context)
- simulator.set_target_realtime_rate(1.0)
- simulator.set_publish_every_time_step(True)
- # Set initial system states
- p_t_0 = p_t[0]
- q_t_0 = q_t_shuffled[0]
- dp_t_0 = dp_t[0]
- w_t_0 = w_t_world[0]
- rot_0 = RotationMatrix(R.from_quat(q_t_0).as_matrix())
- pose_0 = RigidTransform(rot_0, p_t_0)
- vel_0 = SpatialVelocity(w_t_0, dp_t_0)
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose_0)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel_0, plant_context)
- simulator_time = 0.0
- time_step = 0.01
- trajectory_length = len(q_t)
- playback_speed = 1.0
- while True:
- initial_time = simulator.get_context().get_time()
- for i in range(1, trajectory_length):
- print(f'z position: {p_t[i, -1]}')
- q_t_i = q_t_shuffled[i]
- p_t_i = p_t[i]
- dp_t_i = dp_t[i]
- w_t_i = w_t_world[i]
- rot_i = RotationMatrix(R.from_quat(q_t_i).as_matrix())
- pose_i = RigidTransform(rot_i, p_t_i)
- vel_i = SpatialVelocity(w_t_i, dp_t_i)
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose_i)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel_i, plant_context)
- target_time = initial_time + i * time_step
- simulator.AdvanceTo(target_time)
- time.sleep(time_step / playback_speed)
- simulator.get_mutable_context().SetTime(initial_time)
- print("Reinitializing...")
- simulator_time = 0.0
- simulator.get_mutable_context().SetTime(0.)
- plant.SetPositions(plant_context, q0)
- plant.SetVelocities(plant_context, v0)
- p_t_0 = p_t[0]
- q_t_0 = q_t_shuffled[0]
- dp_t_0 = dp_t[0]
- w_t_0 = w_t_world[0]
- rot_0 = RotationMatrix(R.from_quat(q_t_0).as_matrix())
- pose_0 = RigidTransform(rot_0, p_t_0)
- vel_0 = SpatialVelocity(w_t_0, dp_t_0)
- for body_index in plant.GetFloatingBaseBodies():
- plant.SetFreeBodyPose(plant_context, plant.get_body(body_index), pose_0)
- plant.SetFreeBodySpatialVelocity(plant.get_body(body_index), vel_0, plant_context)
- simulator.Initialize()
-
-def postprocess(folder_path, tosses_to_remove):
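-    """Remove the files for the given 1-indexed toss numbers and renumber the
-    remaining <n>.pt files so the sequence stays contiguous."""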
- tosses_to_remove.sort(reverse=True)
- for toss in tosses_to_remove:
- filename_to_remove = f"{toss - 1}.pt"
- full_path_to_remove = os.path.join(folder_path, filename_to_remove)
- if not os.path.exists(full_path_to_remove):
- print(f"File corresponding to toss {toss} does not exist!")
- continue
- os.remove(full_path_to_remove)
- number_to_remove = toss - 1
- current_number = number_to_remove + 1
- while True:
- current_filename = f"{current_number}.pt"
- full_current_path = os.path.join(folder_path, current_filename)
- if os.path.exists(full_current_path):
- new_filename = f"{current_number - 1}.pt"
- full_new_path = os.path.join(folder_path, new_filename)
- os.rename(full_current_path, full_new_path)
- current_number += 1
- else:
- break
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--toss_id",
- type=int,
- required=True,
- )
- parser.add_argument(
- "--type",
- type=str,
- required=True,
- )
- args = parser.parse_args()
- toss_id = args.toss_id
- toss_type = args.type
- if toss_type == 'napkin':
- napkin_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_napkin/{toss_id-1}.pt'
- napkin_urdf_dir = "drake/../../../../../../../../../../home/cnets-vision/mengti_ws/BundleSDF/assets/gt_napkin.urdf"
- simulate_toss_with_traj(napkin_traj_dir, napkin_urdf_dir)
- # napkin_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_napkin'
- # bad_tosses = [5, 9, 10]
- # postprocess(napkin_dir, bad_tosses)
- elif toss_type == 'bottle':
- bottle_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_bottle'
- bottle_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_bottle/{toss_id-1}.pt'
- bottle_urdf_dir = "drake/../../../../../../../../../../home/cnets-vision/mengti_ws/BundleSDF/assets/gt_bottle.urdf"
- simulate_toss_with_traj(bottle_traj_dir, bottle_urdf_dir)
- elif toss_type == 'cube':
- cube_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/contactnets_cube'
- cube_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/contactnets_cube/{toss_id-1}.pt'
- simulate_cube_toss_with_traj(None, cube_traj_dir)
- elif toss_type == 'milk':
- # milk_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_milk'
- # milk_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_milk/{toss_id-1}.pt'
- milk_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_milk'
- milk_traj_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_milk/{toss_id-1}.pt'
- milk_urdf_dir = "drake/../../../../../../../../../../home/cnets-vision/mengti_ws/BundleSDF/assets/gt_bottle.urdf"
- simulate_toss_with_traj(milk_traj_dir, milk_urdf_dir)
- elif toss_type == 'prism':
- # prism_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_prism'
- # prism_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_prism/{toss_id-1}.pt'
- prism_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_prism'
- prism_traj_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_prism/{toss_id-1}.pt'
- prism_urdf_dir = "drake/../../../../../../../../../../home/cnets-vision/mengti_ws/BundleSDF/assets/gt_prism.urdf"
- simulate_toss_with_traj(prism_traj_dir, prism_urdf_dir)
- elif toss_type == 'toblerone':
- # toblerone_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_toblerone'
- # toblerone_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_toblerone/{toss_id-1}.pt'
- toblerone_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_toblerone'
- toblerone_traj_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_toblerone/{toss_id-1}.pt'
- toblerone_urdf_dir = "drake/../../../../../../../../../../home/cnets-vision/mengti_ws/BundleSDF/assets/gt_toblerone.urdf"
- simulate_toss_with_traj(toblerone_traj_dir, toblerone_urdf_dir)
- elif toss_type == 'half':
- # half_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_half'
- # half_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_half/{toss_id-1}.pt'
- half_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_half'
- half_traj_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_half/{toss_id-1}.pt'
-        # Assumed URDF path (gt_half.urdf is a guess), following the pattern of
-        # the other object types.
-        half_urdf_dir = "drake/../../../../../../../../../../home/cnets-vision/mengti_ws/BundleSDF/assets/gt_half.urdf"
-        simulate_toss_with_traj(half_traj_dir, half_urdf_dir)
- elif toss_type == 'egg':
- # egg_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_egg'
- # egg_traj_dir = f'/home/cnets-vision/mengti_ws/BundleSDF/dair_pll/assets/bundlesdf_egg/{toss_id-1}.pt'
- egg_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_egg'
- egg_traj_dir = f'/home/cnets-vision/mengti_ws/dair_pll_latest/assets/bundlesdf_egg/{toss_id-1}.pt'
-        # Assumed URDF path (gt_egg.urdf is a guess), following the pattern of
-        # the other object types.
-        egg_urdf_dir = "drake/../../../../../../../../../../home/cnets-vision/mengti_ws/BundleSDF/assets/gt_egg.urdf"
-        simulate_toss_with_traj(egg_traj_dir, egg_urdf_dir)
- else:
- print('Invalid toss type')
\ No newline at end of file
diff --git a/dair_pll_old/test/test_surface_normals.py b/dair_pll_old/test/test_surface_normals.py
deleted file mode 100644
index 5c95ea9..0000000
--- a/dair_pll_old/test/test_surface_normals.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from dair_pll.deep_support_function import extract_outward_normal_hyperplanes, get_mesh_summary_from_polygon
-from dair_pll.geometry import Polygon
-import pywavefront # type: ignore
-import torch
-from torch import Tensor
-torch.set_printoptions(threshold=10_000)
-filename = '/home/cnets-vision/mengti_ws/dair_pll_latest/assets/cube_convex_hull_rescale_simplified_alt.obj'
-output_file = '/home/cnets-vision/mengti_ws/dair_pll_latest/assets/cube_convex_hull_rescale_simplified_with_normals.obj'
-mesh = pywavefront.Wavefront(filename)
-vertices = Tensor(mesh.vertices)
-polygon = Polygon(vertices)
-mesh_summary = get_mesh_summary_from_polygon(polygon)
-normals = extract_outward_normal_hyperplanes(
- mesh_summary.vertices.unsqueeze(0),
- mesh_summary.faces.unsqueeze(0)
- )[0].squeeze(0)
-with open(output_file, 'w') as file:
- # Write vertices to the file
- for vertex in vertices.numpy():
- file.write(f"v {vertex[0]} {vertex[1]} {vertex[2]}\n")
-
- # Write normals to the file
- for normal in normals.numpy():
- file.write(f"vn {normal[0]} {normal[1]} {normal[2]}\n")
-
- # Write faces to the file (assuming you have them)
- # This is just a basic example, adjust based on your faces' structure
- for face in mesh_summary.faces.numpy():
- # +1 because obj indexing starts at 1, not 0
- file.write(f"f {face[0]+1}//{face[0]+1} {face[1]+1}//{face[1]+1} {face[2]+1}//{face[2]+1}\n")
\ No newline at end of file
diff --git a/dair_pll_old/test/test_video_writer.py b/dair_pll_old/test/test_video_writer.py
deleted file mode 100644
index e16aedc..0000000
--- a/dair_pll_old/test/test_video_writer.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Test script for doing forced triggering for Drake's VideoWriter.
-
-from pydrake.multibody.parsing import Parser
-from pydrake.multibody.plant import MultibodyPlant, AddMultibodyPlantSceneGraph
-from pydrake.multibody.plant import CoulombFriction
-from pydrake.multibody.tree import world_model_instance
-from pydrake.systems.analysis import Simulator
-from pydrake.systems.framework import DiagramBuilder
-from pydrake.math import RigidTransform, RollPitchYaw
-from pydrake.geometry import HalfSpace
-from pydrake.visualization import VideoWriter
-from pydrake.all import AngleAxis
-from dair_pll import file_utils
-
-from PIL import Image, ImageSequence
-
-from copy import deepcopy
-
-import numpy as np
-import pdb
-import torch
-from scipy.spatial.transform import Rotation as R
-import yaml
-
-DUMMY_DT = 0.001 # "DUMMY" because the goal is to force-trigger publishes, not run a simulation.
-
-URDFS = {"cube": file_utils.get_asset("contactnets_cube.urdf")}
-trajectory_dir = '/home/cnets-vision/mengti_ws/dair_pll_latest/assets/contactnets_cube/0.pt'
-traj = torch.load(trajectory_dir) #p_t, quat_shuffle, dp_t, w_t
-print(f'traj loaded: {traj.size()}') #N,13
-p_t = traj[:,4:7].numpy() #N,3
-q_t = traj[:,:4].numpy() #N,4, w,x,y,z
-# q_t_shuffled = np.concatenate((q_t[:, 1:], q_t[:, 0].reshape(-1,1)), axis=1) ##N,4, x,y,z,w
-dp_t = traj[:,10:].numpy() #N,3
-w_t = traj[:,7:10].numpy() #N,3, in body frame
-w_t_world = np.zeros_like(w_t)
-for i in range(traj.shape[0]):
- rot = R.from_quat(q_t[i]).as_matrix()
- w_t_i = w_t[i]
- w_t_world[i] = rot @ w_t_i.T
-# w_t_world = R.from_quat(q_t_shuffled).as_matrix() @ w_t.T
-print(p_t.shape, q_t.shape, dp_t.shape, w_t_world.shape)
-trajectory = np.concatenate((q_t, p_t, w_t, dp_t), axis=1) #N,13
-# In order of [qw, qx, qy, qz, x, y, z, wx, wy, wz, vx, vy, vz].
-STATE_TRAJS = {
- "ground": np.array([[], [], [], []]),
- # "cube": np.array([[1, 0, 0, 0, 0, 0, 0.0, 0, 0, 0, 0, 0, 0],
- # [1, 0, 0, 0, 0.1, 0.1, 0.1, 0, 0, 0, 0, 0, 0],
- # [1, 0, 0, 0, 0.2, 0.2, 0.2, 0, 0, 0, 0, 0, 0],
- # [1, 0, 0, 0, 0.3, 0.3, 0.3, 0, 0, 0, 0, 0, 0]])
- "cube": trajectory
-}
-
-def setup_extrinsic(translation, axis_vec):
- angle = np.linalg.norm(axis_vec)
- axis = axis_vec / angle
- return translation, angle, axis
-
-CAMERA_EXTRINSICS_FILE = "/home/cnets-vision/mengti_ws/robot_filter/assets/realsense_pose_cube_old.yaml"
-cam = 'cam0' # realsense camera name
-with open(CAMERA_EXTRINSICS_FILE, 'r') as stream:
- data_loaded = yaml.safe_load(stream)
-print(data_loaded[cam]['pose']['position'])
-
-cam_pos_dict = data_loaded[cam]['pose']['position']
-cam_trans = np.array([cam_pos_dict['x'], cam_pos_dict['y'], cam_pos_dict['z']]).reshape(-1, 1)
-cam_rot_dict = data_loaded[cam]['pose']['rotation']
-cam_axis_vec = np.array([cam_rot_dict['x'], cam_rot_dict['y'], cam_rot_dict['z']])
-
-# Going through PLL code:
-# Starts in meshcat_utils.generate_visualization_system()
-# plant_diagram = MultibodyPlantDiagram(urdfs, dt, enable_visualizer)
-builder = DiagramBuilder()
-plant, scene_graph = AddMultibodyPlantSceneGraph(builder, DUMMY_DT)
-parser = Parser(plant)
-
-# Build [model instance index] list, starting with world model, which is always
-# added by default.
-model_ids = [world_model_instance()]
-model_ids.extend([parser.AddModelFromFile(urdf, name) \
- for name, urdf in URDFS.items()])
-
-# Add video writer to diagram. From drake_utils.MultibodyPlantDiagram
-sensor_pose = RigidTransform(RollPitchYaw([-np.pi/2, 0, np.pi/2]), [2., 0., 0.2])
-# cam_pos, angle, axis = setup_extrinsic(cam_trans, cam_axis_vec)
-# sensor_pose = RigidTransform(AngleAxis(angle=angle, axis=axis), cam_pos)
-video_writer = VideoWriter.AddToBuilder(filename="output.gif",
- builder=builder,
- sensor_pose=sensor_pose)
-
-# Add ground plane at z=0.
-halfspace_transform = RigidTransform()
-friction = CoulombFriction(1.0, 1.0)
-plant.RegisterCollisionGeometry(plant.world_body(), halfspace_transform,
- HalfSpace(), "ground", friction)
-
-# Builds and initialize simulator from diagram
-plant.Finalize()
-diagram = builder.Build()
-diagram.CreateDefaultContext()
-sim = Simulator(diagram)
-sim.Initialize()
-sim.set_publish_every_time_step(False)
-
-
-def set_plant_state(traj_idx):
- # Set state initial condition in internal Drake ``Simulator`` context.
- global plant, sim, model_ids
-
- sim_context = sim.get_mutable_context()
- sim_context.SetTime(DUMMY_DT/4)
- plant_context = plant.GetMyMutableContextFromRoot(sim.get_mutable_context())
-
- # Iterate over every object in the plant.
- for model_id, obj_key in zip(model_ids, STATE_TRAJS.keys()):
- q_drake = STATE_TRAJS[obj_key][traj_idx, :7]
- v_drake = STATE_TRAJS[obj_key][traj_idx, 7:]
- plant.SetPositions(plant_context, model_id, q_drake)
- plant.SetVelocities(plant_context, model_id, v_drake)
-
- sim.Initialize()
-
-
-for i in range(STATE_TRAJS["ground"].shape[0]):
- # Set the plant state.
- set_plant_state(i)
-
- # Force trigger a video writer publish event.
- sim_context = sim.get_mutable_context()
- video_context = video_writer.GetMyContextFromRoot(sim_context)
- video_writer._publish(video_context)
-
-print(f'Published {i+1} frames via video_writer._publish()')
-
-# Save the gif.
-video_writer.Save()
-
-# Since Drake's VideoWriter defaults to not looping gifs, re-load and re-save
-# the gif to ensure it loops.
-im = Image.open(video_writer._filename)
-im.save(f'_{video_writer._filename}', save_all=True, loop=0)
-im.close()
-
-# Check how many frames there are.
-im = Image.open(f'_{video_writer._filename}')
-print(f'Frames in resulting gif: {len(list(ImageSequence.Iterator(im)))}')
-print(f'Saved original gif at: {video_writer._filename}')
-print(f'Saved looping gif at: _{video_writer._filename}')
-
-# pdb.set_trace()
-
diff --git a/dair_pll_old/test/vector_field_tests.py b/dair_pll_old/test/vector_field_tests.py
deleted file mode 100644
index ea47536..0000000
--- a/dair_pll_old/test/vector_field_tests.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""Test script for visualizing force vector fields."""
-
-import pdb
-
-import numpy as np
-import matplotlib.pyplot as plt
-import torch
-from torch import Tensor
-from typing import Tuple
-
-
-ROTATION_PRIMITIVE = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 0]])
-INWARD_PRIMITIVE = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 0]])
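-# ROTATION_PRIMITIVE maps (x, y, z) to (-y, x, 0), i.e. a velocity tangent to
-# circles about the z-axis; INWARD_PRIMITIVE maps it to (-x, -y, 0), pulling
-# points toward the axis. Weighted together they form the vortex field.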
-
-W_ROT = 1.
-W_IN = 1.
-
-
-def vortex_by_location(xyz_loc: Tensor) -> Tensor:
- xy_mag = torch.linalg.norm(xyz_loc[:2])
- rotation_mat = W_ROT * ROTATION_PRIMITIVE / xy_mag
- inward_mat = W_IN * INWARD_PRIMITIVE
-
- return (rotation_mat + inward_mat) @ xyz_loc
-
-
-
-def vortex_by_coords(x: float, y: float, z: float) -> Tuple[float, float, float]:
- xy_mag = np.sqrt(x**2 + y**2) + 1e-4
- rotation_mat = W_ROT * ROTATION_PRIMITIVE / xy_mag
- inward_mat = W_IN * INWARD_PRIMITIVE / xy_mag
-
- force = (rotation_mat + inward_mat) @ np.array([x, y, z])
-
- return force[0], force[1], force[2]
-
-
-vf_x = lambda x, y, z: vortex_by_coords(x, y, z)[0]
-vf_y = lambda x, y, z: vortex_by_coords(x, y, z)[1]
-vf_z = lambda x, y, z: vortex_by_coords(x, y, z)[2]
-
-
-# # 1D arrays
-# x_locs = np.arange(-0.5, 0.5, 0.01)
-# y_locs = np.arange(-0.5, 0.5, 0.01)
-
-# # Meshgrid
-# X, Y = np.meshgrid(x_locs, y_locs, indexing='ij')
-x = np.arange(-0.5, 0.5, 0.1)
-y = np.arange(-0.5, 0.5, 0.1)
-
-# Meshgrid
-X,Y = np.meshgrid(x,y)
-
-# Store U and V
-U, V = np.zeros_like(X), np.zeros_like(X)
-
-for i in range(X.shape[0]):
- for j in range(X.shape[0]):
- U[i,j] = vf_x(X[i,j], Y[i,j], 0)
- V[i,j] = vf_y(X[i,j], Y[i,j], 0)
-
-# Depict illustration
-plt.figure(figsize=(10, 10))
-plt.quiver(X,Y,U,V, units='xy')
-plt.streamplot(X,Y,U,V, density=1.4, linewidth=None, color='#A23BEC')
-plt.title('Vortex Force Field')
-
-plt.grid()
-plt.savefig('/home/bibit/Desktop/test.png')
-
-pdb.set_trace()
diff --git a/dair_pll_old/test/video_writer_test.py b/dair_pll_old/test/video_writer_test.py
deleted file mode 100644
index 4a38600..0000000
--- a/dair_pll_old/test/video_writer_test.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Test script for doing forced triggering for Drake's VideoWriter.
-
-from pydrake.multibody.parsing import Parser
-from pydrake.multibody.plant import MultibodyPlant, AddMultibodyPlantSceneGraph
-from pydrake.multibody.plant import CoulombFriction
-from pydrake.multibody.tree import world_model_instance
-from pydrake.systems.analysis import Simulator
-from pydrake.systems.framework import DiagramBuilder
-from pydrake.math import RigidTransform, RollPitchYaw
-from pydrake.geometry import HalfSpace
-from pydrake.visualization import VideoWriter
-
-from dair_pll import file_utils
-
-from PIL import Image, ImageSequence
-
-from copy import deepcopy
-
-import numpy as np
-import pdb
-
-
-DUMMY_DT = 0.001 # "DUMMY" because the goal is to force-trigger publishes, not run a simulation.
-
-URDFS = {"cube": file_utils.get_asset("contactnets_cube.urdf")}
-
-# In order of [qw, qx, qy, qz, x, y, z, wx, wy, wz, vx, vy, vz].
-STATE_TRAJS = {
- "ground": np.array([[], [], [], []]),
- "cube": np.array([[1, 0, 0, 0, 0, 0, 0.0, 0, 0, 0, 0, 0, 0],
- [1, 0, 0, 0, 0.1, 0.1, 0.1, 0, 0, 0, 0, 0, 0],
- [1, 0, 0, 0, 0.2, 0.2, 0.2, 0, 0, 0, 0, 0, 0],
- [1, 0, 0, 0, 0.3, 0.3, 0.3, 0, 0, 0, 0, 0, 0]])}
-
-
-# Going through PLL code:
-# Starts in meshcat_utils.generate_visualization_system()
-# plant_diagram = MultibodyPlantDiagram(urdfs, dt, enable_visualizer)
-builder = DiagramBuilder()
-plant, scene_graph = AddMultibodyPlantSceneGraph(builder, DUMMY_DT)
-parser = Parser(plant)
-
-# Build [model instance index] list, starting with world model, which is always
-# added by default.
-model_ids = [world_model_instance()]
-model_ids.extend([parser.AddModelFromFile(urdf, name) \
- for name, urdf in URDFS.items()])
-
-# Add video writer to diagram. From drake_utils.MultibodyPlantDiagram
-sensor_pose = RigidTransform(RollPitchYaw([-np.pi/2, 0, np.pi/2]), [2., 0., 0.])
-video_writer = VideoWriter.AddToBuilder(filename="output.gif",
- builder=builder,
- sensor_pose=sensor_pose)
-
-# Add ground plane at z=0.
-halfspace_transform = RigidTransform()
-friction = CoulombFriction(1.0, 1.0)
-plant.RegisterCollisionGeometry(plant.world_body(), halfspace_transform,
- HalfSpace(), "ground", friction)
-
-# Builds and initialize simulator from diagram
-plant.Finalize()
-diagram = builder.Build()
-diagram.CreateDefaultContext()
-sim = Simulator(diagram)
-sim.Initialize()
-sim.set_publish_every_time_step(False)
-
-
-def set_plant_state(traj_idx):
- # Set state initial condition in internal Drake ``Simulator`` context.
- global plant, sim, model_ids
-
- sim_context = sim.get_mutable_context()
- sim_context.SetTime(DUMMY_DT/4)
- plant_context = plant.GetMyMutableContextFromRoot(sim.get_mutable_context())
-
- # Iterate over every object in the plant.
- for model_id, obj_key in zip(model_ids, STATE_TRAJS.keys()):
- q_drake = STATE_TRAJS[obj_key][traj_idx, :7]
- v_drake = STATE_TRAJS[obj_key][traj_idx, 7:]
-
- plant.SetPositions(plant_context, model_id, q_drake)
- plant.SetVelocities(plant_context, model_id, v_drake)
-
- sim.Initialize()
-
-
-
-for i in range(STATE_TRAJS["ground"].shape[0]):
- # Set the plant state.
- set_plant_state(i)
-
- # Force trigger a video writer publish event.
- sim_context = sim.get_mutable_context()
- video_context = video_writer.GetMyContextFromRoot(sim_context)
- video_writer._publish(video_context)
-
-print(f'Published {i+1} frames via video_writer._publish()')
-
-# Save the gif.
-video_writer.Save()
-
-# Since Drake's VideoWriter defaults to not looping gifs, re-load and re-save
-# the gif to ensure it loops.
-im = Image.open(video_writer._filename)
-im.save(f'_{video_writer._filename}', save_all=True, loop=0)
-im.close()
-
-# Check how many frames there are.
-im = Image.open(f'_{video_writer._filename}')
-print(f'Frames in resulting gif: {len(list(ImageSequence.Iterator(im)))}')
-print(f'Saved original gif at: {video_writer._filename}')
-print(f'Saved looping gif at: _{video_writer._filename}')
-
-# pdb.set_trace()
-
diff --git a/dair_pll_old/test/vis_vector_field_data.py b/dair_pll_old/test/vis_vector_field_data.py
deleted file mode 100644
index bd0046a..0000000
--- a/dair_pll_old/test/vis_vector_field_data.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""Visualization tests of data created with force vector fields. The goal is to
-be able to visualize a generated toss already saved to storage, possibly
-overlaid with a toss from the same initial condition and no force vector field.
-"""
-
-import os
-import os.path as op
-import pdb
-
-import shutil
-import torch
-from torch import Tensor
-
-from dair_pll.drake_system import DrakeSystem
-from dair_pll import vis_utils
-
-
-pdb.set_trace()
-SYSTEM = 'cube'
-RESULTS_FOLDER_NAME = 'viscous_cube' #'vortex_cube'
-
-TRUE_URDFS = {'cube': '/home/bibit/dair_pll/assets/contactnets_cube.urdf',
- 'elbow': '/home/bibit/dair_pll/assets/contactnets_elbow.urdf'}
-URDF = {SYSTEM: TRUE_URDFS[SYSTEM]}
-
-TOSS_DIRS = {
- 'cube': f'/home/bibit/dair_pll/results/{RESULTS_FOLDER_NAME}/data/ground_truth'}
-TOSS_DIR = TOSS_DIRS[SYSTEM]
-
-DT = 0.0068
-DUMMY_CARRY = Tensor([0])
-
-VIS_DIR = '/home/bibit/dair_pll/test'
-VIS_FILE = op.join(VIS_DIR, 'test.gif')
-
-DRAKE_SYSTEM = DrakeSystem(URDF, DT)
-VIS_SYSTEM = vis_utils.generate_visualization_system(DRAKE_SYSTEM, VIS_FILE)
-
-
-def get_non_augmented_traj(traj):
- x0 = traj[0, :].unsqueeze(0)
- steps = traj.shape[0] - 1
-
- na_traj, _ = DRAKE_SYSTEM.simulate(x0, DUMMY_CARRY, steps=steps)
-
- return na_traj
-
-
-pdb.set_trace()
-
-for toss in os.listdir(TOSS_DIR):
- print(f"Making comparison toss for {toss}...")
- toss_file = op.join(TOSS_DIR, toss)
- augmented_traj = torch.load(toss_file)
-
- no_augment_traj = get_non_augmented_traj(augmented_traj)
-
- space = DRAKE_SYSTEM.space
- vis_traj = torch.cat( \
- (space.q(augmented_traj), space.q(no_augment_traj),
- space.v(augmented_traj), space.v(no_augment_traj)), -1)
-
- video, framerate = vis_utils.visualize_trajectory(VIS_SYSTEM, vis_traj)
-
- toss_num = toss.split('.')[0]
- new_filename = f'toss_{toss_num}.gif'
- new_file = op.join(VIS_DIR, new_filename)
-
- test_filepath = op.join(VIS_DIR, 'test_.gif')
-
- shutil.copyfile(test_filepath, new_file)
diff --git a/data_preparation.py b/data_preparation.py
deleted file mode 100644
index 3917788..0000000
--- a/data_preparation.py
+++ /dev/null
@@ -1,311 +0,0 @@
-import argparse
-import os
-import numpy as np
-import torch
-
-from scipy.spatial.transform import Rotation as R, RotationSpline
-from scipy import signal
-from scipy.interpolate import CubicSpline, interp1d
-from pyquaternion import Quaternion
-import matplotlib.pyplot as plt
-import yaml
-import pdb
-import math
-
-"""Class for generating and managing datasets for ContactNets.
-"""
-PLANK_HEIGHT = 0.0145 #0.022
-
-def rotvecfix(rv):
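-    """Keep consecutive rotation vectors on the same branch: if the equivalent
-    vector on the other 2*pi branch is closer to the previous sample, use it
-    instead, avoiding jumps when the rotation angle wraps."""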
- for i in range(rv.shape[0]-1):
- rvi = rv[i,:]
- rvip1 = rv[i+1,:]
- theta = np.linalg.norm(rvip1)
- if theta > 0.0:
- rnew = rvip1*(1 - 2*math.pi/theta)
- if np.linalg.norm(rvi - rnew) < np.linalg.norm(rvi - rvip1):
- rv[i+1,:] = rnew
- return rv
-
-def smooth_positions(positions, window_size=5):
- """
- Smooths positions using a moving average.
- :param positions: Nx3 array of positions.
- :param window_size: Size of the moving average window.
- :return: Smoothed Nx3 array of positions.
- """
- smoothed_positions = np.zeros_like(positions)
- half_window = window_size // 2
-
- for i in range(positions.shape[0]):
- start_idx = max(0, i - half_window)
- end_idx = min(positions.shape[0], i + half_window)
- smoothed_positions[i] = np.mean(positions[start_idx:end_idx], axis=0)
-
- return smoothed_positions
-
-def smooth_quaternions_pyquat(quats, alpha=0.5):
- """
- Smooth quaternions using Slerp with pyquaternion.
- :param quats: Nx4 array of quaternions, xyzw
- :param alpha: Interpolation factor (0.0 <= alpha <= 1.0).
- :return: Smoothed Nx4 array of quaternions.
- """
- quats = xyzw2wxyz(quats)
- smoothed_quats = np.zeros_like(quats)
- smoothed_quats[0] = quats[0]
-
- for i in range(1, len(quats)):
- q0 = Quaternion(quats[i-1])
- q1 = Quaternion(quats[i])
- smoothed = Quaternion.slerp(q0, q1, alpha)
- smoothed_quats[i] = [smoothed.w, smoothed.x, smoothed.y, smoothed.z]
- smoothed_quats = wxyz2xyzw(smoothed_quats)
- return smoothed_quats #xyzw
-
-def setup_extrinsic(translation, axis_vec):
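-    """Build the 4x4 world-to-camera extrinsic from the camera pose, given as a
-    translation and an axis-angle rotation vector in the world frame."""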
- rotation_matrix = R.from_rotvec(axis_vec.ravel()).as_matrix()
- rotation_inverse = rotation_matrix.T
- translation_inverse = -rotation_inverse @ translation.ravel()
- extrinsic = np.eye(4)
- extrinsic[:3, :3] = rotation_inverse
- extrinsic[:3, 3] = translation_inverse
- return extrinsic
-
-def camera_to_world(m, translation, axis_vec):
- extrinsic = setup_extrinsic(translation, axis_vec)
- return np.linalg.inv(extrinsic) @ m
-
-def transform_bundletrack_output(
- pred_pose, output_pose_dir, odom_file_dir, translation, axis_vec, to_world=False
-):
- """
- https://github.com/wenbowen123/BundleTrack/issues/38
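-
-    Re-anchors BundleTrack's predicted pose (expressed in BundleTrack's internal
-    first-frame convention, init_pose) onto the TagSLAM-consistent initial pose
-    init_pose_new, optionally mapping the result into the world frame.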
- """
- init_pose = np.loadtxt(
- output_pose_dir + "%04i.txt" % 1
- ) # initial cube pose in camera frame, bundletrack's internal coordinate system
- init_pose_new = np.loadtxt(
- odom_file_dir + "%04i.txt" % 0
- ) # initial cube pose represented in camera frame, matching tagslam
- pred_new = (pred_pose @ np.linalg.inv(init_pose)) @ init_pose_new
- if to_world:
- pred_new_world = camera_to_world(pred_new, translation, axis_vec)
- return pred_new_world
- return pred_new
-
-def wxyz2xyzw(quat_wxyz):
- """
- quat_wxyz: (N,4)
- """
- w = quat_wxyz[:, 0:1]
- xyz = quat_wxyz[:, 1:]
- return np.concatenate((xyz, w), axis=1)
-
-def xyzw2wxyz(quat_xyzw):
- """
- quat_xyzw: (N,4)
- """
- xyz = quat_xyzw[:, 0:3]
- w = quat_xyzw[:, 3:4]
- return np.concatenate((w, xyz), axis=1)
-
-class DatasetManagement:
- def __init__(self, frame_num, start_frame, end_frame, timestamps, toss_id, cam_trans, cam_axis_vec, odom_dir, pose_dir, save_dir, gt=True):
- self.tagslam_dir = f'/home/cnets-vision/mengti_ws/robot_filter/dataset/old_toss_{toss_id}/tagslam_poses'
- self.frame_num = frame_num
- self.start_frame = start_frame
- self.end_frame = end_frame
- self.timestamps = timestamps
- self.toss_id = toss_id-1
- self.positions = [] #(N, 3)
- self.quats = [] #(N, 4)
- self.interpolated_ang_vels = [] #(N,3)
- self.interpolated_lin_vels = [] #(N,3)
- self.rot_t = None
- ###### sophter ########
- self.t = None # N,
- self.q_t = None # 4,N, x,y,z,w
- self.p_t = None #3,N
- #######################
- self.odom_dir = odom_dir
- self.pose_dir = pose_dir
- self.save_dir = save_dir
- self.cam_trans = cam_trans
- self.cam_axis_vec = cam_axis_vec
- self.load_gt = gt
- self.load_poses()
-
- def load_poses(self):
- p_t = []
- q_t = []
- t = []
- ##### tagslam
- tagslam_data = np.loadtxt(os.path.join(self.tagslam_dir, 'tagslam.txt'))
- #####
- for frame_id in range(1, self.frame_num+1):
- if frame_id < self.start_frame:
- continue
- if frame_id >= self.end_frame:
- break
- if frame_id == self.start_frame:
- t_start = self.timestamps[frame_id]
- ######
- if self.load_gt:
- print('Loading tagslam poses')
- tagslam_pose = tagslam_data[frame_id, 2:]
- q_t.append(tagslam_pose[3:])#xyzw
- p_t.append(tagslam_pose[:3])
- else:
- print('Loading estimated poses')
- pose = np.loadtxt(self.pose_dir + "%04i.txt" % frame_id)
- pose = transform_bundletrack_output(pose, self.pose_dir, self.odom_dir, self.cam_trans, self.cam_axis_vec, to_world=True)
- q_t.append(R.from_matrix(pose[:3, :3]).as_quat()) #x,y,z,w
- p_t.append(pose[:3, 3])
- ######
- curr_t = self.timestamps[frame_id]
- t.append(curr_t - t_start)
-        # q_t, p_t, t = self.force_landing(q_t, p_t, t) # force the final configuration
- quats = np.array(q_t)
- positions = np.array(p_t)
- ts = np.array(t)
- print(f'quats: {quats.shape}, positions: {positions.shape}, ts: {ts.shape}')
- self.q_t = quats.T
- self.p_t = positions.T
- self.t = ts.T
- print(f'load_poses: self.q_t {self.q_t.shape}, self.p_t: {self.p_t.shape}, self.t: {self.t.shape}')
-
- def upsample(self, quats, positions, ts):
- """
- quats: (N, 4), x,y,z,w
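-        positions: (N, 3); ts: (N,)
-        Returns quaternions (4, M) in x,y,z,w order, positions (3, M), and
-        timestamps (M,), upsampled to M = 100 samples.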
- """
- quats = xyzw2wxyz(quats) #w,x,y,z
- quats = quats / np.linalg.norm(quats, axis=1)[:, np.newaxis]
- # upsample the trajectory
- N = quats.shape[0]
- for i in range(1, N):
- if np.dot(quats[0], quats[i]) < 0:
- quats[i] = -quats[i]
-
- # Upsample factor
- M = 100
- new_times = np.linspace(0, 1, M)
-
- # Interpolate positions using linear interpolation
- interp_func_x = interp1d(np.linspace(0, 1, N), positions[:, 0], kind='linear')
- interp_func_y = interp1d(np.linspace(0, 1, N), positions[:, 1], kind='linear')
- interp_func_z = interp1d(np.linspace(0, 1, N), positions[:, 2], kind='linear')
-
- new_positions = np.vstack([
- interp_func_x(new_times),
- interp_func_y(new_times),
- interp_func_z(new_times)
- ]).T
- new_quaternions = np.zeros((M, 4))
- for i in range(N-1): # Adjusted loop condition
- start_idx = i * (M // (N-1))
- end_idx = start_idx + (M // (N-1))
- t_array = np.linspace(0, 1, M//(N-1))
- q0 = Quaternion(quats[i, :])
- q1 = Quaternion(quats[i+1, :]) if i < N-2 else Quaternion(quats[-1, :])
- interpolated_quats = [Quaternion.slerp(q0, q1, amount=t).elements for t in t_array]
- new_quaternions[start_idx:end_idx, :] = np.array(interpolated_quats)
- if end_idx < M:
- new_quaternions[end_idx:] = quats[-1]
-
- interp_func_timestamps = interp1d(np.linspace(0, 1, N), ts, kind='linear')
- new_timestamps = interp_func_timestamps(new_times)
- new_quaternions = wxyz2xyzw(new_quaternions) #x,y,z,w
- return new_quaternions.T, new_positions.T, new_timestamps.T
-
- def do_process(self):
- """
- Generate contactnets trajectory.
- The PLL format is in:
- [ quaternion position angular_velocity linear_velocity ]
- where:
- - position: [x, y, z] in meters
- - quaternion: [qw, qx, qy, qz]
- - linear_velocity: [vx, vy, vz] in meters/second
- - angular_velocity: [wx, wy, wz] in rad/second in body frame
- """
- rot_t = R.from_quat(self.q_t.T)
- filter_rot = True
- if filter_rot:
- rvecs = rotvecfix(rot_t.as_rotvec()).T
- for i in range(3):
- rvecs[i,:] = signal.medfilt(rvecs[i,:],kernel_size=3)
- rot_t = rot_t.from_rotvec(rvecs.T)
- quat_t = rot_t.as_quat() #x,y,z,w
-
- adjust_pos = True
- if adjust_pos: # the franka is on a plank, need to add the plank height to z
- self.p_t = self.p_t + PLANK_HEIGHT
- filter_pos = True
- if filter_pos:
- self.p_t = smooth_positions(self.p_t.T, window_size=5).T
- ##### Upsample
- quat_t = quat_t / np.linalg.norm(quat_t, axis=1)[:, None]
- self.q_t, self.p_t, self.t = self.upsample(quat_t, self.p_t.T, self.t) #xyzw
- self.q_t = self.q_t / np.linalg.norm(self.q_t, axis=0)
- print(f'upsampled self.q_t: {self.q_t.shape}')
- rot_t = R.from_quat(self.q_t.T) #N,3,3
- #####
- pdiff = self.p_t[:,1:] - self.p_t[:,:-1]
- tdiff = np.tile((self.t[1:] - self.t[:-1]).reshape([1,-1]), [3,1])
- dp_t = pdiff / tdiff
- dp_t = np.hstack((dp_t[:,[0]],dp_t))
- to_body = True
- if to_body:
- rot_diff = [rot_t[i+1] * rot_t[i].inv() for i in range(len(rot_t) - 1)]
- w_t = np.array([rd.as_rotvec() for rd in rot_diff]).T / tdiff
- w_t = np.hstack((w_t[:, [0]], w_t)).T
- w_t_body = np.zeros_like(w_t)
- for j in range(len(rot_t)):
- rotation_matrix = rot_t[j].as_matrix()
- w_t_body[j] = rotation_matrix.T @ w_t[j]
- w_t_body = w_t_body.T
- print(f'w_t_body: {w_t_body.shape}') #3, M
- print(f'dp_t: {dp_t.shape}') #3,M
- # butterworth filter of order 2 to smooth velocity states
- # sampling frequency
- fs = 148.
-
- # Cut-off frequency of angular velocity filter. < fs/2 (Nyquist)
- fc_w = 60.
-
- # Cut-off frequency of linear velocity filter. < fs/2 (Nyquist)
- fc_v = 45.
-
- # Cut-off frequency of linear accel filter. < fs/2 (Nyquist)
- fc_a = 45.
- filter_avel = True
- if filter_avel:
- # filter angular velocity
- w_w = np.clip((fc_w / (fs / 2)), a_min = 0.000001, a_max = 0.999999) # Normalize the frequency
- b, a = signal.butter(1, w_w, 'low')
- for i in range(3):
- # w_t_body[i,:] = signal.medfilt(w_t_body[i,:],kernel_size=3)
- w_t_body[i,:] = signal.savgol_filter(w_t_body[i,:], window_length=15, polyorder=4)
- filter_vel = True
- if filter_vel:
- # filter linear velocity
- w_v = np.clip((fc_v / (fs / 2)), a_min = 0.000001, a_max = 0.999999) # Normalize the frequency
- b, a = signal.butter(1, w_v, 'low')
- for i in range(3):
- #dp_t[i,:] = signal.filtfilt(b, a, dp_t[i,:],padtype='odd',padlen=100)
- # dp_t[i,:] = signal.medfilt(dp_t[i,:],kernel_size=3)
- dp_t[i,:] = signal.savgol_filter(dp_t[i,:], window_length=10, polyorder=4)
- quat_shuffle = xyzw2wxyz(self.q_t.T).T #w,x,y,z
- quat_shuffle = quat_shuffle / np.linalg.norm(quat_shuffle)
- print(f'quat normal: {np.linalg.norm(quat_shuffle)}')
- data = np.concatenate((quat_shuffle, self.p_t, w_t_body, dp_t), axis=0)
- p_t = self.p_t.T
- quat_shuffle = quat_shuffle.T
- dp_t = dp_t.T
- w_t_body = w_t_body.T
- data = data.T
- print('data: ', data.shape) #N,13
- print(p_t.shape, quat_shuffle.shape, dp_t.shape, w_t_body.shape)
- torch.save(torch.tensor(data), self.save_dir + "{}.pt".format(self.toss_id))
- print(f'file {self.toss_id}.pt saved at {self.save_dir + "{}.pt".format(self.toss_id)}')
\ No newline at end of file
diff --git a/docker/dockerfile b/docker/dockerfile
index 4a3b401..faf81b5 100644
--- a/docker/dockerfile
+++ b/docker/dockerfile
@@ -1,4 +1,5 @@
-FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
+FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
+# FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu20.04
ENV TZ=US/Pacific
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
@@ -66,22 +67,22 @@ RUN cd / && wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-L
/opt/conda/bin/conda update -n base -c defaults conda -y &&\
/opt/conda/bin/conda config --set ssl_verify no && \
/opt/conda/bin/conda config --add channels conda-forge &&\
- /opt/conda/bin/conda create -n py38 python=3.8
+ /opt/conda/bin/conda create -n py39 python=3.9
-ENV PATH $PATH:/opt/conda/envs/py38/bin
+ENV PATH $PATH:/opt/conda/envs/py39/bin
RUN conda init bash &&\
- echo "conda activate py38" >> ~/.bashrc &&\
- conda activate py38 &&\
- pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 &&\
+ echo "conda activate py39" >> ~/.bashrc &&\
+ conda activate py39 &&\
+ pip install scipy "numpy<2.0" &&\
+ # pip install torch==2.0.0+cu117 torchvision==0.15.0+cu117 torchaudio==2.0.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117 &&\
+ pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu118 &&\
pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable" &&\
- pip install trimesh opencv-python wandb matplotlib imageio tqdm open3d ruamel.yaml sacred kornia pymongo pyrender jupyterlab ninja &&\
- conda install -y -c anaconda scipy
-
-
-RUN cd / && git clone --recursive https://github.com/NVIDIAGameWorks/kaolin
+ pip install trimesh opencv-python wandb matplotlib imageio tqdm open3d==0.16.0 ruamel.yaml sacred kornia pymongo pyrender jupyterlab ninja &&\
+ pip install cython yacs numpy-stl pywavefront optuna embreex &&\
+ pip install kaolin -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.1_cu118.html
ENV CUDA_HOME /usr/local/cuda
@@ -91,12 +92,46 @@ ENV OPENCV_IO_ENABLE_OPENEXR=1
RUN imageio_download_bin freeimage
-RUN conda activate py38 && cd /kaolin &&\
- # sed -i "223i\ extra_compile_args['nvcc'] += ['-gencode=arch=compute_52,code=sm_52', '-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61', '-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_80,code=sm_80', '-gencode=arch=compute_80,code=compute_80']" setup.py &&\
- FORCE_CUDA=1 python setup.py develop
-
#### Kaolin will change numpy version
RUN pip install transformations einops scikit-image awscli-plugin-endpoint gputil xatlas pymeshlab rtree dearpygui pytinyrenderer PyQt5 cython-npm chardet openpyxl
RUN apt-get update --fix-missing && \
apt install -y rsync lbzip2 pigz zip p7zip-full p7zip-rar
+
+
+### for pll and conversions in between
+# Install ROS Noetic
+# Set locale and timezone to avoid interactive prompts
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && \
+ apt install -y locales && \
+ locale-gen en_US.UTF-8 && \
+ update-locale LANG=en_US.UTF-8
+
+ENV LANG=en_US.UTF-8
+ENV LANGUAGE=en_US:en
+ENV LC_ALL=en_US.UTF-8
+
+# Install ROS Noetic
+RUN apt-get update && \
+ apt install -y lsb-release && \
+ sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' && \
+ curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | apt-key add - && \
+ apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt install -y ros-noetic-desktop
+
+# Install Xvfb for headless display
+RUN apt install -y xvfb ffmpeg x11-apps
+
+# Install Google Chrome and ChromeDriver
+RUN wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | apt-key add - && \
+ echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
+ apt-get update && \
+ apt install -y google-chrome-stable
+
+# Download and install ChromeDriver version 137
+RUN wget https://storage.googleapis.com/chrome-for-testing-public/137.0.7151.68/linux64/chromedriver-linux64.zip && \
+ unzip chromedriver-linux64.zip && \
+ chmod +x chromedriver-linux64/chromedriver && \
+ mv chromedriver-linux64/chromedriver /usr/bin/ && \
+ rm -rf chromedriver-linux64*
diff --git a/docker/run_custom_container.sh b/docker/run_custom_container.sh
new file mode 100644
index 0000000..574dbe5
--- /dev/null
+++ b/docker/run_custom_container.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# This script supports running the Docker container with GPU support both locally and on a remote server with X11 forwarding.
+
+DIR=$(pwd)/../
+DISPLAY=${DISPLAY:-:0}
+XAUTH=/tmp/.docker.xauth
+
+# Setup X11 forwarding
+touch $XAUTH
+xauth nlist $DISPLAY | sed -e 's/^..../ffff/' | xauth -f $XAUTH nmerge -
+xhost +local:docker
+
+docker run \
+ -e XAUTHORITY=$XAUTH \
+ -v $XAUTH:$XAUTH \
+ --gpus all \
+ --env NVIDIA_DISABLE_REQUIRE=1 \
+ -it \
+ --network=host \
+ --name vysics \
+ --cap-add=SYS_PTRACE \
+ --security-opt seccomp=unconfined \
+ -v /home:/home \
+ -v /tmp:/tmp \
+ -v /mnt:/mnt \
+ -v $DIR:$DIR \
+ --ipc=host \
+ -e DISPLAY=$DISPLAY \
+ -e GIT_INDEX_FILE \
+ vysics:cu118 \
+ bash
+
+# Cleanup for security
+# Manually run: xhost -local:docker when you're completely done
\ No newline at end of file
diff --git a/media/Vysics_thumbnail.png b/media/Vysics_thumbnail.png
new file mode 100644
index 0000000..14aea27
Binary files /dev/null and b/media/Vysics_thumbnail.png differ
diff --git a/media/driller.gif b/media/driller.gif
deleted file mode 100644
index 96380b6..0000000
Binary files a/media/driller.gif and /dev/null differ
diff --git a/media/milk_jug.gif b/media/milk_jug.gif
deleted file mode 100644
index 8d8fe46..0000000
Binary files a/media/milk_jug.gif and /dev/null differ
diff --git a/media/preview_results_c.gif b/media/preview_results_c.gif
deleted file mode 100644
index 3d5495f..0000000
Binary files a/media/preview_results_c.gif and /dev/null differ
diff --git a/media/preview_results_c.mp4 b/media/preview_results_c.mp4
deleted file mode 100644
index af88826..0000000
Binary files a/media/preview_results_c.mp4 and /dev/null differ
diff --git a/media/problem_setup_c.gif b/media/problem_setup_c.gif
deleted file mode 100644
index 076a3a9..0000000
Binary files a/media/problem_setup_c.gif and /dev/null differ
diff --git a/media/problem_setup_c.mp4 b/media/problem_setup_c.mp4
deleted file mode 100644
index 77da5ac..0000000
Binary files a/media/problem_setup_c.mp4 and /dev/null differ
diff --git a/media/vysics_intro_spatial_small.mp4 b/media/vysics_intro_spatial_small.mp4
new file mode 100644
index 0000000..783a63c
Binary files /dev/null and b/media/vysics_intro_spatial_small.mp4 differ
diff --git a/mesh_nearby_pts.pt b/mesh_nearby_pts.pt
deleted file mode 100644
index 966097e..0000000
Binary files a/mesh_nearby_pts.pt and /dev/null differ
diff --git a/mesh_nearby_sdfs.pt b/mesh_nearby_sdfs.pt
deleted file mode 100644
index 60f6a59..0000000
Binary files a/mesh_nearby_sdfs.pt and /dev/null differ
diff --git a/mycuda/pyproject.toml b/mycuda/pyproject.toml
new file mode 100644
index 0000000..f5b211d
--- /dev/null
+++ b/mycuda/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools>=61.0", "wheel"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/mycuda/setup.py b/mycuda/setup.py
index ad00334..d3245d0 100644
--- a/mycuda/setup.py
+++ b/mycuda/setup.py
@@ -15,8 +15,11 @@
code_dir = os.path.dirname(os.path.realpath(__file__))
-nvcc_flags = ['-Xcompiler', '-O3', '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__']
-c_flags = ['-O3', '-std=c++14']
+# nvcc_flags = ['-Xcompiler', '-O3', '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__']
+# c_flags = ['-O3', '-std=c++14']
+# for cu118 env
+nvcc_flags = ['-Xcompiler', '-O3', '-std=c++17', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__']
+c_flags = ['-O3', '-std=c++17']
setup(
name='common',
diff --git a/obselete/keep_first_half_of_frames.py b/obselete/keep_first_half_of_frames.py
new file mode 100644
index 0000000..cf4a602
--- /dev/null
+++ b/obselete/keep_first_half_of_frames.py
@@ -0,0 +1,27 @@
+# Copy the first 280 frames from the depth, masks, rgb, and annotated_poses subdirectories of the source directory.
+# The files are named xxxx.png, xxxx.png, xxxx.png, and xxxx.txt, starting from 1.
+# This is to create the cubeslow_2 dataset without the free-falling frames.
+
+import os
+
+dir_source = "data/cube_2"
+dir_target = "data/cubeslow_2"
+depth_dir = os.path.join(dir_target, "depth")
+masks_dir = os.path.join(dir_target, "masks")
+rgb_dir = os.path.join(dir_target, "rgb")
+annotated_poses_dir = os.path.join(dir_target, "annotated_poses")
+cam_K_file = os.path.join(dir_target, "cam_K.txt")
+
+os.makedirs(dir_target, exist_ok=True)
+os.makedirs(depth_dir, exist_ok=True)
+os.makedirs(masks_dir, exist_ok=True)
+os.makedirs(rgb_dir, exist_ok=True)
+os.makedirs(annotated_poses_dir, exist_ok=True)
+
+for i in range(1, 281):
+ os.system(f"cp {os.path.join(dir_source, 'depth', f'{i:04d}.png')} {os.path.join(depth_dir, f'{i:04d}.png')}")
+ os.system(f"cp {os.path.join(dir_source, 'masks', f'{i:04d}.png')} {os.path.join(masks_dir, f'{i:04d}.png')}")
+ os.system(f"cp {os.path.join(dir_source, 'rgb', f'{i:04d}.png')} {os.path.join(rgb_dir, f'{i:04d}.png')}")
+ os.system(f"cp {os.path.join(dir_source, 'annotated_poses', f'{i:04d}.txt')} {os.path.join(annotated_poses_dir, f'{i:04d}.txt')}")
+
+os.system(f"cp {os.path.join(dir_source, 'cam_K.txt')} {cam_K_file}")
diff --git a/obselete/run.sh b/obselete/run.sh
new file mode 100755
index 0000000..4bbe548
--- /dev/null
+++ b/obselete/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+# export PYTHONPATH=${PWD}:${PYTHONPATH} # needed with the structural-changes
+# export LD_LIBRARY_PATH=/opt/conda/lib:${LD_LIBRARY_PATH}
+
+# python bundlenets/run_custom.py --run-name=00_slice200 --vision-asset=2022-11-18-15-10-24_milk --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# iter 1 (original bsdf) first stage and second stage
+# cube, bottle, half, milk, toblerone, prism, egg, napkin, box
+# bakingbox, burger, cardboard, chocolate, cream, croc, crushedcan, duck, gallon, greencan, hotdog, icetray, mug, oatly, pinkcan, stapler, styrofoam, toothpaste
+
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=bakingbox_1 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=bakingbox_1-2 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=bakingbox_1-3 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=bakingbox_1-4 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=bakingbox_1-5 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=cube_1-3 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=milk_3 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2
+
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=cubeslow_2 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+
+# ### iter 2
+
+# python bundlenets/run_custom.py --run-name=00 --vision-asset=bottle_1 --mode=run_video --pll-id=00 --cycle-iteration=2 --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# python bundlenets/run_custom.py --tracking-run-name=00 --run-name=00-r4 --vision-asset=bottle_1-2 --mode=test_loss --no-pll --pll-id=00 --cycle-iteration=2 --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# python bundlenets/run_custom.py --tracking-run-name=00 --run-name=00-r4 --vision-asset=half_1-3 --mode=test_loss --no-pll --pll-id=00 --cycle-iteration=2 --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+
+# # create a loop for above:
+# objs=("cube" "milk" "bottle" "half" "toblerone" "prism" "egg" "napkin")
+# nums=("1" "1-2" "1-3" "1-4" "1-5")
+# for obj in ${objs[@]}; do
+# for num in ${nums[@]}; do
+# python bundlenets/run_custom.py --run-name=00-mhc --vision-asset=${obj}_${num} --mode=run_video --mode=test_loss --tracking-run-name=00 --pll-id=00 --cycle-iteration=2 --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+# done
+# done
+
+
+# create a loop for below:
+# bakingbox, burger, cardboard, chocolate, cream, croc, crushedcan, duck, gallon, greencan, hotdog, icetray, mug, oatly, pinkcan, stapler, styrofoam, toothpaste
+# objs=("bakingbox")
+objs=("burger" "cardboard" "chocolate" "cream" "croc" "crushedcan" "duck" "gallon" "greencan" "hotdog" "icetray" "mug" "oatly" "pinkcan" "stapler" "styrofoam" "toothpaste")
+nums=("1" "1-2" "1-3" "1-4" "1-5")
+for obj in ${objs[@]}; do
+ for num in ${nums[@]}; do
+ # python bundlenets/run_custom.py --run-name=00-nhc --vision-asset=${obj}_${num} --mode=test_loss --tracking-run-name=00 --pll-id=00-smalldisk --cycle-iteration=2 --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+ python bundlenets/run_custom.py --run-name=00-nhc --vision-asset=${obj}_${num} --mode=run_video --no-hpc --pll-id=00 --cycle-iteration=2 --use-segmenter=1 --use-gui=0 --debug-level=2 --clear-data
+ done
+done
+
+# # ### visualization
+# # python bundlenets/vis_utils.py
\ No newline at end of file
diff --git a/obselete/run_loop_0.sh b/obselete/run_loop_0.sh
new file mode 100755
index 0000000..c759eb3
--- /dev/null
+++ b/obselete/run_loop_0.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+### 1. run pll on tagslam trajectory (in venv)
+python examples/contactnets_vision.py --run-name=00 --vision-asset=cube_2 --cycle-iteration=0
+
+### 2. run conversion from pll `results` to bundlesdf `geometry` (in venv)
+# source /opt/ros/noetic/setup.bash
+# source ../../robot_filter/venv/bin/activate
+
+### Need pure bundlesdf already run on the corresponding toss
+python conversion_pll_to_bsdf.py --vision-asset=cube_2 --pll-id=00 --cycle-iteration=0
+
+### 3.1. run bundlesdf using the pll geometry (in docker)
+python bundlenets/run_custom.py --run-name=01 --vision-asset=cube_2 --mode=run_video --pll-id=00 --cycle-iteration=0 --use-segmenter=1 --use-gui=0 --debug-level=2
+
+### 3.2. run bundlesdf without using the pll geometry (in docker)
+python bundlenets/run_custom.py --run-name=01_01 --vision-asset=cube_2 --mode=run_video --pll-id=00 --no-pll --cycle-iteration=0 --use-segmenter=1 --use-gui=0 --debug-level=2
diff --git a/obselete/run_loop_1.sh b/obselete/run_loop_1.sh
new file mode 100755
index 0000000..b30a397
--- /dev/null
+++ b/obselete/run_loop_1.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+### 1. run bundlesdf
+python bundlenets/run_custom.py --run-name=00 --vision-asset=cube_1 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2
+
+### 2. run conversion from bundlesdf `results` to pll `assets`
+python cnets-data-generation/conversion_bsdf_to_pll.py --vision-asset=cube_1 --bundlesdf-id=00 --cycle-iteration=1
+
+### 3. run pll using bundlesdf trajectory
+python dair_pll/examples/contactnets_vision.py --run-name=01 --vision-asset=cube_1 --cycle-iteration=1 --bundlesdf-id=00
+# python bundlesdf_interface.py
+
+### 4. run conversion from pll `results` to bundlesdf `geometry`
+python cnets-data-generation/conversion_pll_to_bsdf.py --vision-asset=cube_1 --pll-id=01 --cycle-iteration=1
+
+### 5. run bundlesdf using pll geometry
+python bundlenets/run_custom.py --run-name=02 --vision-asset=cube_1 --mode=run_video --use-segmenter=1 --use-gui=0 --debug-level=2 --pll-id=01 --cycle-iteration=2
\ No newline at end of file
diff --git a/obselete/util_check_bsdf_convexity_source.py b/obselete/util_check_bsdf_convexity_source.py
new file mode 100644
index 0000000..ef3fbfe
--- /dev/null
+++ b/obselete/util_check_bsdf_convexity_source.py
@@ -0,0 +1,61 @@
+### Check how the convexity loss is implemented.
+
+### If most samples belong to bipart_sample_list and about 1/6 are in uniform_sample_list, then the convexity loss
+### is applied to points interpolated between the support points and the visible points, plus points interpolated among the support points.
+### If most belong to uniform_sample_list, then the convexity loss is applied to points randomly interpolated among the support points and the visible points.
+
+import os
+import os.path as osp
+import yaml
+import torch
+
+results_root = "results"
+
+iter_nums = ["bundlesdf_iteration_2"]
+# iter_nums = None
+bsdf_ids = ["bundlesdf_id_03"]
+# bsdf_ids = None
+
+uniform_sample_list = []
+bipart_sample_list = []
+not_found_list = []
+obj_tosses = os.listdir(results_root)
+obs_tosses_filtered = [obj_toss for obj_toss in obj_tosses if "1" in obj_toss]
+obj_tosses = obs_tosses_filtered
+obj_tosses = sorted(obj_tosses)
+# print(obj_tosses)
+for obj_toss in obj_tosses:
+ obj_toss_path = osp.join(results_root, obj_toss)
+ if iter_nums is None:
+ iter_nums = os.listdir(obj_toss_path)
+ for iter_num in iter_nums:
+ obj_toss_iter_path = osp.join(obj_toss_path, iter_num)
+ if bsdf_ids is None:
+ bsdf_ids = os.listdir(obj_toss_iter_path)
+ for bsdf_id in bsdf_ids:
+ obj_toss_iter_bsdf_id_path = osp.join(obj_toss_iter_path, bsdf_id)
+ # results/bakingbox_1-4/bundlesdf_iteration_2/bundlesdf_id_02/nerf_runs/bundlesdf_id_02/sdf_inspection/interp_end_pts.pt
+ interp_end_file = osp.join(obj_toss_iter_bsdf_id_path, "nerf_runs", bsdf_id, "sdf_inspection", "interp_end_pts.pt")
+ if not osp.exists(interp_end_file):
+ not_found_list.append(obj_toss_iter_bsdf_id_path)
+ continue
+ pts = torch.load(interp_end_file)
+ if len(pts) % 6 == 0:
+ # print(f"Found {obj_toss}/{iter_num}/{bsdf_id}")
+ uniform_sample_list.append(obj_toss_iter_bsdf_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{bsdf_id}")
+ bipart_sample_list.append(obj_toss_iter_bsdf_id_path)
+
+print("-----------uniform_sample_list:")
+for uniform_sample in uniform_sample_list:
+ print(uniform_sample)
+print("-----------bipart_sample_list:")
+for bipart_sample in bipart_sample_list:
+ print(bipart_sample)
+print("-----------not_found_list:")
+for not_found in not_found_list:
+ print(not_found)
+print(f"uniform_sample_list {len(uniform_sample_list)}")
+print(f"bipart_sample_list {len(bipart_sample_list)}")
+print(f"not_found_list {len(not_found_list)}")
\ No newline at end of file
diff --git a/obselete/util_check_bsdf_result_generated.py b/obselete/util_check_bsdf_result_generated.py
new file mode 100644
index 0000000..c92e2e5
--- /dev/null
+++ b/obselete/util_check_bsdf_result_generated.py
@@ -0,0 +1,75 @@
+### Check which experiments have been run and whether they were run with certain configurations.
+
+import os
+import os.path as osp
+import yaml
+
+results_root = "results"
+
+iter_nums = ["bundlesdf_iteration_1"]
+# iter_nums = None
+bsdf_ids = ["bundlesdf_id_00-cvwo-occ"]
+# bsdf_ids = None
+nerf_id = "bundlesdf_id_00-cvwo-occ2x2"
+
+found_list = []
+not_found_list = []
+
+## objects evaluated with geometry
+# objs=("bakingbox" "cardboard" "crushedcan" "gallon" "greencan" "oatly" "pinkcan" "stapler" "styrofoam" "egg" "napkin" "cube" "bottle" "half" "milk")
+objs = ["bakingbox", "cardboard", "crushedcan", "gallon", "greencan", "oatly", "pinkcan", "stapler", "styrofoam", "egg", "napkin", "cube", "bottle", "half", "milk"]
+tosses = ["1", "2", "3", "4", "5"]
+obj_tosses = [f"{obj}_{toss}" for obj in objs for toss in tosses]
+
+# obj_tosses = os.listdir(results_root)
+# # obs_tosses_filtered = [obj_toss for obj_toss in obj_tosses if "1" in obj_toss] # starting with 1
+# obs_tosses_filtered = [obj_toss for obj_toss in obj_tosses if len(obj_toss.split("_")[-1]) == 1] # single digit tosses
+# obj_tosses = obs_tosses_filtered
+# obj_tosses = sorted(obj_tosses)
+
+# print(obj_tosses)
+for obj_toss in obj_tosses:
+ obj_toss_path = osp.join(results_root, obj_toss)
+ if iter_nums is None:
+ iter_nums = os.listdir(obj_toss_path)
+ for iter_num in iter_nums:
+ obj_toss_iter_path = osp.join(obj_toss_path, iter_num)
+ if bsdf_ids is None:
+ bsdf_ids = os.listdir(obj_toss_iter_path)
+ for bsdf_id in bsdf_ids:
+ obj_toss_iter_bsdf_id_path = osp.join(obj_toss_iter_path, bsdf_id)
+ if osp.exists(obj_toss_iter_bsdf_id_path):
+ # results/duck_1-3/bundlesdf_iteration_1/bundlesdf_id_01/nerf_runs/bundlesdf_id_01/mesh_cleaned.obj
+ mesh_path = osp.join(obj_toss_iter_bsdf_id_path, "nerf_runs", nerf_id, "mesh_cleaned.obj")
+ if osp.exists(mesh_path):
+ # print(f"Found {obj_toss}/{iter_num}/{bsdf_id}")
+ found_list.append(obj_toss_iter_bsdf_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{bsdf_id}")
+ not_found_list.append(obj_toss_iter_bsdf_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{bsdf_id}")
+ not_found_list.append(obj_toss_iter_bsdf_id_path)
+
+print("-----------Found:")
+for found in found_list:
+ print(found)
+print("-----------Not found:")
+for not_found in not_found_list:
+ print(not_found)
+print(f"Found {len(found_list)}")
+
+# ### Check if toss_frames is on for the experiments that were found
+# for obj_toss_iter_bsdf_id_path in found_list:
+# config_path = osp.join(obj_toss_iter_bsdf_id_path, "config_bundlesdf.yml")
+# with open(config_path, "r") as f:
+# config = yaml.safe_load(f)
+# config_nerf_path = osp.join(obj_toss_iter_bsdf_id_path, "config_nerf.yml")
+# with open(config_nerf_path, "r") as f:
+# config_nerf = yaml.safe_load(f)
+# if "toss_frames" in config and len(config["toss_frames"]) > 0 and len(config_nerf["contact_in_cam_dir"])> 0:
+# print(f"{obj_toss_iter_bsdf_id_path} has toss_frames on, num_toss_frames: {len(config['toss_frames'])}")
+# else:
+# print(f"{obj_toss_iter_bsdf_id_path} has toss_frames off")
+
+
\ No newline at end of file
diff --git a/obselete/util_check_bsdf_to_pll_conversion.py b/obselete/util_check_bsdf_to_pll_conversion.py
new file mode 100644
index 0000000..bb1e6a9
--- /dev/null
+++ b/obselete/util_check_bsdf_to_pll_conversion.py
@@ -0,0 +1,66 @@
+### Check which experiments have been run and whether they were run with certain configurations.
+
+import os
+import os.path as osp
+import yaml
+
+results_root = "dair_pll/assets"
+
+iter_nums = ["bundlesdf_iteration_2"]
+# iter_nums = None
+bsdf_ids = ["bundlesdf_id_04"]
+# bsdf_ids = None
+
+found_list = []
+not_found_list = []
+objs = os.listdir(results_root)
+objs_filtered = [obj for obj in objs if "vision_" in obj and osp.isdir(osp.join(results_root, obj))]
+objs = objs_filtered
+objs = sorted(objs)
+for obj in objs:
+ obj_path = osp.join(results_root, obj)
+ tosses = os.listdir(obj_path)
+ obs_tosses_filtered = [obj_toss for obj_toss in tosses if "1" in obj_toss]
+ obj_tosses = obs_tosses_filtered
+ obj_tosses = sorted(obj_tosses)
+ # print(obj_tosses)
+ for obj_toss in obj_tosses:
+ # obj_toss_path = osp.join(results_root, obj, obj_toss, 'geom_for_pll')
+ obj_toss_path = osp.join(results_root, obj, obj_toss, 'toss')
+ if iter_nums is None:
+ iter_nums = os.listdir(obj_toss_path)
+ for iter_num in iter_nums:
+ obj_toss_iter_path = osp.join(obj_toss_path, iter_num)
+ if bsdf_ids is None:
+ bsdf_ids = os.listdir(obj_toss_iter_path)
+ for bsdf_id in bsdf_ids:
+ obj_toss_iter_bsdf_id_path = osp.join(obj_toss_iter_path, bsdf_id)
+ if osp.exists(obj_toss_iter_bsdf_id_path):
+ # print(f"Found {obj_toss}/{iter_num}/{bsdf_id}")
+ found_list.append(obj_toss_iter_bsdf_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{bsdf_id}")
+ not_found_list.append(obj_toss_iter_bsdf_id_path)
+
+print("-----------Found:")
+for found in found_list:
+ print(found)
+print("-----------Not found:")
+for not_found in not_found_list:
+ print(not_found)
+print(f"Found {len(found_list)}")
+
+# ### Check if toss_frames is on for the experiments that were found
+# for obj_toss_iter_bsdf_id_path in found_list:
+# config_path = osp.join(obj_toss_iter_bsdf_id_path, "config_bundlesdf.yml")
+# with open(config_path, "r") as f:
+# config = yaml.safe_load(f)
+# config_nerf_path = osp.join(obj_toss_iter_bsdf_id_path, "config_nerf.yml")
+# with open(config_nerf_path, "r") as f:
+# config_nerf = yaml.safe_load(f)
+# if "toss_frames" in config and len(config["toss_frames"]) > 0 and len(config_nerf["contact_in_cam_dir"])> 0:
+# print(f"{obj_toss_iter_bsdf_id_path} has toss_frames on, num_toss_frames: {len(config['toss_frames'])}")
+# else:
+# print(f"{obj_toss_iter_bsdf_id_path} has toss_frames off")
+
+
\ No newline at end of file
diff --git a/obselete/util_check_octree.py b/obselete/util_check_octree.py
new file mode 100644
index 0000000..b0aa81a
--- /dev/null
+++ b/obselete/util_check_octree.py
@@ -0,0 +1,27 @@
+import open3d as o3d
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+
+# fname = "results/bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-cvwo-occ/nerf_runs/bundlesdf_id_00-cvwo-occ/nerf/octree_boxes_max_level.ply"
+fname = "results/bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-cvwo-occ/nerf_runs/bundlesdf_id_00-cvwo-occ2x2/nerf/octree_boxes_max_level.ply"
+# fname = "results/bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-cvwo-occ/nerf_runs/bundlesdf_id_00-cvwo-occ/nerf/octree_boxes_ray_tracing_level.ply"
+# fname = "results/bakingbox_1/bundlesdf_iteration_1/bundlesdf_id_00-cvwo-occ/nerf_runs/bundlesdf_id_00-cvwo-occ2x2/nerf/octree_boxes_ray_tracing_level.ply"
+
+pcd = o3d.io.read_point_cloud(fname)
+points = np.asarray(pcd.points)
+print(points.shape)
+
+fig = plt.figure()
+ax = fig.add_subplot(111, projection='3d')
+ax.scatter(points[:,0], points[:,1], points[:,2], cmap='viridis', c=points[:,0]+points[:,2])
+# Set labels
+ax.set_xlabel('X')
+ax.set_ylabel('Y')
+ax.set_zlabel('Z')
+# Make axis equal
+ax.set_xlim(-1, 1)
+ax.set_ylim(-1, 1)
+ax.set_zlim(-1, 1)
+
+plt.show()
diff --git a/obselete/util_check_pll_result_generated.py b/obselete/util_check_pll_result_generated.py
new file mode 100644
index 0000000..f2b9a12
--- /dev/null
+++ b/obselete/util_check_pll_result_generated.py
@@ -0,0 +1,70 @@
+### Check which experiments have been run and whether they were run with certain configurations.
+
+import os
+import os.path as osp
+import yaml
+
+# dair_pll/results/vision_bakingbox/bakingbox_1-3/bundlesdf_iteration_1/pll_id_01/geom_for_bsdf/support_points.pt
+# dair_pll/results/vision_bakingbox/bakingbox_1-2/bundlesdf_iteration_1/pll_id_01/geom_for_bsdf/from_support_points/p_toss_frames.pt
+results_root = "dair_pll/results"
+
+iter_nums = ["bundlesdf_iteration_2"]
+# iter_nums = None
+pll_ids = ["pll_id_04"]
+# pll_ids = None
+
+found_list = []
+not_found_list = []
+objs = os.listdir(results_root)
+for obj in objs:
+ obj_path = osp.join(results_root, obj)
+ obj_tosses = os.listdir(obj_path)
+ obs_tosses_filtered = [obj_toss for obj_toss in obj_tosses if "1" in obj_toss]
+ obj_tosses = obs_tosses_filtered
+ obj_tosses = sorted(obj_tosses)
+ # print(obj_tosses)
+ for obj_toss in obj_tosses:
+ obj_toss_path = osp.join(obj_path, obj_toss)
+ if iter_nums is None:
+ iter_nums = os.listdir(obj_toss_path)
+ for iter_num in iter_nums:
+ obj_toss_iter_path = osp.join(obj_toss_path, iter_num)
+ if pll_ids is None:
+ pll_ids = os.listdir(obj_toss_iter_path)
+ for pll_id in pll_ids:
+ obj_toss_iter_pll_id_path = osp.join(obj_toss_iter_path, pll_id)
+ if osp.exists(obj_toss_iter_pll_id_path):
+ # dair_pll/results/vision_cube/cube_1-4/bundlesdf_iteration_1/pll_id_01/geom_for_bsdf/from_mesh_surface/p_toss_frames.pt
+ pt_path = osp.join(obj_toss_iter_pll_id_path, "geom_for_bsdf/from_support_points/p_toss_frames.pt")
+ if osp.exists(pt_path):
+ # print(f"Found {obj_toss}/{iter_num}/{pll_id}")
+ found_list.append(obj_toss_iter_pll_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{pll_id}")
+ not_found_list.append(obj_toss_iter_pll_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{pll_id}")
+ not_found_list.append(obj_toss_iter_pll_id_path)
+
+print("-----------Found:")
+for found in found_list:
+ print(found)
+print("-----------Not found:")
+for not_found in not_found_list:
+ print(not_found)
+print(f"Found {len(found_list)}")
+
+# ### Check if toss_frames is on for the experiments that were found
+# for obj_toss_iter_pll_id_path in found_list:
+# config_path = osp.join(obj_toss_iter_pll_id_path, "config_bundlesdf.yml")
+# with open(config_path, "r") as f:
+# config = yaml.safe_load(f)
+# config_nerf_path = osp.join(obj_toss_iter_pll_id_path, "config_nerf.yml")
+# with open(config_nerf_path, "r") as f:
+# config_nerf = yaml.safe_load(f)
+# if "toss_frames" in config and len(config["toss_frames"]) > 0 and len(config_nerf["contact_in_cam_dir"])> 0:
+# print(f"{obj_toss_iter_pll_id_path} has toss_frames on, num_toss_frames: {len(config['toss_frames'])}")
+# else:
+# print(f"{obj_toss_iter_pll_id_path} has toss_frames off")
+
+
\ No newline at end of file
diff --git a/obselete/util_check_pll_to_bsdf_conversion.py b/obselete/util_check_pll_to_bsdf_conversion.py
new file mode 100644
index 0000000..4e74382
--- /dev/null
+++ b/obselete/util_check_pll_to_bsdf_conversion.py
@@ -0,0 +1,65 @@
+### Check which experiments have been run and whether they were run with certain configurations.
+
+import os
+import os.path as osp
+import yaml
+
+results_root = "geometry"
+
+iter_nums = ["bundlesdf_iteration_1"]
+# iter_nums = None
+pll_ids = ["pll_id_01"]
+# pll_ids = None
+
+found_list = []
+not_found_list = []
+obj_tosses = os.listdir(results_root)
+obs_tosses_filtered = [obj_toss for obj_toss in obj_tosses if "1" in obj_toss]
+obj_tosses = obs_tosses_filtered
+obj_tosses = sorted(obj_tosses)
+# print(obj_tosses)
+for obj_toss in obj_tosses:
+ obj_toss_path = osp.join(results_root, obj_toss)
+ if iter_nums is None:
+ iter_nums = os.listdir(obj_toss_path)
+ for iter_num in iter_nums:
+ obj_toss_iter_path = osp.join(obj_toss_path, iter_num)
+ if pll_ids is None:
+ pll_ids = os.listdir(obj_toss_iter_path)
+ for bsdf_id in pll_ids:
+ obj_toss_iter_bsdf_id_path = osp.join(obj_toss_iter_path, bsdf_id)
+ if osp.exists(obj_toss_iter_bsdf_id_path):
+ # geometry/prism_1-2/bundlesdf_iteration_1/pll_id_00/contact_in_cam/from_support_points
+ mesh_path = osp.join(obj_toss_iter_bsdf_id_path, "contact_in_cam", "from_support_points")
+ if osp.exists(mesh_path):
+ # print(f"Found {obj_toss}/{iter_num}/{bsdf_id}")
+ found_list.append(obj_toss_iter_bsdf_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{bsdf_id}")
+ not_found_list.append(obj_toss_iter_bsdf_id_path)
+ else:
+ # print(f"Did not find {obj_toss}/{iter_num}/{bsdf_id}")
+ not_found_list.append(obj_toss_iter_bsdf_id_path)
+
+print("-----------Found:")
+for found in found_list:
+ print(found)
+print("-----------Not found:")
+for not_found in not_found_list:
+ print(not_found)
+print(f"Found {len(found_list)}")
+
+# ### Check if toss_frames is on for the experiments that were found
+# for obj_toss_iter_bsdf_id_path in found_list:
+# config_path = osp.join(obj_toss_iter_bsdf_id_path, "config_bundlesdf.yml")
+# with open(config_path, "r") as f:
+# config = yaml.safe_load(f)
+# config_nerf_path = osp.join(obj_toss_iter_bsdf_id_path, "config_nerf.yml")
+# with open(config_nerf_path, "r") as f:
+# config_nerf = yaml.safe_load(f)
+# if "toss_frames" in config and len(config["toss_frames"]) > 0 and len(config_nerf["contact_in_cam_dir"])> 0:
+# print(f"{obj_toss_iter_bsdf_id_path} has toss_frames on, num_toss_frames: {len(config['toss_frames'])}")
+# else:
+# print(f"{obj_toss_iter_bsdf_id_path} has toss_frames off")
+
+
\ No newline at end of file
diff --git a/readme.md b/readme.md
index 11c4a29..4b84324 100644
--- a/readme.md
+++ b/readme.md
@@ -1,102 +1,367 @@
-# BundleSDF: Neural 6-DoF Tracking and 3D Reconstruction of Unknown Objects
+# Vysics: Object Reconstruction Under Occlusion by Fusing Vision and Contact-Rich Physics (RSS 2025)
-This is an implementation of our paper published in CVPR 2023
+[[Arxiv](https://arxiv.org/abs/2504.18719)] [[Project page](https://vysics-vision-and-physics.github.io/)]
-[[Arxiv](https://arxiv.org/abs/2303.14158)] [[Project page](https://bundlesdf.github.io/)] [[Supplemental video](https://www.youtube.com/watch?v=5PymzKbKv8w/)]
+## Abstract
+We introduce Vysics, a vision-and-physics framework for a robot to build an expressive geometry and dynamics model of a single rigid body, using a seconds-long RGBD video and the robot's proprioception. While the computer vision community has built powerful visual 3D perception algorithms, cluttered environments with heavy occlusions can limit the visibility of objects of interest. However, observed motion of partially occluded objects can imply physical interactions took place, such as contact with a robot or the environment. These inferred contacts can supplement the visible geometry with “physible geometry,” which best explains the observed object motion through physics. Vysics uses a vision-based tracking and reconstruction method, BundleSDF, to estimate the trajectory and the visible geometry from an RGBD video, and an odometry-based model learning method, Physics Learning Library (PLL), to infer the “physible” geometry from the trajectory through implicit contact dynamics optimization. The visible and “physible” geometries jointly factor into optimizing a signed distance function (SDF) to represent the object shape. Vysics does not require pretraining, nor tactile or force sensors. Compared with vision-only methods, Vysics yields object models with higher geometric accuracy and better dynamics prediction in experiments where the object interacts with the robot and the environment under heavy occlusion.
-# Abstract
-We present a near real-time method for 6-DoF tracking of an unknown object from a monocular RGBD video sequence, while simultaneously performing neural 3D reconstruction of the object. Our method works for arbitrary rigid objects, even when visual texture is largely absent. The object is assumed to be segmented in the first frame only. No additional information is required, and no assumption is made about the interaction agent. Key to our method is a Neural Object Field that is learned concurrently with a pose graph optimization process in order to robustly accumulate information into a consistent 3D representation capturing both geometry and appearance. A dynamic pool of posed memory frames is automatically maintained to facilitate communication between these threads. Our approach handles challenging sequences with large pose changes, partial and full occlusion, untextured surfaces, and specular highlights. We show results on HO3D, YCBInEOAT, and BEHAVE datasets, demonstrating that our method significantly outperforms existing approaches.
+[Vysics intro video](media/vysics_intro_spatial_small.mp4)
-
-
-
-
-
-
-# Bibtex
+## Bibtex
```bibtex
-@InProceedings{bundlesdfwen2023,
-author = {Bowen Wen and Jonathan Tremblay and Valts Blukis and Stephen Tyree and Thomas M\"{u}ller and Alex Evans and Dieter Fox and Jan Kautz and Stan Birchfield},
-title = {{BundleSDF}: {N}eural 6-{DoF} Tracking and {3D} Reconstruction of Unknown Objects},
-booktitle = {CVPR},
-year = {2023},
+@inproceedings{bianchini2025vysics,
+ title={Vysics: Object Reconstruction Under Occlusion by Fusing Vision and Contact-Rich Physics},
+ author={Bibit Bianchini and Minghan Zhu and Mengti Sun and Bowen Jiang and Camillo J. Taylor and Michael Posa},
+ year={2025},
+  month={June},
+ booktitle={Robotics: Science and Systems (RSS)},
+ website={https://vysics-vision-and-physics.github.io/},
}
```
-# Data download
+## Third-party model download
- Download pretrained [weights of segmentation network](https://drive.google.com/file/d/1MEZvjbBdNAOF7pXcq6XPQduHeXB50VTc/view?usp=share_link), and put it under
`./BundleTrack/XMem/saves/XMem-s012.pth`
- Download pretrained [weights of LoFTR outdoor_ds.ckpt](https://drive.google.com/drive/folders/1xu2Pq6mZT5hmFgiYMBT9Zt8h1yO-3SIp), and put it under
`./BundleTrack/LoFTR/weights/outdoor_ds.ckpt`
-- Download HO3D data. We provide the augmented data that you can download [here](https://drive.google.com/drive/folders/1Wk-HZDvUExyUrRn7us4WWEbHnnFHgOAX?usp=share_link). Then download YCB-Video object models from [here](https://drive.google.com/file/d/1-1m7qMMyUHYLhaRiQBbsSRMt5dMRX4jD/view?usp=share_link). Finally, make sure the structure is like below, and update your root path of `HO3D_ROOT` at the top of `BundleTrack/scripts/data_reader.py`
- ```
- HO3D_v3
- ├── evaluation
- ├── models
- └── masks_XMem
- ```
-
-# Docker/Environment setup
+## Docker/Environment setup
- Build the docker image (this only needs to do once and can take some time).
```
cd docker
-docker build --network host -t nvcr.io/nvidian/bundlesdf .
+docker build --network host -t vysics:cu118 .
```
- Start a docker container the first time
```
-cd docker && bash run_container.sh
+cd docker && bash run_custom_container.sh
# Inside docker container, compile the packages which are machine dependent
+cd ${PROJECT_ROOT}
bash build.sh
+
+# Then build pll
+cd dair_pll
+pip install -e .
+
+cd ${PROJECT_ROOT}
+export PYTHONPATH=${PWD}:${PYTHONPATH}
+
+# Configure the shell to use ROS
+source /opt/ros/noetic/setup.bash
```
-# Run on your custom data
-- Prepare your RGBD video folder as below (also refer to the example milk data). You can find an [example milk data here](https://drive.google.com/file/d/1akutk_Vay5zJRMr3hVzZ7s69GT4gxuWN/view?usp=share_link) for testing.
+## Terminology
+One Vysics run is associated with one BundleSDF run with a set of pose estimates, one PLL run, and one BundleSDF NeRF run whose shape jointly optimizes the visible and physible geometries. To keep track of these results, we use the following labels:
+ - `{ASSET_TYPE}` and `{EXP_NUM}` make up an `{ASSET_NAME}` via `{ASSET_TYPE}_{EXP_NUM}`; e.g., `robotocc_bottle` and `1` make up the asset `robotocc_bottle_1`. These are the names of the experiments.
+ - `{BSDF_ID}` and `{NERF_BSDF_ID}`, which can be any strings. These label a BundleSDF run and a BundleSDF NeRF sub-run, respectively. For one "experiment" (denoted by `{ASSET_NAME}`), there can be multiple "runs" (denoted by `{BSDF_ID}`) for generating results. For one BundleSDF "run" (denoted by `{BSDF_ID}`), there can be multiple "NeRF runs" (denoted by `{NERF_BSDF_ID}`), always including `{BSDF_ID}` itself. This structure is inherited from the BundleSDF repo design.
+ - `{PLL_ID}` can be any string. It similarly labels a PLL run. Many PLL runs can be associated with the trajectory results of one BundleSDF run.
+ - `{CYCLE_ITER}` is a number (1 or greater) that represents how many iterations of BundleSDF pose estimates are in an experiment's history. In the Vysics RSS 2025 paper, all results were cycle iteration 1, though it is possible to reach higher cycle iterations by running BundleSDF a second time as a full BundleSDF run (with a new `{BSDF_ID}`), not just a NeRF-only run (a new `{NERF_BSDF_ID}` inheriting an existing `{BSDF_ID}`); this creates a new set of pose estimates that factor in the physics insights from the last associated PLL run. All of the following count as cycle iteration 1 results:
+ - Running BundleSDF for the first time.
+ - Running PLL on a BundleSDF cycle iteration 1 result's pose estimates.
+ - Running BundleSDF NeRF-only, using PLL cycle iteration 1 results.
+
+## Structure of Input Data
+Our datasets are converted from rosbags; see the later sections for a detailed explanation of that process. To make it easier to get started, we provide sample data that are already processed and ready to use. Download them from the following links:
+- [RGBD data](https://drive.google.com/file/d/1zxacGHDthBNOG4REeZ690BsS2fh1rKJV/view?usp=sharing)
+- [Robot data](https://drive.google.com/file/d/1NIC812JiFWl6HeBUdagdTd1FR1bnSr1Q/view?usp=sharing)
+- [Object shape data](https://drive.google.com/file/d/1eJwmfBEL7pL500-4vI3Uwk2eZiEYBo2N/view?usp=sharing) (for evaluation only, save to somewhere you like for now)
+
+The downloaded data should be extracted and put in the following file structure:
```
-root
- ├──rgb/ (PNG files)
- ├──depth/ (PNG files, stored in mm, uint16 format. Filename same as rgb)
- ├──masks/ (PNG files. Filename same as rgb. 0 is background. Else is foreground)
- └──cam_K.txt (3x3 intrinsic matrix, use space and enter to delimit)
+PROJECT_ROOT
+├── data
+│ └── {ASSET_NAME} (e.g., robotocc_bakingbox_1)
+│ ├── rgb/ (PNG files)
+│ ├── depth/ (PNG files, stored in mm, uint16 format. Filename same as rgb)
+│ ├── masks/ (PNG files. Filename same as rgb. 0 is background. Else is foreground)
+│ └── cam_K.txt (3x3 intrinsic matrix, use space and enter to delimit)
+└── cnets-data-generation
+ └── dataset
+ └── {ASSET_NAME}
+ ├── bundlesdf_timestamps.txt
+ ├── ee_positions.txt
+ ├── joint_angles.txt
+ ├── joint_efforts.txt
+ ├── joint_names.txt
+ ├── joint_times.txt
+ ├── joint_velocities.txt
+ ├── synced_ee_positions.txt
+ ├── synced_joint_angles.txt
+ ├── synced_joint_efforts.txt
+ └── synced_joint_velocities.txt
```
+There are other data files that are needed for inference, but they are already included in the repo. These include:
+```
+PROJECT_ROOT
+├── cnets-data-generation
+│ ├── table_calibration/table_heights.yaml
+│ ├── robot_dynamics
+│ │ ├── franka_control_node.yaml
+│ │ └── franka_hw_controllers.yaml
+│ └── assets
+│ ├── true_urdfs/bsdf_mesh_average_dynamics.urdf
+│ ├── cam_K_robotocc.txt
+│ ├── config.yaml
+│ └── realsense_pose_robotocc.yaml
+└── dair_pll
+ └── assets
+ ├── precomputed_vision_functions/...
+ ├── franka_with_ee.urdf
+ ├── franka_without_geom_with_ee.urdf
+ └── vision_template.urdf
+```
+If you create your own dataset, you may need to modify these files.
+
+## Running Experiments
+A script for running a Vysics experiment after obtaining/generating the dataset is provided as `run_all.sh`. It contains 6 steps:
+1. **Run BundleSDF with tracking and NeRF:** generates pose estimates and a vision-only shape estimate. _Run from `vysics` repo._
+2. **Convert from BundleSDF to PLL:** converts trajectory to PLL format and extracts vision insights to factor into physics learning. _Run from this `cnets-data-generation` repo._
+3. **Run PLL:** generates a physics-based shape estimate and other dynamics parameter estimates. _Run from `dair_pll` repo._
+4. **Convert from PLL to BundleSDF:** moves physics-based shape information into vision input directory. _Run from this `cnets-data-generation` repo._
+5. **Run BundleSDF with NeRF only:** generates a vision- and physics-informed new shape estimate. _Run from `vysics` repo._
+6. **Convert from BundleSDF to PLL:** required step before evaluating. _Run from this `cnets-data-generation` repo._
+
-Due to license issues, we are not able to include [XMem](https://github.com/hkchengrex/XMem) in this codebase for running segmentation online. If you are interested in doing so, please download the code separately and add a wrapper in `segmentation_utils.py`.
+### Running BundleSDF
+Start BundleSDF runs from the `vysics` repo.
-- Run your RGBD video (specify the video_dir and your desired output path). There are 3 steps.
+For running a BundleSDF _tracking and NeRF_ run from scratch with BundleSDF ID `{BSDF_ID}`:
+```
+python bundlenets/run_custom.py --vision-asset={ASSET_NAME} --run-name={BSDF_ID} --use-gui=0 --debug-level=2
+```
+
+For running a BundleSDF NeRF run only with NeRF ID `{NERF_BSDF_ID}` under an existing BundleSDF tracking result with ID `{BSDF_ID}` using supervision from PLL run `{PLL_ID}`:
+```
+python bundlenets/run_custom.py --vision-asset={ASSET_NAME} --tracking-run-name={BSDF_ID} --share-tracking --run-name={NERF_BSDF_ID} --pll-id={PLL_ID} --use-gui=0 --debug-level=2
```
-# 1) Run joint tracking and reconstruction
-python run_custom.py --mode run_video --video_dir /home/bowen/debug/2022-11-18-15-10-24_milk --out_folder /home/bowen/debug/bundlesdf_2022-11-18-15-10-24_milk --use_segmenter 1 --use_gui 1 --debug_level 2
+For both, add `--cycle-iteration={CYCLE_ITER}` as necessary.
-# 2) (Optinal) Run global refinement post-processing to refine the mesh
-python run_custom.py --mode global_refine --video_dir /home/bowen/debug/2022-11-18-15-10-24_milk --out_folder /home/bowen/debug/bundlesdf_2022-11-18-15-10-24_milk # Change the path to your video_directory
-# 3) Get the auto-cleaned mesh
-python run_custom.py --mode get_mesh --video_dir /home/bowen/debug/2022-11-18-15-10-24_milk --out_folder /home/bowen/debug/bundlesdf_2022-11-18-15-10-24_milk
+### Converting from BundleSDF to PLL
+After iteration 1 of BundleSDF run with BundleSDF ID `{BSDF_ID}`:
```
+python conversion_bsdf_to_pll.py --vision-asset={ASSET_NAME} --bundlesdf-id={BSDF_ID}
+```
+By default, this assumes `{BSDF_ID}` is a cycle iteration 1 BundleSDF run, and that the NeRF geometry result to convert is under the shared ID `{BSDF_ID}`. To change either, include the flags `--cycle-iteration={CYCLE_ITER}` and/or `--nerf-bundlesdf-id={NERF_BSDF_ID}`.
+
+BundleSDF-to-PLL conversion does the following:
+ - **Pose conversion:** Converts the camera-frame, 4x4 homogeneous pose estimate transforms into PLL format, which uses world-frame coordinates, represents orientation with quaternions, includes velocity estimates, and includes Franka states if the experiment featured any robot interactions (see the sketch below).
+ - **Visible geometry insights:** Generates sampled points on the visible portion of the BundleSDF shape estimate's convex hull.
+
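+To make the pose conversion concrete, here is a minimal sketch of the idea (illustrative only; the real logic, including velocity estimation and Franka states, lives in `conversion_bsdf_to_pll.py`). It assumes a known 4x4 camera-to-world transform `T_world_cam` and reads one of the per-frame 4x4 pose files from `ob_in_cam/`; PLL expects a `[qw, qx, qy, qz]` quaternion followed by the position in meters.
+```python
+import numpy as np
+from scipy.spatial.transform import Rotation as R
+
+def bsdf_pose_to_pll(pose_file, T_world_cam):
+    T_cam_obj = np.loadtxt(pose_file)        # 4x4 object pose in the camera frame
+    T_world_obj = T_world_cam @ T_cam_obj    # object pose in the world frame
+    quat_xyzw = R.from_matrix(T_world_obj[:3, :3]).as_quat()
+    quat_wxyz = np.roll(quat_xyzw, 1)        # reorder to [qw, qx, qy, qz]
+    position = T_world_obj[:3, 3]            # [x, y, z] in meters
+    return np.concatenate([quat_wxyz, position])
+```
+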
+There are other flags that may be of interest for faster conversions; see [the BundleSDF-to-PLL converter](./conversion_bsdf_to_pll.py) for documentation.
+
+### Running PLL
+Start a PLL run from the `dair_pll` folder with:
+```
+python examples/contactnets_vision.py --vision-asset={ASSET_NAME} --run-name={PLL_ID} --bundlesdf-id={BSDF_ID}
+```
+By default, this uses the pose _and_ geometry estimates from `{BSDF_ID}` as both the BundleSDF and NeRF IDs in cycle iteration 1. Add `--nerf-bundlesdf-id={NERF_BSDF_ID}` and/or `--cycle-iteration={CYCLE_ITER}` as necessary.
+
+There are other flags that may be of interest such as adjusting loss weights; see the Vysics PLL script [contactnets_vision.py](./dair_pll/examples/contactnets_vision.py) for documentation.
+
-- Finally the results will be dumped in the `out_folder`, including the tracked poses stored in `ob_in_cam/` and reconstructed mesh with texture `textured_mesh.obj`.
+### Converting from PLL to BundleSDF
+After iteration 1 of PLL run with PLL ID `{PLL_ID}`:
+```
+python conversion_pll_to_bsdf.py --vision-asset={ASSET_NAME} --pll-id={PLL_ID}
+```
+Add the appropriate `--cycle-iteration={CYCLE_ITER}` if necessary.
-
+PLL-to-BundleSDF conversion does the following:
+ - **Physible geometry insights:** Moves geometry outputs from PLL into input directories accessible to future BundleSDF runs.
+## Structure of Result Files
+With the introduced terminology, locations for results are:
+ - Pose estimates come from running BundleSDF. These are represented as 4x4 homogeneous transforms of the object pose in the camera frame. The estimates are located within a BundleSDF output folder:
+ ```
+ results/{ASSET_NAME}/bundlesdf_iteration_{CYCLE_ITER}/bundlesdf_id_{BSDF_ID}/ob_in_cam/
+ ```
+ - To be usable by downstream PLL runs, these poses need to be converted into PLL format which will be located at a PLL input folder:
+ ```
+ dair_pll/assets/vision_{ASSET_TYPE}/{ASSET_NAME}/toss/bundlesdf_iteration_{CYCLE_ITER}/bundlesdf_id_{BSDF_ID}/{EXP_NUM}.pt
+ ```
+ - The `toss` subdirectory indicates trajectories where all of the contained dynamics can be explained by object-table and object-robot interactions, compared to the `full` subdirectory whose trajectories can include unmodeled human interventions. For Vysics RSS 2025 experiments, there were no human interventions, so `toss` and `full` trajectories are the same in length and content.
+ - Visible shape estimates come from running a NeRF experiment under a BundleSDF pose tracking experiment. The shape estimate is located within a BundleSDF output folder:
+ ```
+ results/{ASSET_NAME}/bundlesdf_iteration_{CYCLE_ITER}/bundlesdf_id_{BSDF_ID}/nerf_runs/bundlesdf_id_{NERF_BSDF_ID}/mesh_cleaned.obj (normalized)
+
+ results/{ASSET_NAME}/bundlesdf_iteration_{CYCLE_ITER}/bundlesdf_id_{BSDF_ID}/nerf_runs/bundlesdf_id_{NERF_BSDF_ID}/textured_mesh.obj (true scale)
+ ```
+ - To be usable by downstream PLL runs, this visible mesh needs to produce a set of points sampled on the visible portions of the convex hull of the above .obj file, subject to our vision-based PLL supervision loss (paper Eq. (6)). This set of points, along with other geometry information, will be located within a PLL input folder:
+ ```
+ dair_pll/assets/vision_{ASSET_TYPE}/{ASSET_NAME}/geom_for_pll/bundlesdf_iteration_{CYCLE_ITER}/bundlesdf_id_{BSDF_ID}/
+ ```
+ - Physics parameters, including physible shape, come from running a PLL experiment and are located in a PLL output folder: **[TODO: `with_bundlesdf_mesh.urdf` is misleadingly named. It actually uses the PLL-estimated mesh.]**
+ ```
+ dair_pll/results/vision_{ASSET_TYPE}/{ASSET_NAME}/bundlesdf_iteration_{CYCLE_ITER}/pll_id_{PLL_ID}/urdfs/
+ - with_bundlesdf_mesh.urdf
+ - body_best.obj
+ ```
+ - To be usable by downstream BundleSDF runs, the PLL geometry estimate needs to produce a set of points and associated signed distances subject to our support point loss (paper Eq. (8)) and a set of points and associated signed distance lower bounds subject to our hyperplane-constrained loss (paper Eq. (11)). These sets of points, along with other geometry information, will be located within a BundleSDF input folder:
+ ```
+ geometry/{ASSET_NAME}/bundlesdf_iteration_{CYCLE_ITER}/pll_id_{PLL_ID}/
+ ```
+   - This BundleSDF input directory contains `from_support_points` and `from_mesh_surface` subfolders. In our experience, it worked well to supervise via the support point loss (paper Eq. (8)) with data in `from_support_points` and via the hyperplane-constrained loss (paper Eq. (11)) with data in `from_mesh_surface`. This helps ensure the stricter support point loss is applied on points that are confidently hypothesized to be in contact, while the less strict hyperplane-constrained loss can still be applied more broadly without encouraging unnecessary geometric convexity where visible signals may say otherwise.
+
+## Evaluation
+### Align the Ground Truth Mesh to the Estimation
+We use a combination of manual alignment and ICP to align the meshes. You can use the following script to do the manual part via the keyboard; the ICP refinement then follows automatically:
+```
+python mesh_processing.py manual_icp --vision-asset={ASSET_NAME} --bundlesdf-id={BSDF_ID} --nerf-bundlesdf-id={NERF_BSDF_ID} --cycle-iteration=1
+```
+Alternatively, you can manually align the meshes using external tools (e.g., MeshLab) and put the ground truth mesh, aligned with the `textured_mesh.obj` output mesh, at the following path:
+```
+results/{ASSET_NAME}/bundlesdf_iteration_{CYCLE_ITER}/bundlesdf_id_{BSDF_ID}/nerf_runs/bundlesdf_id_{NERF_BSDF_ID}/true_geom_aligned_meshlab.obj
+```
+Then run `mesh_processing.py` with an extra flag `--meshlab`, which will only do the ICP part.
-# Run on HO3D dataset
+Either way, a folder for saving the evaluation results of this experiment will be created:
+```
+cnets-data-generation/evaluation/{ASSET_NAME}_bsdf_{BSDF_ID}_{NERF_BSDF_ID}_{CYCLE_ITER}/
```
-# Run BundleSDF to get the pose and reconstruction results
-python run_ho3d.py --video_dirs /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3/evaluation/SM1 --out_dir /home/bowen/debug/ho3d_ours
+and the aligned ground truth mesh will be saved to `true_geom_aligned_assist.obj` under this folder.
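+
+For reference, the automatic ICP refinement that follows the manual (or MeshLab) alignment can be sketched as below. This is illustrative only; the actual implementation is in `mesh_processing.py`, and the file name for the roughly aligned ground truth mesh is a placeholder.
+```python
+import numpy as np
+import open3d as o3d
+
+# Sample point clouds from the estimated mesh and the roughly aligned ground truth mesh.
+est = o3d.io.read_triangle_mesh("textured_mesh.obj").sample_points_uniformly(50000)
+gt = o3d.io.read_triangle_mesh("true_geom_roughly_aligned.obj").sample_points_uniformly(50000)
+
+# Refine the rough alignment with point-to-point ICP.
+result = o3d.pipelines.registration.registration_icp(
+    gt, est, 0.01, np.eye(4),   # 1 cm correspondence threshold, identity initialization
+    o3d.pipelines.registration.TransformationEstimationPointToPoint())
+print(result.transformation)    # apply this transform to the ground truth mesh before saving
+```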
-# Benchmark the output results
-python benchmark_ho3d.py --video_dirs /mnt/9a72c439-d0a7-45e8-8d20-d7a235d02763/DATASET/HO3D_v3/evaluation/SM1 --out_dir /home/bowen/debug/ho3d_ours
+### Geometry Evaluation for an Experiment
+Evaluating a Vysics or BundleSDF run for geometry metrics can be done with [the evaluation script](evaluate.py) via:
```
+python evaluate.py --vision-asset={ASSET_NAME} --bundlesdf-id={BSDF_ID} --nerf-bundlesdf-id={NERF_BSDF_ID} --cycle-iteration={CYCLE_ITER}
+```
+
+Other flags of interest include `--do-videos` for generating helpful videos, among others. See [the evaluation script](evaluate.py) for documentation.
+
+This script saves evaluation results to the evaluation folder mentioned above, including `results.yaml`, which reports the geometric errors.
+
+### Dynamics Evaluation for an Experiment
+Generate dynamics predictions for Vysics, BundleSDF, PLL, and/or ground truth geometry with robot interactions using [the robot dynamics predictions script](robot_dynamics_predictions.py) via:
+```
+python robot_dynamics_predictions.py gen {ASSET_NAME} vysics bsdf pll gt --debug
+```
+...where you can specify any subset of the `vysics`, `bsdf`, `pll`, and `gt` flags to run simulation only on the outputs of those methods.
+
+> ⚠️ **NOTE:** This requires editing / adding an entry to the `PLL_BSDF_NERF_IDS_FROM_VISION_ASSET` dictionary in [robot_dynamics_predictions.py](robot_dynamics_predictions.py) so it knows what experiment runs (specified by `{BSDF_ID}`, `{NERF_BSDF_ID}`, and `{PLL_ID}`) should be associated with `vysics`, `bsdf`, and `pll` for the given `{ASSET_NAME}`.
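+
+As a purely hypothetical illustration of what such an entry could look like (the real key structure is defined by the existing entries in [robot_dynamics_predictions.py](robot_dynamics_predictions.py); mirror those, not this sketch):
+```python
+# Hypothetical shape only -- copy the structure of the existing entries instead.
+PLL_BSDF_NERF_IDS_FROM_VISION_ASSET = {
+    "{ASSET_NAME}": {
+        "vysics": ("{BSDF_ID}", "{NERF_BSDF_ID}", "{PLL_ID}"),
+        "bsdf": ("{BSDF_ID}", None, None),
+        "pll": (None, None, "{PLL_ID}"),
+    },
+}
+```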
+
+This script creates a folder at:
+```
+cnets-data-generation/robot_dynamics/{ASSET_NAME}/
+```
+...containing numerical and visual simulation results.
+
+
+## Analyzing Results
+### Quantitative Exports
+Analyze results from many runs with [the gather results script](gather_results.py) via:
+```
+python gather_results.py gather
+```
+This traverses all of the subfolders within each of these directories:
+```
+cnets-data-generation/evaluation/
+cnets-data-generation/robot_dynamics/
+```
+...and writes yaml files into the `evaluation` directory for each experiment type, e.g. `bsdf_pll_robotocc.yaml` for Vysics, `bsdf_robotocc.yaml` for BundleSDF, and `pll_robotocc.yaml` for PLL.
+
+> ⚠️ **NOTE:** This requires editing / adding lines to the `process_gather_command` function to help it interpret what subfolders of the `evaluation` directory should be included under Vysics, BundleSDF, PLL, or other labels you define. You can use different matching patterns for the experiments and export them to different yaml files as desired.
+
+### Visual Exports
+Generate plots from the results exported by the `gather` command above with [the gather results script](gather_results.py) via:
+```
+python gather_results.py plot
+```
+This opens every yaml file expected from the `gather` command and generates plots in the plots directory:
+```
+cnets-data-generation/plots/
+```
+Currently this creates an `MMDD` subdirectory for the given month and day, within which the geometry and dynamics plots are saved.
+
+
+# Creating a New Dataset
+This repo includes [a dataset creation script](create_dataset.py) to generate new datasets from ROS bag inputs. There are a few manual steps required in addition to running this script.
+
+1. Define a new `{ASSET_NAME}` and annotate the interactions in the dataset by editing the experiment configuration file at [assets/config.yaml](./assets/config.yaml). This requires adding the following keys/entries into the file:
+ ```
+ dataset:
+ {ASSET_TYPE}:
+ 0: # Leave toss 0 blank
+ 1: {BAG_NUM}
+ ... # Repeat for every {EXP_NUM} for the given {ASSET_TYPE}
+ tosses:
+ {ASSET_TYPE}:
+ - toss: 0 # Leave toss 0 details blank
+ - toss: 1
+ start_time:
+ secs: X
+ nsecs: Y
+ end_time:
+ secs: Z
+ nsecs: W
+ ... # Repeat for every {EXP_NUM} for the given {ASSET_TYPE}
+ ```
+2. Put `raw_{BAG_NUM}.bag` into the `cnets-data-generation/rosbags/` folder. As designed, this ROS bag needs to contain the following topics:
+ - `/camera/aligned_depth_to_color/image_raw` for depth images
+ - `/camera/color/image_raw` for RGB images
+ - `/panda/joint_states` for Franka joint states
+
+   For further customization of how data is extracted from ROS bags, edit [the ROS bag processor](rosbag_processor.py) and how that functionality gets called from [the dataset creator](create_dataset.py); a rough reading sketch follows below.
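+
+   As a hedged sketch of reading these topics directly with the standard `rosbag` Python API (independent of [the ROS bag processor](rosbag_processor.py), whose actual implementation may differ):
+   ```python
+   # Hedged sketch: iterate the image and joint-state topics of a recorded bag.
+   import rosbag
+   from cv_bridge import CvBridge
+
+   bridge = CvBridge()
+   with rosbag.Bag("rosbags/raw_{BAG_NUM}.bag") as bag:
+       for topic, msg, t in bag.read_messages(
+               topics=["/camera/color/image_raw",
+                       "/camera/aligned_depth_to_color/image_raw",
+                       "/panda/joint_states"]):
+           if topic == "/panda/joint_states":
+               print(t.to_sec(), list(msg.position))
+           else:
+               image = bridge.imgmsg_to_cv2(msg)  # RGB (8-bit) or depth (16-bit) image
+   ```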
+
+3. Create files for the camera parameters. For Vysics data, we save the extrinsics at `cnets-data-generation/assets/realsense_pose_robotocc.yaml` and the intrinsics at `cnets-data-generation/assets/cam_K_robotocc.txt`.
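+
+   The intrinsics file is assumed to hold the nine entries of the 3x3 pinhole camera matrix `K` (confirm against how `create_dataset.py` reads it); a quick check could look like:
+   ```python
+   # Hedged sketch: load and sanity-check the camera intrinsics matrix.
+   import numpy as np
+
+   K = np.loadtxt("cnets-data-generation/assets/cam_K_robotocc.txt").reshape(3, 3)
+   fx, fy = K[0, 0], K[1, 1]   # focal lengths (pixels)
+   cx, cy = K[0, 2], K[1, 2]   # principal point (pixels)
+   print(f"fx={fx:.1f}, fy={fy:.1f}, cx={cx:.1f}, cy={cy:.1f}")
+   ```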
+
+4. (Optional) Compensate for camera depth offsets. There can be a bias in the depth camera returns such that they do not fully line up with the camera extrinsic calibration, which usually relies on the RGB sensor. The magnitude of this bias can be a few millimeters. If high precision is desired, you may want to calibrate the depth bias against external reference measurements (e.g., from TagSLAM) if available.
+
+   To calibrate the depth bias, uncomment `inspect_camera_alignments.interactive_offset_adjustment()` in `create_dataset.py` when generating the data for a new experiment for the first time. Then run
+ ```
+ python create_dataset.py --vision-asset=${ASSET_NAME}
+ ```
+ An interactive GUI will guide you to find the optimal bias value.
+
+   Use the result to hardcode `self.depth_offset_mm` in `self._create_images()` in `create_dataset.py`, then comment out `inspect_camera_alignments.interactive_offset_adjustment()` again.
+
+5. Generate the dataset for an asset from the rosbag via:
+ ```
+ python create_dataset.py --vision-asset={ASSET_NAME}
+ ```
+   This writes RGB images, depth images, and the camera intrinsics to a `data/{ASSET_NAME}/` folder. It also writes the timestamps associated with each image, the robot states, and the robot states synchronized to each image timestamp to a `cnets-data-generation/dataset/{ASSET_NAME}/` folder.
+
+   When creating the dataset, the script may call `compute_table_offsets.py`, which estimates the height of the table surface from the data and saves the result to
+ ```
+ cnets-data-generation/table_calibration/
+ - {ASSET_NAME}.txt
+ - {ASSET_NAME}.png
+ - {ASSET_NAME}_eps.png
+ ```
+ You can also run the script manually with matplotlib-backed GUI guidance:
+ ```
+ python compute_table_offsets.py single --vision-asset={ASSET_NAME}
+ ```
+
+6. Combine the table height information into a single file.
+ A subsequent call to:
+ ```
+ python compute_table_offsets.py combine
+ ```
+   ...will combine the individual experiment results into [table_heights.yaml](table_calibration/table_heights.yaml), which contains a key for each `{ASSET_TYPE}` with sub-keys for every contained `{EXP_NUM}`, each holding a float for the table height to use in that experiment.
+   To avoid running `python compute_table_offsets.py single --vision-asset={ASSET_NAME}` for every `{ASSET_NAME}`, you can reuse a single height for many `{ASSET_NAME}`s by editing the `get_table_height_from_log` function in [the offset helper script](compute_table_offsets.py) to infer from the `{ASSET_NAME}` which offset to use. The `combine` command is still required after these changes in order to write the values to [table_heights.yaml](table_calibration/table_heights.yaml).
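+
+   A minimal sketch of consuming the combined file (the asset type and experiment number here are hypothetical placeholders):
+   ```python
+   # Hedged sketch: table_heights.yaml maps {ASSET_TYPE} -> {EXP_NUM} -> float height.
+   import yaml
+
+   with open("table_calibration/table_heights.yaml", "r") as f:
+       table_heights = yaml.safe_load(f)
+   height = table_heights["bakingbox"][1]  # hypothetical {ASSET_TYPE} and {EXP_NUM}
+   ```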
-# Acknowledgement
+7. Create object masks. The masks need to be in the `data/{ASSET_NAME}/masks/` folder. This can be done in a number of ways; ultimately [XMem](https://github.com/hkchengrex/XMem) worked sufficiently well for us (see the BundleSDF [repo](https://github.com/NVlabs/BundleSDF) for reference).
-We would like to thank Jeff Smith for helping with the code release. Marco Foco and his team for providing the test data on the static scene.
+After these steps, experiments can be run on the new `{ASSET_NAME}` via the [experiment instructions](#running-experiments).
+## TO DO:
+- [x] link to project page
+- [ ] make submodules public
+- [x] clean setup of docker/virtual environment
+- [ ] clean up obsolete files
+- [x] publish sample data
+- [ ] publish the complete dataset
+- Documentation:
+ - [x] running experiments
+ - [x] evaluation
+ - [x] dataset creation
+- Known issues:
+ - [ ] Fix mesh overlay rendering misalignment.
-# Contact
-For questions, please contact Bowen Wen (bowenw@nvidia.com)
\ No newline at end of file
+## Acknowledgement
+Thanks to the great open-source repositories: [BundleSDF](https://github.com/NVlabs/BundleSDF), [PLL](https://github.com/DAIRLab/dair_pll).
\ No newline at end of file
diff --git a/run_all.sh b/run_all.sh
new file mode 100755
index 0000000..2521cc2
--- /dev/null
+++ b/run_all.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+
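+# Batch driver for the full Vysics cycle on each asset listed below:
+#   1. BundleSDF tracking/reconstruction (bundlenets/run_custom.py).
+#   2. Convert BundleSDF outputs to PLL inputs (conversion_bsdf_to_pll.py).
+#   3. PLL training (dair_pll/examples/contactnets_vision.py).
+#   4. Convert PLL outputs back to BundleSDF inputs (conversion_pll_to_bsdf.py).
+#   5. Physics-informed BundleSDF run (mode=test_loss) and a final conversion.
+# Failures of any step are appended to $ERROR_LOG and summarized at the end.
+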
+export PYTHONPATH=${PWD}:${PYTHONPATH}
+# export LD_LIBRARY_PATH=/opt/conda/lib:${LD_LIBRARY_PATH} # changing LD_LIBRARY_PATH is dangerous and is now not needed
+source /opt/ros/noetic/setup.bash
+
+assets=(bakingbox_1)
+ERROR_LOG="batch_errors.log"
+
+BSDF_ID="00-onescript"
+PLL_ID="t0-onescript"
+NERF_BSDF_ID="00-t0-onescript"
+
+> $ERROR_LOG # Clear the error log at start
+
+for asset in "${assets[@]}"; do
+
+  start_time=$(date '+%s')  # Per-asset start, used for the total runtime report at the end of the loop.
+
+  dt_start=$(date '+%m/%d/%Y %H:%M:%S')
+  start_seconds=$(date '+%s')
+  echo "============== Start time : $dt_start"
+ # robotocc exps
+ python bundlenets/run_custom.py --run-name=${BSDF_ID} --vision-asset=robotocc_${asset} --cycle-iteration=1 --use-gui=0 --debug-level=2 --clear-data
+
+ EXIT_STATUS=$?
+ if [ $EXIT_STATUS -ne 0 ]; then
+ echo "ERROR: BundleSDF Processing failed for $asset (exit code: $EXIT_STATUS)" >> $ERROR_LOG
+ fi
+
+ dt_end=$(date '+%m/%d/%Y %H:%M:%S')
+ end_seconds=$(date '+%s')
+ diff_seconds=$((end_seconds-start_seconds))
+ hours=$((diff_seconds / 3600))
+ minutes=$((diff_seconds % 3600 / 60))
+ seconds=$((diff_seconds % 60))
+ echo "============== End time : $dt_end"
+ echo "============== Time taken by bundlesdf: ${hours}h:${minutes}m:${seconds}s"
+
+ dt_start=$(date '+%m/%d/%Y %H:%M:%S')
+ start_seconds=$(date '+%s')
+ echo "============== Start time : $dt_start"
+ cd cnets-data-generation
+ python conversion_bsdf_to_pll.py --vision-asset=robotocc_${asset} --bundlesdf-id=${BSDF_ID} --nerf-bundlesdf-id=${BSDF_ID} --cycle-iteration=1 --remote --noshow --skip-videos
+  EXIT_STATUS=$?  # Capture the python exit code before cd changes $?.
+  cd ..
+
+ if [ $EXIT_STATUS -ne 0 ]; then
+ echo "ERROR: conversion_bsdf_to_pll Processing failed for $asset (exit code: $EXIT_STATUS)" >> $ERROR_LOG
+ fi
+
+ dt_end=$(date '+%m/%d/%Y %H:%M:%S')
+ end_seconds=$(date '+%s')
+ diff_seconds=$((end_seconds-start_seconds))
+ hours=$((diff_seconds / 3600))
+ minutes=$((diff_seconds % 3600 / 60))
+ seconds=$((diff_seconds % 60))
+ echo "============== End time : $dt_end"
+ echo "============== Time taken to convert bsdf to pll: ${hours}h:${minutes}m:${seconds}s"
+
+ dt_start=$(date '+%m/%d/%Y %H:%M:%S')
+ start_seconds=$(date '+%s')
+ echo "============== Start time : $dt_start"
+
+ cd dair_pll
+ python examples/contactnets_vision.py --remote --run-name=${PLL_ID}_robotocc_${asset} --vision-asset=robotocc_${asset} --cycle-iteration=1 --bundlesdf-id=${BSDF_ID} --nerf-bundlesdf-id=${BSDF_ID} --skip-videos=all --force-video-epoch-interval=100 --clear-data
+  EXIT_STATUS=$?  # Capture the python exit code before cd changes $?.
+  cd ..
+
+ if [ $EXIT_STATUS -ne 0 ]; then
+ echo "ERROR: contactnets_vision Processing failed for $asset (exit code: $EXIT_STATUS)" >> $ERROR_LOG
+ fi
+
+ dt_end=$(date '+%m/%d/%Y %H:%M:%S')
+ end_seconds=$(date '+%s')
+ diff_seconds=$((end_seconds-start_seconds))
+ hours=$((diff_seconds / 3600))
+ minutes=$((diff_seconds % 3600 / 60))
+ seconds=$((diff_seconds % 60))
+ echo "============== End time : $dt_end"
+ echo "============== Time taken for pll training: ${hours}h:${minutes}m:${seconds}s"
+
+ dt_start=$(date '+%m/%d/%Y %H:%M:%S')
+ start_seconds=$(date '+%s')
+ echo "============== Start time : $dt_start"
+
+ cd cnets-data-generation
+ python conversion_pll_to_bsdf.py --vision-asset=robotocc_${asset} --pll-id=${PLL_ID}_robotocc_${asset} --cycle-iteration=1
+  EXIT_STATUS=$?  # Capture the python exit code before cd changes $?.
+  cd ..
+
+ if [ $EXIT_STATUS -ne 0 ]; then
+ echo "ERROR: conversion_pll_to_bsdf Processing failed for $asset (exit code: $EXIT_STATUS)" >> $ERROR_LOG
+ fi
+
+ dt_end=$(date '+%m/%d/%Y %H:%M:%S')
+ end_seconds=$(date '+%s')
+ diff_seconds=$((end_seconds-start_seconds))
+ hours=$((diff_seconds / 3600))
+ minutes=$((diff_seconds % 3600 / 60))
+ seconds=$((diff_seconds % 60))
+ echo "============== End time : $dt_end"
+ echo "============== Time taken to convert pll to bsdf: ${hours}h:${minutes}m:${seconds}s"
+
+ dt_start=$(date '+%m/%d/%Y %H:%M:%S')
+ start_seconds=$(date '+%s')
+ echo "============== Start time : $dt_start"
+
+  python bundlenets/run_custom.py --run-name=${NERF_BSDF_ID} --tracking-run-name=${BSDF_ID} --pll-id=${PLL_ID}_robotocc_${asset} --vision-asset=robotocc_${asset} --mode=test_loss --share-tracking --cycle-iteration=1 --clear-data
+
+ EXIT_STATUS=$?
+ if [ $EXIT_STATUS -ne 0 ]; then
+ echo "ERROR: BundleSDF 2 Processing failed for $asset (exit code: $EXIT_STATUS)" >> $ERROR_LOG
+ fi
+
+ dt_start=$(date '+%m/%d/%Y %H:%M:%S')
+ start_seconds=$(date '+%s')
+ echo "============== Start time : $dt_start"
+ cd cnets-data-generation
+ python conversion_bsdf_to_pll.py --vision-asset=robotocc_${asset} --bundlesdf-id=${BSDF_ID} --nerf-bundlesdf-id=${NERF_BSDF_ID} --cycle-iteration=1 --remote --noshow --skip-videos
+  EXIT_STATUS=$?  # Capture the python exit code before cd changes $?.
+  cd ..
+
+ if [ $EXIT_STATUS -ne 0 ]; then
+ echo "ERROR: conversion_bsdf_to_pll Processing failed for $asset (exit code: $EXIT_STATUS)" >> $ERROR_LOG
+ fi
+
+ dt_end=$(date '+%m/%d/%Y %H:%M:%S')
+ end_seconds=$(date '+%s')
+ diff_seconds=$((end_seconds-start_seconds))
+ hours=$((diff_seconds / 3600))
+ minutes=$((diff_seconds % 3600 / 60))
+ seconds=$((diff_seconds % 60))
+ echo "============== End time : $dt_end"
+ echo "============== Time taken to convert bsdf to pll: ${hours}h:${minutes}m:${seconds}s"
+
+ end_time=$(date +%s)
+ runtime=$((end_time - start_time))
+ echo "Runtime for $asset: $runtime seconds" >> runtime_meshmask.txt
+done
+dt=$(date '+%m/%d/%Y %H:%M:%S')
+echo "======$dt"
+
+# Check if there were any errors at the end
+if [ -s $ERROR_LOG ]; then
+ echo "Batch job completed with errors. See $ERROR_LOG for details."
+else
+ echo "Batch job completed successfully with no errors."
+fi
\ No newline at end of file
diff --git a/run_custom.py b/run_custom.py
deleted file mode 100644
index 816d741..0000000
--- a/run_custom.py
+++ /dev/null
@@ -1,420 +0,0 @@
-# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-
-from bundlesdf import *
-import argparse
-import os,sys
-from mesh_utils import post_process_obj
-from data_preparation import DatasetManagement
-from segmentation_utils import Segmenter
-import shlex
-import xml.etree.ElementTree as ET
-
-CODE_DIR = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(CODE_DIR)
-
-
-def run_one_video(video_dir, out_folder, use_segmenter=False, use_gui=False, with_cnets=False):
- set_seed(0)
-
- os.system(f'rm -rf {out_folder} && mkdir -p {out_folder}')
-
- cfg_bundletrack = yaml.load(open(f"{CODE_DIR}/BundleTrack/config_ho3d.yml",'r'))
- cfg_bundletrack['SPDLOG'] = int(args.debug_level)
- cfg_bundletrack['depth_processing']["zfar"] = 1
- cfg_bundletrack['depth_processing']["percentile"] = 95
- cfg_bundletrack['erode_mask'] = 3
- cfg_bundletrack['debug_dir'] = out_folder+'/'
- cfg_bundletrack['bundle']['max_BA_frames'] = 10
- cfg_bundletrack['bundle']['max_optimized_feature_loss'] = 0.03
- cfg_bundletrack['feature_corres']['max_dist_neighbor'] = 0.03 #0.02
- cfg_bundletrack['feature_corres']['max_normal_neighbor'] = 45 #30
- cfg_bundletrack['feature_corres']['max_dist_no_neighbor'] = 0.02 #0.01
- cfg_bundletrack['feature_corres']['max_normal_no_neighbor'] = 45 #20
- cfg_bundletrack['feature_corres']['map_points'] = True
- cfg_bundletrack['feature_corres']['resize'] = 400
- cfg_bundletrack['feature_corres']['rematch_after_nerf'] = True
- cfg_bundletrack['keyframe']['min_rot'] = 10 #5
- cfg_bundletrack['ransac']['inlier_dist'] = 0.01
- cfg_bundletrack['ransac']['inlier_normal_angle'] = 20
- cfg_bundletrack['ransac']['max_trans_neighbor'] = 0.02
- cfg_bundletrack['ransac']['max_rot_deg_neighbor'] = 30
- cfg_bundletrack['ransac']['max_trans_no_neighbor'] = 0.01
- cfg_bundletrack['ransac']['max_rot_no_neighbor'] = 10
- cfg_bundletrack['p2p']['max_dist'] = 0.02
- cfg_bundletrack['p2p']['max_normal_angle'] = 45
- cfg_track_dir = f'{out_folder}/config_bundletrack.yml'
- yaml.dump(cfg_bundletrack, open(cfg_track_dir,'w'))
-
- cfg_nerf = yaml.load(open(f"{CODE_DIR}/assets/config.yml",'r'))
- cfg_nerf['continual'] = True
- cfg_nerf['trunc_start'] = 0.01
- cfg_nerf['trunc'] = 0.01
- cfg_nerf['mesh_resolution'] = 0.005
- cfg_nerf['down_scale_ratio'] = 1
- cfg_nerf['fs_sdf'] = 0.1
- cfg_nerf['far'] = cfg_bundletrack['depth_processing']["zfar"]
- cfg_nerf['datadir'] = f"{cfg_bundletrack['debug_dir']}nerf_with_bundletrack_online"
- cfg_nerf['notes'] = ''
- cfg_nerf['expname'] = 'nerf_with_bundletrack_online'
- cfg_nerf['save_dir'] = cfg_nerf['datadir']
-
- cfg_nerf['data_dir'] = video_dir
- cfg_nerf['debug_dir']= cfg_bundletrack['debug_dir']
- cfg_nerf['dry_cnets'] = args.dry_cnets
- if with_cnets:
- print('>>>>>>>> Running with cnets data')
- cfg_nerf['cn_mesh'] = f'{CODE_DIR}/assets/test_with_23_iter_344.obj'
- cfg_nerf['support_pts'] = f'{CODE_DIR}/assets/support_pts.pt'
- cfg_nerf['sdfs_from_cnets'] = f'{CODE_DIR}/assets/sdfs_from_cnets.pt'
- cfg_nerf['sampled_pts'] = f'{CODE_DIR}/assets/sampled_pts.pt'
- cfg_nerf['sdf_bounds_from_cnets'] = f'{CODE_DIR}/assets/sdf_bounds_from_cnets.pt'
- else:
- print('>>>>>>>>>> Running without cnets data')
- cfg_nerf['cn_mesh'] = None
- cfg_nerf['support_pts'] = None
- cfg_nerf['sdfs_from_cnets'] = None
- cfg_nerf['sampled_pts'] = None
- cfg_nerf['sdf_bounds_from_cnets'] = None
-
- cfg_nerf_dir = f'{out_folder}/config_nerf.yml'
- yaml.dump(cfg_nerf, open(cfg_nerf_dir,'w'))
-
- if use_segmenter:
- segmenter = Segmenter()
-
- tracker = BundleSdf(cfg_track_dir=cfg_track_dir, cfg_nerf_dir=cfg_nerf_dir, start_nerf_keyframes=2000, use_gui=use_gui) # start_nerf_keyframes=5
-
- reader = YcbineoatReader(video_dir=video_dir, shorter_side=480)
-
-
- for i in range(0,len(reader.color_files),args.stride):
- color_file = reader.color_files[i]
- color = cv2.imread(color_file)
- H0, W0 = color.shape[:2]
- depth = reader.get_depth(i)
- H,W = depth.shape[:2]
- color = cv2.resize(color, (W,H), interpolation=cv2.INTER_NEAREST)
- depth = cv2.resize(depth, (W,H), interpolation=cv2.INTER_NEAREST)
-
- if i==0:
- mask = reader.get_mask(0)
- mask = cv2.resize(mask, (W,H), interpolation=cv2.INTER_NEAREST)
- if use_segmenter:
- mask = segmenter.run(color_file.replace('rgb','masks'))
- else:
- if use_segmenter:
- mask = segmenter.run(color_file.replace('rgb','masks'))
- else:
- mask = reader.get_mask(i)
- mask = cv2.resize(mask, (W,H), interpolation=cv2.INTER_NEAREST)
-
- if cfg_bundletrack['erode_mask']>0:
- kernel = np.ones((cfg_bundletrack['erode_mask'], cfg_bundletrack['erode_mask']), np.uint8)
- mask = cv2.erode(mask.astype(np.uint8), kernel)
-
- id_str = reader.id_strs[i]
- pose_in_model = np.eye(4)
-
- K = reader.K.copy()
-
- tracker.run(color, depth, K, id_str, mask=mask, occ_mask=None, pose_in_model=pose_in_model)
-
- tracker.on_finish()
-
- run_one_video_global_nerf(video_dir, out_folder=out_folder, with_cnets=with_cnets)
-
-
-def run_one_video_global_nerf(video_dir, out_folder, with_cnets=False):
- set_seed(0)
-
- out_folder += '/' #!NOTE there has to be a / in the end
-
- cfg_bundletrack = yaml.load(open(f"{out_folder}config_bundletrack.yml",'r'))
- cfg_bundletrack['debug_dir'] = out_folder
- cfg_track_dir = f"{out_folder}/config_bundletrack.yml"
- yaml.dump(cfg_bundletrack, open(cfg_track_dir,'w'))
-
- cfg_nerf = yaml.load(open(f"{out_folder}/config_nerf.yml",'r'))
-
- cfg_nerf['dry_cnets'] = args.dry_cnets
- if with_cnets:
- print('>>>>>>>> Running with cnets data')
- cfg_nerf['cn_mesh'] = f'{CODE_DIR}/assets/test_with_23_iter_344.obj'
- cfg_nerf['support_pts'] = f'{CODE_DIR}/assets/support_pts.pt'
- cfg_nerf['sdfs_from_cnets'] = f'{CODE_DIR}/assets/sdfs_from_cnets.pt'
- cfg_nerf['sampled_pts'] = f'{CODE_DIR}/assets/sampled_pts.pt'
- cfg_nerf['sdf_bounds_from_cnets'] = f'{CODE_DIR}/assets/sdf_bounds_from_cnets.pt'
- else:
- print('>>>>>>>>>> Running without cnets data')
- cfg_nerf['cn_mesh'] = None
- cfg_nerf['support_pts'] = None
- cfg_nerf['sdfs_from_cnets'] = None
- cfg_nerf['sampled_pts'] = None
- cfg_nerf['sdf_bounds_from_cnets'] = None
-
- cfg_nerf['data_dir'] = video_dir
- cfg_nerf['pll_dir'] = f'{CODE_DIR}/dair_pll'
- cfg_nerf['assets_dir']=f'{CODE_DIR}/assets'
- cfg_nerf['n_step'] = 2000
- cfg_nerf['N_samples'] = 64
- cfg_nerf['N_samples_around_depth'] = 256
- cfg_nerf['first_frame_weight'] = 1
- cfg_nerf['down_scale_ratio'] = 1
- cfg_nerf['finest_res'] = 256
- cfg_nerf['num_levels'] = 16
- cfg_nerf['mesh_resolution'] = 0.002
- cfg_nerf['n_train_image'] = 500
- cfg_nerf['fs_sdf'] = 0.1
- cfg_nerf['frame_features'] = 2
- cfg_nerf['rgb_weight'] = 100
- cfg_nerf['contact_pts_weight'] = 10
- cfg_nerf['support_pts_weight'] = 2000 # 500
- cfg_nerf['hc_sdf_weight_lower'] = 2000 #4000
- cfg_nerf['hc_sdf_weight_upper'] = 200 #3000
- cfg_nerf['pretrain_eikonal_weight'] = 0.1
- cfg_nerf['pretrain_normal_direction_weight'] = 100
- cfg_nerf['eps_minimal_surface'] = 0.5
- cfg_nerf['pretrain_minimal_surface_weight'] = 100
- cfg_nerf['pretrain_hessian_weight'] = 10
- cfg_nerf['pretrain_finite_diff_weight'] = 10
- # cfg_nerf['eikonal_weight'] = 1
-
- cfg_nerf['i_img'] = np.inf
- cfg_nerf['i_mesh'] = cfg_nerf['i_img']
- cfg_nerf['i_nerf_normals'] = cfg_nerf['i_img']
- cfg_nerf['i_save_ray'] = cfg_nerf['i_img']
-
- cfg_nerf['datadir'] = f"{out_folder}nerf_with_bundletrack_online"
- cfg_nerf['save_dir'] = copy.deepcopy(cfg_nerf['datadir'])
-
- os.makedirs(cfg_nerf['datadir'],exist_ok=True)
-
- cfg_nerf_dir = f"{cfg_nerf['datadir']}/config.yml"
- yaml.dump(cfg_nerf, open(cfg_nerf_dir,'w'))
-
- reader = YcbineoatReader(video_dir=args.video_dir, downscale=1)
-
- tracker = BundleSdf(cfg_track_dir=cfg_track_dir, cfg_nerf_dir=cfg_nerf_dir,
- start_nerf_keyframes=5, with_cnets=with_cnets)
- tracker.cfg_nerf = cfg_nerf
- tracker.run_global_nerf(reader=reader, get_texture=True, tex_res=512)
- tracker.on_finish()
-
- print(f"Done")
-
-
-def postprocess_mesh(out_folder):
- mesh_files = sorted(glob.glob(f'{out_folder}/**/nerf/*normalized_space.obj',recursive=True))
- print(f"Using {mesh_files[-1]}")
- os.makedirs(f"{out_folder}/mesh/",exist_ok=True)
-
- print(f"\nSaving meshes to {out_folder}/mesh/\n")
-
- mesh = trimesh.load(mesh_files[-1])
- with open(f'{os.path.dirname(mesh_files[-1])}/config.yml','r') as ff:
- cfg = yaml.load(ff)
- tf = np.eye(4)
- tf[:3,3] = cfg['translation']
- tf1 = np.eye(4)
- tf1[:3,:3] *= cfg['sc_factor']
- tf = tf1@tf
- mesh.apply_transform(np.linalg.inv(tf))
- mesh.export(f"{out_folder}/mesh/mesh_real_scale.obj")
-
- components = trimesh_split(mesh, min_edge=1000)
- best_component = None
- best_size = 0
- for component in components:
- dists = np.linalg.norm(component.vertices,axis=-1)
- if len(component.vertices)>best_size:
- best_size = len(component.vertices)
- best_component = component
- mesh = trimesh_clean(best_component)
-
- mesh.export(f"{out_folder}/mesh/mesh_biggest_component.obj")
- mesh = trimesh.smoothing.filter_laplacian(mesh,lamb=0.5, iterations=3, implicit_time_integration=False, volume_constraint=True, laplacian_operator=None)
- mesh.export(f'{out_folder}/mesh/mesh_biggest_component_smoothed.obj')
-
-def process_traj(system,iter):
- cn_dir = f'{CODE_DIR}/assets/config_cn.yaml'
- with open(cn_dir, 'r') as ff:
- cfg_cn = yaml.load(ff)
-
- save_dir = cfg_cn['save_dir']+f"iter_{iter}/"
- os.system(f'rm -rf {save_dir} && mkdir -p {save_dir}')
- cam = 'cam0' # realsense camera name
- with open(cfg_cn['camera_config'], 'r') as stream:
- data_loaded = yaml.load(stream)
- cam_pos_dict = data_loaded[cam]['pose']['position']
- cam_trans = np.array([cam_pos_dict['x'], cam_pos_dict['y'], cam_pos_dict['z']]).reshape(-1, 1)
- cam_rot_dict = data_loaded[cam]['pose']['rotation']
- cam_axis_vec = np.array([cam_rot_dict['x'], cam_rot_dict['y'], cam_rot_dict['z']])
- for toss_id in range(1, 11):
- dataset = f'old_toss_{toss_id}'
- traj_dir = cfg_cn['debug_dir']+f'/{dataset}/ob_in_cam/'
- tagslam_dir = f'/home/cnets-vision/mengti_ws/robot_filter/dataset/old_toss_{toss_id}/tagslam_poses/tagslam.txt'
- gt_pose_dir = f'/home/cnets-vision/mengti_ws/robot_filter/dataset/{dataset}/tagslam_poses/'
- timestamps = np.loadtxt(gt_pose_dir+'tagslam.txt')[:,0]
- odom_dir = f'{CODE_DIR}/data/{dataset}/annotated_poses/'
- # frame_num = len(os.listdir(traj_dir))
- frame_num = np.loadtxt(tagslam_dir).shape[0]
- pose_dir = traj_dir
- start_frame = cfg_cn['tosses'][system][toss_id-1]['start_frame']
- end_frame = cfg_cn['tosses'][system][toss_id-1]['end_frame']
- cn_traj = DatasetManagement(frame_num, start_frame, end_frame, timestamps, toss_id, cam_trans, cam_axis_vec, odom_dir, pose_dir, save_dir, gt=True)
- cn_traj.do_process()
-
-def run_combined_learning(video_dir, out_folder, use_segmenter, use_gui, system, datasize, storage, run):
- def replace_urdf(urdf_file, filename):
- with open(urdf_file, 'r') as file:
- content = file.read()
- content = content.replace('', f'')
- with open(urdf_file, 'w') as file:
- if not content.startswith('\n' + content
- file.write(content)
-
- cn_dir = f'{CODE_DIR}/assets/config_cn.yaml'
- with open(cn_dir, 'r') as ff:
- cfg_cn = yaml.load(ff)
- iters = cfg_cn['iter']
- pll_dir = cfg_cn['pll_dir']
- iter = 0
- while iter < iters:
- if iter==0:
- run_one_video(video_dir, out_folder+f'/iter_{iter}', use_segmenter, use_gui)
- # run_one_video_global_nerf(out_folder+f'/iter_{iter}')
- command = f'python dair_pll/examples/bundlesdf_simple.py --structured --system=bundlesdf_{system} --geometry=polygon --source=real --contactnets --regenerate --no-residual --loss-variation=1 --inertia-params=0 --dataset-size={datasize} "{storage}-{iter}" "{run}"'
- try:
- subprocess.call(shlex.split(command))
- except subprocess.CalledProcessError as e:
- print(e)
- # copy cn result to assets
- command = f'cp {CODE_DIR}/results/{storage}-{iter}/runs/{run}/urdfs/body.obj {pll_dir}/assets/body_{storage}_{run}-{iter}.obj'
- try:
- subprocess.call(shlex.split(command))
- except subprocess.CalledProcessError as e:
- print(e)
- else:
- run_one_video(video_dir, out_folder+f'/iter_{iter}', use_segmenter, use_gui, shapefile=f'body_{storage}_{run}-{iter-1}.obj')
- # run_one_video_global_nerf(out_folder+f'/iter_{iter}',shapefile=f'body_{storage}_{run}-{iter-1}.obj')
- cfg_nerf = yaml.load(open(f"{out_folder}/iter_{iter}/final/nerf/normalization.yml",'r'))
- mesh_file = out_folder+f'/iter_{iter}/mesh_cleaned.obj'
- mesh_output = out_folder+f'/iter_{iter}/mesh_cleaned_processed.obj'
- post_process_obj(mesh_file, mesh_output, cfg_nerf['sc_factor'])
- urdf_dir = f'{CODE_DIR}/results/{storage}-{iter-1}/runs/{run}/urdfs/bundlesdf_{system}_mesh_vis.urdf'
- replace_urdf(urdf_dir, mesh_output)
- command = f'python dair_pll/examples/bundlesdf_simple.py --structured --system=bundlesdf_{system} --geometry=polygon --source=real --contactnets --regenerate --no-residual --loss-variation=1 --inertia-params=0 --dataset-size={datasize} "{storage}-{iter}" "{run}"'
- try:
- subprocess.call(shlex.split(command))
- except subprocess.CalledProcessError as e:
- print(e)
- # copy cn result to assets
- command = f'cp {CODE_DIR}/results/{storage}-{iter}/runs/{run}/urdfs/body.obj {pll_dir}/assets/body_{storage}_{run}-{iter}.obj'
- try:
- subprocess.call(shlex.split(command))
- except subprocess.CalledProcessError as e:
- print(e)
-
- iter+=1
-
-def draw_pose():
- K = np.loadtxt(f'{args.out_folder}/cam_K.txt').reshape(3,3)
- color_files = sorted(glob.glob(f'{args.out_folder}/color/*'))
- mesh = trimesh.load(f'{args.out_folder}/textured_mesh.obj')
- to_origin, extents = trimesh.bounds.oriented_bounds(mesh)
- bbox = np.stack([-extents/2, extents/2], axis=0).reshape(2,3)
- out_dir = f'{args.out_folder}/pose_vis'
- os.makedirs(out_dir, exist_ok=True)
- logging.info(f"Saving to {out_dir}")
- for color_file in color_files:
- color = imageio.imread(color_file)
- pose = np.loadtxt(color_file.replace('.png','.txt').replace('color','ob_in_cam'))
- pose = pose@np.linalg.inv(to_origin)
- vis = draw_posed_3d_box(K, color, ob_in_cam=pose, bbox=bbox, line_color=(255,255,0))
- id_str = os.path.basename(color_file).replace('.png','')
- imageio.imwrite(f'{out_dir}/{id_str}.png', vis)
-
-
-if __name__=="__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument('--mode', type=str, default="run_video",
- help="run_video / global_refine / get_mesh / " + \
- "combined_learning / draw_pose / init_nert / test_loss")
- parser.add_argument('--video_dir', type=str,
- default="/home/bowen/debug/2022-11-18-15-10-24_milk/")
- parser.add_argument('--out_folder', type=str,
- default="/home/bowen/debug/bundlesdf_2022-11-18-15-10-24_milk")
- parser.add_argument('--use_segmenter', type=int, default=0)
- parser.add_argument('--use_gui', type=int, default=1)
- parser.add_argument('--stride', type=int, default=1,
- help='interval of frames to run; 1 means using every frame')
- parser.add_argument('--debug_level', type=int, default=2,
- help='higher means more logging')
- parser.add_argument('--system', type=str, default='cube',
- help='object name')
- parser.add_argument('--datasize', type=int, default=9,
- help='number of trajectories')
- parser.add_argument('--storage', type=str, default='test',
- help='storage name')
- parser.add_argument('--run', type=str, default='test', help='run name')
- parser.add_argument('--with_cnets', type=bool, default=False,
- help='use contactnets information to improve the sdf training')
- parser.add_argument('--dry_cnets', action='store_true',
- help='use contactnets information to improve the sdf training')
- args = parser.parse_args()
-
- # Set random seed before doing anything else.
- print('Set random seed before doing anything.')
- set_seed(0)
-
- if args.mode=='run_video':
- run_one_video(video_dir=args.video_dir, out_folder=args.out_folder,
- use_segmenter=args.use_segmenter, use_gui=args.use_gui, with_cnets=args.with_cnets)
-
- elif args.mode=='global_refine':
- run_one_video_global_nerf(out_folder=args.out_folder)
-
- elif args.mode=='get_mesh':
- postprocess_mesh(out_folder=args.out_folder)
-
- elif args.mode=='combined_learning':
- run_combined_learning(video_dir=args.video_dir, out_folder=args.out_folder,
- use_segmenter=args.use_segmenter,
- use_gui=args.use_gui, system=args.system,
- datasize=args.datasize, storage=args.storage,
- run=args.run)
-
- elif args.mode=='draw_pose':
- draw_pose()
-
- elif args.mode=='init_nerf':
- # run_one_video(video_dir=args.video_dir, out_folder=args.out_folder,
- # use_segmenter=args.use_segmenter, use_gui=args.use_gui,
- # shapefile='gt_cube_simple.obj')
- run_one_video_global_nerf(out_folder=args.out_folder,
- shapefile='gt_cube_simple.obj')
- # With bundlesdf output:
- # box_textured_mesh_convex.obj, napkin_textured_mesh.obj, bottle_textured_mesh.obj
- # With contactnets output:
- # run_one_video_global_nerf(out_folder=args.out_folder,
- # shapefile='dair_bottle_aligned.ply') # bottle result
- # run_one_video_global_nerf(out_folder=args.out_folder,
- # shapefile='dair_napkin_aligned.ply') # napkin result
-
- elif args.mode=='test_loss':
- # run_one_video(video_dir=args.video_dir, out_folder=args.out_folder,
- # use_segmenter=args.use_segmenter, use_gui=args.use_gui)
- run_one_video_global_nerf(video_dir=args.video_dir,
- out_folder=args.out_folder,
- with_cnets=args.with_cnets)
-
- else:
- raise RuntimeError
diff --git a/vis_utils.py b/vis_utils.py
deleted file mode 100644
index 5873aa0..0000000
--- a/vis_utils.py
+++ /dev/null
@@ -1,339 +0,0 @@
-import os
-import shutil
-from PIL import Image
-import numpy as np
-import matplotlib.pyplot as plt
-import argparse
-import torch
-# import trimesh
-
-def concatenate_masks():
- src_base = "./data"
- dst_dir = os.path.join(src_base, "box_0", "masks")
- current_num = 1
-
- # Loop through box_1 to box_10
- for i in range(1, 11):
- src_subfolder = os.path.join(src_base, f"box_{i}", "masks")
-
- # Get the list of png files in the masks subfolder
- image_files = [f for f in os.listdir(src_subfolder) if f.endswith('.png')]
- image_files.sort() # To make sure we get them in the right order
-
- for img_file in image_files:
- dst_file = os.path.join(dst_dir, f"{current_num:04}.png")
- src_file = os.path.join(src_subfolder, img_file)
-
- # Copy the image to the destination
- shutil.copy(src_file, dst_file)
-
- # Update the current number
- current_num += 1
-
-def process_masks(input_folder):
- '''Convert grey parts in masks to background
- '''
- for file_name in os.listdir(input_folder):
- if file_name.endswith('.png'):
- file_path = os.path.join(input_folder, file_name)
- image = Image.open(file_path)
- image_array = np.array(image)
- binary_image_array = (image_array >= 255).astype(np.uint8) * 255
- processed_image = Image.fromarray(binary_image_array)
- processed_image.save(os.path.join(input_folder, file_name))
-
-def visualize(filename, max_n=10000):
- if isinstance(filename, str):
- pts = torch.load(filename)
- else:
- pts = filename
- pts = pts.detach().numpy()
- if max_n > 0:
- if pts.shape[0]>max_n:
- idxs = np.random.permutation(pts.shape[0])
- pts = pts[idxs[:max_n]]
- fig = plt.figure(figsize=(8, 8))
- ax = fig.add_subplot(111, projection='3d')
- ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=1)
- print(f"{pts.shape=}")
- ax.set_xlabel('X-axis')
- ax.set_ylabel('Y-axis')
- ax.set_zlabel('Z-axis')
- ax.legend()
- plt.show()
- plt.savefig('vis.png')
-
-def visualize_two(filename, filename2, max_n=10000):
- pts = torch.load(filename)
- pts2 = torch.load(filename2)
- pts = pts.detach().numpy()
- pts2 = pts2.detach().numpy()
- if max_n > 0:
- if pts.shape[0]>max_n:
- idxs = np.random.permutation(pts.shape[0])
- pts = pts[idxs[:max_n]]
- if pts2.shape[0]>max_n:
- idxs = np.random.permutation(pts2.shape[0])
- pts2 = pts2[idxs[:max_n]]
- fig = plt.figure(figsize=(8, 8))
- ax = fig.add_subplot(111, projection='3d')
- ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=1)
- ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c='red', s=1)
- print(f"{pts.shape=}")
- print(f"{pts2.shape=}")
- ax.set_xlabel('X-axis')
- ax.set_ylabel('Y-axis')
- ax.set_zlabel('Z-axis')
- ax.legend()
- plt.show()
- plt.savefig('vis.png')
-
-def visualize_pts_sdfs(pts, sdf):
- if isinstance(pts, str):
- pts = torch.load(pts)
- if isinstance(sdf, str):
- sdf = torch.load(sdf)
- N = pts.shape[0]
- fig = plt.figure(figsize=(8, 8))
- ax = fig.add_subplot(111, projection='3d')
- # pts = torch.load(pts).detach().numpy()
- # sdf = torch.load(sdf).detach().numpy()
- idx = np.random.permutation(N)[:5000]
- pts = pts[idx]
- sdf = sdf[idx]
- print(f"{sdf.shape=}")
- print(f"{pts.shape=}")
- colored = ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c=sdf,
- cmap='coolwarm', marker='o', vmin=-1, vmax=1,
- label='pts', s=2)
-
- cbar = fig.colorbar(colored)
- cbar.set_label('sdfs')
- ax.set_xlabel('X-axis')
- ax.set_ylabel('Y-axis')
- ax.set_zlabel('Z-axis')
- ax.legend()
- plt.show()
- plt.savefig(f'plot_{N}.png')
-
-def visualize_two_pts_sdfs(pts1, sdf1, pts2, sdf2, max_n=5000, cval_geo1=True, cval_geo2=True):
- '''
- If sdf is provided, use sdf to colorize,
- else if cval_geo is True, use position to colorize,
- otherwise use a constant color.
- '''
- cval_sdf1 = True
- if isinstance(pts1, str):
- pts1 = torch.load(pts1)
- if isinstance(sdf1, str):
- sdf1 = torch.load(sdf1)
- elif sdf1 is None:
- sdf1 = torch.zeros_like(pts1[:,0])
- cval_sdf1 = False
-
- cval_sdf2 = True
- if isinstance(pts2, str):
- pts2 = torch.load(pts2)
- if isinstance(sdf2, str):
- sdf2 = torch.load(sdf2)
- elif sdf2 is None:
- sdf2 = torch.zeros_like(pts2[:,0])
- cval_sdf2 = False
-
- print(f"Before subsample: {pts1.shape=}, {sdf1.shape=}, {pts2.shape=}, {sdf2.shape=}")
- if max_n > 0:
- if pts1.shape[0]>max_n:
- idxs = np.random.permutation(pts1.shape[0])
- pts1 = pts1[idxs[:max_n]]
- sdf1 = sdf1[idxs[:max_n]]
- if pts2.shape[0]>max_n:
- idxs = np.random.permutation(pts2.shape[0])
- pts2 = pts2[idxs[:max_n]]
- sdf2 = sdf2[idxs[:max_n]]
-
- print(f"After subsample: {pts1.shape=}, {sdf1.shape=}, {pts2.shape=}, {sdf2.shape=}")
-
- if cval_sdf1:
- cmap1 = 'coolwarm'
- cval1 = sdf1
- elif cval_geo1:
- cmap1 = 'viridis'
- # cval1 = pts1.sum(1)
- cval1 = pts1[:, 2]
- else:
- cmap1 = 'Greens'
- cval1 = sdf1
-
- if cval_sdf2:
- cmap2 = 'coolwarm'
- cval2 = sdf2
- elif cval_geo2:
- cmap2 = 'viridis'
- # cval2 = pts2.sum(1)
- cval2 = pts2[:, 2]
- else:
- cmap2 = 'Greys'
- cval2 = sdf2
-
- fig = plt.figure(figsize=(8, 8))
- ax = fig.add_subplot(111, projection='3d')
- pts1 = pts1.cpu().detach().numpy()
- pts2 = pts2.cpu().detach().numpy()
- # ax.scatter(pts1[:,0], pts1[:,1], pts1[:,2], color='red', s=1)
- colored1 = ax.scatter(pts1[:, 0], pts1[:, 1], pts1[:, 2], c=cval1,
- cmap=cmap1, marker='o', vmin=-1, vmax=1,
- label='pts1', s=2)
- # ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c='green',
- # marker='.', s=1)
- colored2 = ax.scatter(pts2[:, 0], pts2[:, 1], pts2[:, 2], c=cval2, cmap=cmap2, vmin=-1, vmax=1,
- marker='.', label='pts2', s=1)
- # ax.scatter(rendered_pts[:, 0], rendered_pts[:, 1], rendered_pts[:, 2])
- # Because both scatter series are using the 'viridis' color map, the
- # colorbar will share a mapping for both series.
- if cval_sdf1:
- cbar = fig.colorbar(colored1)
- cbar.set_label('sdf1')
- elif cval_geo1:
- cbar = fig.colorbar(colored1)
- cbar.set_label('pts1')
-
- if cval_sdf2:
- cbar = fig.colorbar(colored2)
- cbar.set_label('sdf2')
- elif cval_geo2:
- cbar = fig.colorbar(colored2)
- cbar.set_label('pts2')
-
- ax.set_xlabel('X-axis')
- ax.set_ylabel('Y-axis')
- ax.set_zlabel('Z-axis')
- ax.legend()
-
- # Set equal aspect ratio.
- ax.set_box_aspect([np.ptp(arr) for arr in \
- [ax.get_xlim(), ax.get_ylim(), ax.get_zlim()]])
- plt.show()
-
-def convert_to_grayscale(folder_path):
- # Iterate over each file in the folder
- for filename in os.listdir(folder_path):
- file_path = os.path.join(folder_path, filename)
-
- # Open the image file
- image = Image.open(file_path)
-
- # Convert the image to grayscale
- image_gray = image.convert("L")
- print(np.array(image_gray).shape)
- # Save the grayscale image
- image_gray.save(file_path)
-
-def uniform_sample(tensor, num_samples):
- N = tensor.size(0)
- if num_samples > N:
- raise ValueError("num_samples must be less than or equal to the number of points in the tensor")
- indices = torch.randperm(N)[:num_samples]
- return tensor[indices]
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--toss_id",
- type=int,
- default=1,
- required=False,
- )
- parser.add_argument(
- "--type",
- type=str,
- default='cube',
- required=False
- )
- args = parser.parse_args()
- toss_id = args.toss_id
- type = args.type
- folder = f'./data/{type}_{toss_id}/Annotations'
-
- # obj_file = 'results/cube_2/mesh_cleaned.obj' # normalized space. textured_mesh.obj is in real-world.
- # mesh_cleaned = trimesh.load(obj_file, force='mesh')
- # mesh_cleaned_pts = mesh_cleaned.sample(10000)
- # mesh_cleaned_pts = torch.tensor(mesh_cleaned_pts)
- # convert_to_grayscale(folder)
- # rendered_pts = torch.load('rendered_pts.pt')
- # rendered_pts = uniform_sample(rendered_pts, num_samples=2000)
- # rendered_pts = rendered_pts.detach().numpy()
- # print(rendered_pts.shape)
- # visualize_pts_locally('support_pts_processed.pt','sdfs_from_cnets.pt','sampled_pts_processed.pt', 'sdf_bounds_from_cnets.pt')
- # visualize_pts_locally('sdf_ptsall_2.pt', 'sdf_ptsall_2_predsdf.pt', 'contact_pts_new.pt', None) # 'sdf_pts2.pt')
- # visualize_pts_locally('sdf_ptsall_1.pt', 'sdf_ptsall_1_predsdf.pt', mesh_cleaned_pts, None, 5000) # 'sdf_pts2.pt')
- # visualize_pts_locally('contact_pts_gt.pt', 'contact_and_near_surface_sdf.pt', 'empty_pts2.pt', 'sdf_pts2.pt')
- ### support_pts.pt: support points and the sampled points along the querying direction near the support points
- ### sdfs_from_cnets: the sdf values of the above points
- ### sampled_pts.pt: points sampled inside and outside of the hyperplanes
- ### sdf_bounds_from_cnets: the sdf lower bound of these points (i.e., signed distance to the hyperplane)
- ### 1: support points; 2: sampled points
- ### empty: empty mask; sdf_: sdf mask
- ### contact_pts_new: ground truth mesh after icp with bundlesdf output mesh
- ### depth_pts_new: bundlesdf sampled points in occupied octree voxels along the rays
-
- # # obj_file = 'assets/mesh_cleaned_cube2.obj'
- # obj_file = 'results/cube_2/mesh_cleaned.obj'
- # mesh_cleaned = trimesh.load(obj_file, force='mesh')
- # mesh_cleaned_pts = mesh_cleaned.sample(10000)
- # mesh_cleaned_pts = torch.tensor(mesh_cleaned_pts)
- # visualize(mesh_cleaned_pts)
- # visualize('data/cube/cube_2/support_pts.pt')
- # visualize('data/cube/cube_2/sampled_pts.pt')
-
- # obj_file = 'pretrained.obj'
- # mesh_pretrain = trimesh.load(obj_file, force='mesh')
- # mesh_pretrain_pts = mesh_pretrain.sample(10000)
- # mesh_pretrain_pts = torch.tensor(mesh_pretrain_pts)
- # visualize_pts_sdfs(mesh_pretrain_pts, torch.zeros_like(mesh_pretrain_pts[:,0]))
- # pts = torch.load('data/cube/cube_2/support_pts.pt')
- # sdfs = torch.load('data/cube/cube_2/sdfs_from_cnets.pt')
- # pts = torch.load('data/cube/cube_2/sampled_pts.pt')
- # sdfs = torch.load('data/cube/cube_2/sdf_bounds_from_cnets.pt')
- # visualize_pts_sdfs(pts, sdfs)
-
- # visualize_two_pts_sdfs(mesh_pretrain_pts, None, mesh_cleaned_pts, None, 5000, False,False)
-
- # visualize_two_pts_sdfs('sdf_ptsall_1.pt', 'sdf_ptsall_1_predsdf.pt', mesh_cleaned_pts, None, 5000)
-
- # visualize_two_pts_sdfs('data/cube/cube_2/support_pts.pt', 'data/cube/cube_2/sdfs_from_cnets.pt', 'data/cube/cube_2/sampled_pts.pt', 'data/cube/cube_2/sdf_bounds_from_cnets.pt', 5000, False,False)
- # visualize_two('contact_pts_gt.pt', 'sdf_pts1.pt')
- # visualize_two('empty_pts1.pt', 'sdf_pts1.pt')
- # visualize_two('empty_pts2.pt', 'sdf_pts2.pt')
- # visualize_two('sdf_pts1.pt', 'depth_pts_new.pt')
- # visualize_two('contact_pts_new.pt', 'depth_pts_new.pt')
- # visualize_two('contact_pts_new.pt', 'sdf_ptsall_1.pt')
-
- # visualize_pts_sdfs('data/cube/cube_2/10traj/support_pts.pt', 'data/cube/cube_2/10traj/sdfs_from_cnets.pt')
- # visualize_pts_sdfs('assets/support_pts.pt', 'assets/sdfs_from_cnets.pt')
- # visualize_pts_sdfs('results/find_good_case/cube_9_1/cps_empty_pcd.pt', 'results/find_good_case/cube_9_1/cps_empty_sdf_pred.pt')
- # visualize_pts_sdfs('results/find_good_case/cube_9_1/cps_near_pcd.pt', 'results/find_good_case/cube_9_1/cps_near_sdf_pred.pt')
- visualize_pts_sdfs('results/find_good_case/cube_9_1/hps_empty_pcd.pt', 'results/find_good_case/cube_9_1/hps_empty_sdf_pred.pt')
- visualize_pts_sdfs('results/find_good_case/cube_9_1/hps_near_pcd.pt', 'results/find_good_case/cube_9_1/hps_near_sdf_pred.pt')
-
- # visualize_pts_sdfs('sdf_ptsall_1.pt', 'sdf_ptsall_1_gtsdf.pt')
- # visualize_pts_sdfs('empty_ptsall_1.pt', 'empty_ptsall_1_gtsdf.pt')
- # visualize_pts_sdfs('sdf_ptsall_2.pt', 'sdf_ptsall_2_gtsdflb.pt')
- # visualize_pts_sdfs('empty_ptsall_2.pt', 'empty_ptsall_2_gtsdflb.pt')
- # visualize_pts_sdfs('empty_ptsall_2.pt', 'empty_ptsall_2_predsdf.pt')
-
- # visualize_pts_sdfs('sdf_ptsall_1_s.pt', 'sdf_ptsall_1_gtsdf_s.pt')
- # visualize_pts_sdfs('empty_ptsall_1_s.pt', 'empty_ptsall_1_gtsdf_s.pt')
- # visualize_pts_sdfs('sdf_ptsall_2_s.pt', 'sdf_ptsall_2_gtsdflb_s.pt')
- # visualize_pts_sdfs('empty_ptsall_2_s_mt.pt', 'empty_ptsall_2_predsdf_s_mt.pt')
- # visualize_pts_sdfs('empty_ptsall_2_mt.pt', 'empty_ptsall_2_predsdf_mt.pt')
-
- # visualize_pts_sdfs('sdf_ptsall_1_r.pt', 'sdf_ptsall_1_gtsdf_r.pt')
- # visualize_pts_sdfs('empty_ptsall_1_r.pt', 'empty_ptsall_1_gtsdf_r.pt')
- # visualize_pts_sdfs('sdf_ptsall_2_r.pt', 'sdf_ptsall_2_gtsdflb_r.pt')
- # visualize_pts_sdfs('empty_ptsall_2_r.pt', 'empty_ptsall_2_gtsdflb_r.pt')
- # visualize_pts_sdfs('empty_ptsall_2_r.pt', 'empty_ptsall_2_predsdf_r.pt')
-
- # visualize_pts_sdfs('sdf_ptsall_1_d.pt', 'sdf_ptsall_1_gtsdf_d.pt')
- # visualize_pts_sdfs('empty_ptsall_1_d.pt', 'empty_ptsall_1_gtsdf_d.pt')
- # visualize_pts_sdfs('sdf_ptsall_2_d.pt', 'sdf_ptsall_2_gtsdflb_d.pt')
- # visualize_pts_sdfs('empty_ptsall_2_d.pt', 'empty_ptsall_2_gtsdflb_d.pt')
\ No newline at end of file