diff --git a/Dataset/Half_Circle_O/annotations_summary.csv b/Dataset/Half_Circle_O/annotations_summary.csv new file mode 100644 index 0000000..f9b469a --- /dev/null +++ b/Dataset/Half_Circle_O/annotations_summary.csv @@ -0,0 +1,6 @@ +Object_Name,Image_Path,X,Y,Width,Height,Img_Width,Img_Height +Half_Circle_O,Half_Circle_O_img_0.jpg,266,208,18,28,448,448 +Half_Circle_O,Half_Circle_O_img_1.jpg,363,279,26,22,448,448 +Half_Circle_O,Half_Circle_O_img_2.jpg,231,293,37,26,448,448 +Half_Circle_O,Half_Circle_O_img_3.jpg,163,213,22,30,448,448 +Half_Circle_O,Half_Circle_O_img_4.jpg,61,260,61,43,448,448 diff --git a/Dataset/Half_Circle_O/images/Half_Circle_O_img_0.jpg b/Dataset/Half_Circle_O/images/Half_Circle_O_img_0.jpg new file mode 100644 index 0000000..1517707 Binary files /dev/null and b/Dataset/Half_Circle_O/images/Half_Circle_O_img_0.jpg differ diff --git a/Dataset/Half_Circle_O/images/Half_Circle_O_img_1.jpg b/Dataset/Half_Circle_O/images/Half_Circle_O_img_1.jpg new file mode 100644 index 0000000..bf13f0a Binary files /dev/null and b/Dataset/Half_Circle_O/images/Half_Circle_O_img_1.jpg differ diff --git a/Dataset/Half_Circle_O/images/Half_Circle_O_img_2.jpg b/Dataset/Half_Circle_O/images/Half_Circle_O_img_2.jpg new file mode 100644 index 0000000..9d98a35 Binary files /dev/null and b/Dataset/Half_Circle_O/images/Half_Circle_O_img_2.jpg differ diff --git a/Dataset/Half_Circle_O/images/Half_Circle_O_img_3.jpg b/Dataset/Half_Circle_O/images/Half_Circle_O_img_3.jpg new file mode 100644 index 0000000..816d970 Binary files /dev/null and b/Dataset/Half_Circle_O/images/Half_Circle_O_img_3.jpg differ diff --git a/Dataset/Half_Circle_O/images/Half_Circle_O_img_4.jpg b/Dataset/Half_Circle_O/images/Half_Circle_O_img_4.jpg new file mode 100644 index 0000000..3a45b7f Binary files /dev/null and b/Dataset/Half_Circle_O/images/Half_Circle_O_img_4.jpg differ diff --git a/Dataset/Half_Circle_O/masks/Half_Circle_O_img_0.jpg b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_0.jpg new file mode 100644 index 0000000..d78d313 Binary files /dev/null and b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_0.jpg differ diff --git a/Dataset/Half_Circle_O/masks/Half_Circle_O_img_1.jpg b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_1.jpg new file mode 100644 index 0000000..13b28dc Binary files /dev/null and b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_1.jpg differ diff --git a/Dataset/Half_Circle_O/masks/Half_Circle_O_img_2.jpg b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_2.jpg new file mode 100644 index 0000000..f7b1d2a Binary files /dev/null and b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_2.jpg differ diff --git a/Dataset/Half_Circle_O/masks/Half_Circle_O_img_3.jpg b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_3.jpg new file mode 100644 index 0000000..68622a2 Binary files /dev/null and b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_3.jpg differ diff --git a/Dataset/Half_Circle_O/masks/Half_Circle_O_img_4.jpg b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_4.jpg new file mode 100644 index 0000000..5e4f776 Binary files /dev/null and b/Dataset/Half_Circle_O/masks/Half_Circle_O_img_4.jpg differ diff --git a/Dataset/Heart_O/annotations_summary.csv b/Dataset/Heart_O/annotations_summary.csv new file mode 100644 index 0000000..73d251a --- /dev/null +++ b/Dataset/Heart_O/annotations_summary.csv @@ -0,0 +1,6 @@ +Object_Name,Image_Path,X,Y,Width,Height,Img_Width,Img_Height +Heart_O,Heart_O_img_0.jpg,258,204,29,28,448,448 +Heart_O,Heart_O_img_1.jpg,356,271,28,28,448,448 +Heart_O,Heart_O_img_2.jpg,225,282,38,39,448,448 
+Heart_O,Heart_O_img_3.jpg,162,207,32,31,448,448 +Heart_O,Heart_O_img_4.jpg,50,241,63,66,448,448 diff --git a/Dataset/Heart_O/images/Heart_O_img_0.jpg b/Dataset/Heart_O/images/Heart_O_img_0.jpg new file mode 100644 index 0000000..fcbc9ab Binary files /dev/null and b/Dataset/Heart_O/images/Heart_O_img_0.jpg differ diff --git a/Dataset/Heart_O/images/Heart_O_img_1.jpg b/Dataset/Heart_O/images/Heart_O_img_1.jpg new file mode 100644 index 0000000..b35cac0 Binary files /dev/null and b/Dataset/Heart_O/images/Heart_O_img_1.jpg differ diff --git a/Dataset/Heart_O/images/Heart_O_img_2.jpg b/Dataset/Heart_O/images/Heart_O_img_2.jpg new file mode 100644 index 0000000..5febf5e Binary files /dev/null and b/Dataset/Heart_O/images/Heart_O_img_2.jpg differ diff --git a/Dataset/Heart_O/images/Heart_O_img_3.jpg b/Dataset/Heart_O/images/Heart_O_img_3.jpg new file mode 100644 index 0000000..8cbabc2 Binary files /dev/null and b/Dataset/Heart_O/images/Heart_O_img_3.jpg differ diff --git a/Dataset/Heart_O/images/Heart_O_img_4.jpg b/Dataset/Heart_O/images/Heart_O_img_4.jpg new file mode 100644 index 0000000..1212a0f Binary files /dev/null and b/Dataset/Heart_O/images/Heart_O_img_4.jpg differ diff --git a/Dataset/Heart_O/masks/Heart_O_img_0.jpg b/Dataset/Heart_O/masks/Heart_O_img_0.jpg new file mode 100644 index 0000000..ca48633 Binary files /dev/null and b/Dataset/Heart_O/masks/Heart_O_img_0.jpg differ diff --git a/Dataset/Heart_O/masks/Heart_O_img_1.jpg b/Dataset/Heart_O/masks/Heart_O_img_1.jpg new file mode 100644 index 0000000..0d0f771 Binary files /dev/null and b/Dataset/Heart_O/masks/Heart_O_img_1.jpg differ diff --git a/Dataset/Heart_O/masks/Heart_O_img_2.jpg b/Dataset/Heart_O/masks/Heart_O_img_2.jpg new file mode 100644 index 0000000..56d3ed3 Binary files /dev/null and b/Dataset/Heart_O/masks/Heart_O_img_2.jpg differ diff --git a/Dataset/Heart_O/masks/Heart_O_img_3.jpg b/Dataset/Heart_O/masks/Heart_O_img_3.jpg new file mode 100644 index 0000000..18dd87a Binary files /dev/null and b/Dataset/Heart_O/masks/Heart_O_img_3.jpg differ diff --git a/Dataset/Heart_O/masks/Heart_O_img_4.jpg b/Dataset/Heart_O/masks/Heart_O_img_4.jpg new file mode 100644 index 0000000..e517088 Binary files /dev/null and b/Dataset/Heart_O/masks/Heart_O_img_4.jpg differ diff --git a/Dataset/Heart_W/annotations_summary.csv b/Dataset/Heart_W/annotations_summary.csv new file mode 100644 index 0000000..caa7d4c --- /dev/null +++ b/Dataset/Heart_W/annotations_summary.csv @@ -0,0 +1,6 @@ +Object_Name,Image_Path,X,Y,Width,Height,Img_Width,Img_Height +Heart_W,Heart_W_img_0.jpg,258,204,29,28,448,448 +Heart_W,Heart_W_img_1.jpg,196,93,29,30,448,448 +Heart_W,Heart_W_img_2.jpg,251,21,41,40,448,448 +Heart_W,Heart_W_img_3.jpg,262,159,40,41,448,448 +Heart_W,Heart_W_img_4.jpg,143,276,41,42,448,448 diff --git a/Dataset/Heart_W/images/Heart_W_img_0.jpg b/Dataset/Heart_W/images/Heart_W_img_0.jpg new file mode 100644 index 0000000..ae38023 Binary files /dev/null and b/Dataset/Heart_W/images/Heart_W_img_0.jpg differ diff --git a/Dataset/Heart_W/images/Heart_W_img_1.jpg b/Dataset/Heart_W/images/Heart_W_img_1.jpg new file mode 100644 index 0000000..9473d4b Binary files /dev/null and b/Dataset/Heart_W/images/Heart_W_img_1.jpg differ diff --git a/Dataset/Heart_W/images/Heart_W_img_2.jpg b/Dataset/Heart_W/images/Heart_W_img_2.jpg new file mode 100644 index 0000000..cf998ac Binary files /dev/null and b/Dataset/Heart_W/images/Heart_W_img_2.jpg differ diff --git a/Dataset/Heart_W/images/Heart_W_img_3.jpg b/Dataset/Heart_W/images/Heart_W_img_3.jpg new file mode 100644 
index 0000000..3b0aefc Binary files /dev/null and b/Dataset/Heart_W/images/Heart_W_img_3.jpg differ diff --git a/Dataset/Heart_W/images/Heart_W_img_4.jpg b/Dataset/Heart_W/images/Heart_W_img_4.jpg new file mode 100644 index 0000000..34a62cb Binary files /dev/null and b/Dataset/Heart_W/images/Heart_W_img_4.jpg differ diff --git a/Dataset_Tools/augment_images.py b/Dataset_Tools/augment_images.py new file mode 100644 index 0000000..748230a --- /dev/null +++ b/Dataset_Tools/augment_images.py @@ -0,0 +1,62 @@ +import numpy as np +import imgaug as ia +import imgaug.augmenters as iaa + + +def create_augmenter(): + sometimes = lambda aug: iaa.Sometimes(0.5, aug) + seq = iaa.Sequential( + [ + # execute 0 to 5 of the following (less important) augmenters per image + # don't execute all of them, as that would often be way too strong + iaa.SomeOf((0, 3), + [ + sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation + iaa.OneOf([ + iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0 + iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7 + iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 2 and 7 + ]), + iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images + iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images + # search either for all edges or for directed edges, + # blend the result with the original image using a blobby mask + iaa.SimplexNoiseAlpha(iaa.OneOf([ + iaa.EdgeDetect(alpha=(0.5, 1.0)), + iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)), + ])), + iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images + iaa.OneOf([ + iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels + iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2), + ]), + iaa.Invert(0.05, per_channel=True), # invert color channels + iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value) + iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation + # either change the brightness of the whole image (sometimes + # per channel) or change the brightness of subareas + iaa.OneOf([ + iaa.Multiply((0.5, 1.5), per_channel=0.5), + iaa.FrequencyNoiseAlpha( + exponent=(-4, 0), + first=iaa.Multiply((0.5, 1.5), per_channel=True), + second=iaa.LinearContrast((0.5, 2.0)) + ) + ]), + iaa.LinearContrast((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast + iaa.Grayscale(alpha=(0.0, 1.0)), + sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths) + sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))), # sometimes move parts of the image around + sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1))) + ], + random_order=True + ) + ], + random_order=True + ) + return seq + +def augment_images(images:list): + '''augment images in training set only''' + augmenter = create_augmenter() + return augmenter(images=images) \ No newline at end of file diff --git a/Dataset_Tools/get_dataset.py b/Dataset_Tools/get_dataset.py new file mode 100644 index 0000000..437e226 --- /dev/null +++ b/Dataset_Tools/get_dataset.py @@ -0,0 +1,511 @@ +import bpy +import csv +import cv2 +from imutils.paths import list_images +import imutils +import math +import mathutils +import numpy as np +import os +import pathlib +from random import choice, random, 
randint, seed, uniform +import random +import sys +import string +import argparse + +DIR="/usr/local/lib/python3.7/site-packages/" +sys.path.append(DIR) +sys.path.append('') + +from Dataset_Tools.hdri_operators import rotate, add_new_sun, add_rotation_driver, calculate_sun_position + + +'''How to use: +blender --background --use-extension 1 -E CYCLES -t 0 -P 'get_dataset.py' +blender is your path to blender +for macos it is /Applications/Blender.app/Contents/MacOS/blender +''' + + + + +def updateCamera(camera, focus_point=mathutils.Vector((0.0, 0.0, 0.0)), distance=110.0): + """ + Focus the camera to a focus point and place the camera at a specific distance from that + focus point. The camera stays in a direct line with the focus point. + + :param camera: the camera object + :type camera: bpy.types.object + :param focus_point: the point to focus on (default=``mathutils.Vector((0.0, 0.0, 0.0))``) + :type focus_point: mathutils.Vector + :param distance: the distance to keep to the focus point (default=``10.0``) + :type distance: float + """ + looking_direction = camera.location - focus_point + rot_quat = looking_direction.to_track_quat('Z', 'Y') + + camera.rotation_euler = rot_quat.to_euler() + camera.location = rot_quat @ mathutils.Vector((0.0, 0.0, distance)) + + +def clamp(x, minimum, maximum): + return max(minimum, min(x, maximum)) + +def camera_view_bounds_2d(scene, cam_ob, me_ob): + """ + Returns camera space bounding box of mesh object. + + Negative 'z' value means the point is behind the camera. + + Takes shift-x/y, lens angle and sensor size into account + as well as perspective/ortho projections. + + :arg scene: Scene to use for frame size. + :type scene: :class:`bpy.types.Scene` + :arg obj: Camera object. + :type obj: :class:`bpy.types.Object` + :arg me: Untransformed Mesh. + :type me: :class:`bpy.types.Mesh´ + :return: a Box object (call its to_tuple() method to get x, y, width and height) + :rtype: :class:`Box` + """ + + mat = cam_ob.matrix_world.normalized().inverted() + depsgraph = bpy.context.evaluated_depsgraph_get() + mesh_eval = me_ob.evaluated_get(depsgraph) + me = mesh_eval.to_mesh() + me.transform(me_ob.matrix_world) + me.transform(mat) + + camera = cam_ob.data + frame = [-v for v in camera.view_frame(scene=scene)[:3]] + camera_persp = camera.type != 'ORTHO' + + lx = [] + ly = [] + + for v in me.vertices: + co_local = v.co + z = -co_local.z + + if camera_persp: + if z == 0.0: + lx.append(0.5) + ly.append(0.5) + # Does it make any sense to drop these? 
+ # if z <= 0.0: + # continue + else: + frame = [(v / (v.z / z)) for v in frame] + + min_x, max_x = frame[1].x, frame[2].x + min_y, max_y = frame[0].y, frame[1].y + + x = (co_local.x - min_x) / (max_x - min_x) + y = (co_local.y - min_y) / (max_y - min_y) + + lx.append(x) + ly.append(y) + + min_x = clamp(min(lx), 0.0, 1.0) + max_x = clamp(max(lx), 0.0, 1.0) + min_y = clamp(min(ly), 0.0, 1.0) + max_y = clamp(max(ly), 0.0, 1.0) + + mesh_eval.to_mesh_clear() + + r = scene.render + fac = r.resolution_percentage * 0.01 + dim_x = r.resolution_x * fac + dim_y = r.resolution_y * fac + + # Sanity check + if round((max_x - min_x) * dim_x) == 0 or round((max_y - min_y) * dim_y) == 0: + return (0, 0, 0, 0) + + return ( + round(min_x * dim_x), # X + round(dim_y - max_y * dim_y), # Y + round((max_x - min_x) * dim_x), # Width + round((max_y - min_y) * dim_y) # Height + ) + + +class Colour: + + def __init__(self, value): + self._START = '#' + value = list(value) + if len(value) != 3: + raise ValueError('value must have a length of three') + self._values = value + + def __str__(self): + return self._START + ''.join('{:02X}'.format(v) for v in self) + + def __iter__(self): + return iter(self._values) + + def __getitem__(self, index): + return self._values[index] + + def __setitem__(self, index): + return self._values[index] + + @staticmethod + def from_string(string): + colour = iter(string) + if string[0] == self._START: + next(colour, None) + return Colour(int(''.join(v), 16) for v in zip(colour, colour)) + + @staticmethod + def random(): + return Colour(random.randrange(256) for _ in range(3)) + + def contrast(self): + return Colour(255 - v for v in self) + + @staticmethod + def hex_to_rgb(hex): + return tuple(int(hex[i:i+2], 16)/255 for i in (1, 2, 4)) + +def get_material_name(obj): + mesh = obj.data + mat_name = '' + for f in mesh.polygons: # iterate over faces + slot = obj.material_slots[f.material_index] + mat = slot.material + mat_name = mat.name + return mat_name + + + +def getBinaryMask(img_path, bgr_color): + img = cv2.imread(img_path) + img_height, img_width, _ = img.shape + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + mask = cv2.compare(gray,11,cv2.CMP_LT) + inverted_mask = cv2.bitwise_not(mask) + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)) + eroded_mask = cv2.erode(inverted_mask, kernel) + dilated_mask = cv2.dilate(eroded_mask, kernel) + + b, g, r = bgr_color + dilated_mask_3d = np.repeat(dilated_mask[:, :, np.newaxis], 3, axis=2) + dilated_mask_3d[:, :, 0] = np.where(dilated_mask_3d[:, :, 0]>0, b, dilated_mask_3d[:, :, 0]) + dilated_mask_3d[:, :, 1] = np.where(dilated_mask_3d[:, :, 1]>0, g, dilated_mask_3d[:, :, 1]) + dilated_mask_3d[:, :, 2] = np.where(dilated_mask_3d[:, :, 2]>0, r, dilated_mask_3d[:, :, 2]) + + return dilated_mask_3d + + +class ArgumentParserForBlender(argparse.ArgumentParser): + """ + This class is identical to its superclass, except for the parse_args + method (see docstring). It resolves the ambiguity generated when calling + Blender from the CLI with a python script, and both Blender and the script + have arguments. E.g., the following call will make Blender crash because + it will try to process the script's -a and -b flags: + >>> blender --python my_script.py -a 1 -b 2 + + To bypass this issue this class uses the fact that Blender will ignore all + arguments given after a double-dash ('--'). The approach is that all + arguments before '--' go to Blender, arguments after go to the script. 
+ The following calls work fine: + >>> blender --python my_script.py -- -a 1 -b 2 + >>> blender --python my_script.py -- + """ + + def _get_argv_after_doubledash(self): + """ + Given the sys.argv as a list of strings, this method returns the + sublist right after the '--' element (if present, otherwise returns + an empty list). + """ + try: + idx = sys.argv.index("--") + return sys.argv[idx+1:] # the list after '--' + except ValueError as e: # '--' not in the list: + return [] + + # overrides superclass + def parse_args(self): + """ + This method is expected to behave identically as in the superclass, + except that the sys.argv list will be pre-processed using + _get_argv_after_doubledash before. See the docstring of the class for + usage examples and details. + """ + return super().parse_args(args=self._get_argv_after_doubledash()) + +# GPU +bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA' +bpy.context.scene.cycles.device = 'GPU' + + +seed(1) + +parser = ArgumentParserForBlender() +parser.add_argument("--shape", type=str, required=True, choices=['Half_Circle', 'Circle', 'Heart', 'Plus', 'Square', 'Triangle']) +parser.add_argument("--alphanumeric", type=str, required=True, help='uppercase letter A-Z or number 0-9') +parser.add_argument("--num_images", type=int, required=True, help='number of images per shape alphanumeric pair') +args = parser.parse_args() + +shapes_list = ['Half_Circle', 'Circle', 'Heart', 'Plus', 'Square', 'Triangle'] +alphanumeric_list = list(string.ascii_uppercase) + [str(i) for i in range(10)] +color_matrix = {'Half_Circle': (255, 0, 0), 'Circle': (0, 255, 0), 'Heart': (0, 0, 255), + 'Plus': (255, 255, 0), 'Square': (0, 255, 255), 'Triangle': (255, 255, 255)} +shape_name = args.shape +alphanumeric_name = args.alphanumeric +num_images = args.num_images + + + +# import object +shape_path = os.path.join(os.getcwd(), 'shapes', shape_name + '.obj') +alphanumeric_path = os.path.join(os.getcwd(), 'alphanumeric', alphanumeric_name + '.obj') +bpy.ops.import_scene.obj( filepath = shape_path, filter_glob="*.obj;*.mtl" ) +bpy.ops.import_scene.obj( filepath = alphanumeric_path, filter_glob="*.obj;*.mtl" ) +object_name = shape_name + '_' + alphanumeric_name +dataset_path = os.path.join(os.getcwd(), 'Dataset', object_name) +dataset_img_path = os.path.join(os.getcwd(), 'Dataset', object_name, 'images') +dataset_mask_path = os.path.join(os.getcwd(), 'Dataset', object_name, 'masks') +if not os.path.exists(dataset_path): + os.mkdir(dataset_path) +if not os.path.exists(dataset_img_path): + os.mkdir(dataset_img_path) +if not os.path.exists(dataset_mask_path): + os.mkdir(dataset_mask_path) +annotations_summary_save_path = os.sep.join(dataset_img_path.split(os.sep)[:-1]) + +if not os.path.isdir(dataset_img_path): + os.makedirs(dataset_img_path) + + +# unlink cube +target = bpy.data.objects['Cube'] +objs = bpy.data.objects +objs.remove(objs["Cube"], do_unlink=True) + +alphanumeric = None +shape = None +bpy.ops.object.select_all(action='DESELECT') +for obj in bpy.context.scene.objects: + if 'Camera' not in obj.name and 'Light' not in obj.name: + if any([True for shape in shapes_list if shape in obj.name]): + obj.name = shape_name + shape = bpy.data.objects[obj.name] + elif any([True for alphanumeric in alphanumeric_list if alphanumeric in obj.name]): + obj.name = alphanumeric_name + alphanumeric = bpy.data.objects[obj.name] + bpy.data.objects[obj.name].select_set(True) + + +# target settings +bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS') 
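+# The origins are snapped to each object's bounding-box centre so the shape and alphanumeric stack cleanly at the world origin;
+# the small +0.001 z-offset applied to the alphanumeric below presumably keeps the letter from sitting exactly coplanar with the shape face.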
+alphanumeric.location = (0, 0, .001) +shape.location = (0, 0, 0) +dg = bpy.context.evaluated_depsgraph_get() +alphanumeric.rotation_mode = 'XYZ' +shape.rotation_mode = 'XYZ' + +# original dimensions +shape_x_dim = shape.dimensions.x +shape_y_dim = shape.dimensions.y +shape_z_dim = shape.dimensions.z + +# prep shape/alphanumeric size +def meters_to_feet(target, feet, orig_x_dim, orig_y_dim, orig_z_dim): + feet_to_meters = 0.3048 * feet + largest_dimension = max((orig_x_dim, orig_y_dim, orig_z_dim)) + muliplier = feet_to_meters / largest_dimension + target.dimensions=(muliplier*orig_x_dim, muliplier*orig_y_dim, muliplier*orig_z_dim) + + + +# scale range is scale for alphanumeric to fit shape, lower bound is scale for alphanumeric to fit shape of size feet=1, upper bound is scale for alphanumeric to fit shape of size feet=2 +scale_range_dict = {'Half_Circle': (6, 12), + 'Circle': (9, 18), + 'Heart': (8, 16), + 'Plus': (7.5, 15), + 'Square': (10, 22), + 'Triangle': (5.5, 11)} + + +# print names of selected objects +sel = bpy.context.selected_objects +for obj in sel: + print("selected obj:", obj.name) + + +# settings for rendered image +res_x = 448 +res_y = 448 +bpy.data.scenes["Scene"].render.resolution_x = res_x +bpy.data.scenes["Scene"].render.resolution_y = res_y +bpy.context.scene.render.image_settings.file_format='JPEG' + +# set up randomized backgrounds +background_image_paths = list(list_images('Supplementary_Dataset/Backgrounds')) + +# set up background image +bpy.context.scene.render.film_transparent = True +bpy.context.scene.use_nodes = True +tree = bpy.context.scene.node_tree +composite = tree.nodes[0] +render_layers = tree.nodes[1] +alpha_over = tree.nodes.new(type='CompositorNodeAlphaOver') +background_img_node = tree.nodes.new(type="CompositorNodeImage") +scale_node = tree.nodes.new(type="CompositorNodeScale") +links = tree.links +link_1 = links.new(render_layers.outputs[0], alpha_over.inputs[2]) +link_2 = links.new(alpha_over.outputs[0], composite.inputs[0]) +link_3 = links.new(background_img_node.outputs[0], scale_node.inputs[0]) +link_4 = links.new(scale_node.outputs[0], alpha_over.inputs[1]) +bpy.data.scenes["Scene"].node_tree.nodes["Scale"].space = 'RENDER_SIZE' + + + + + + +# camera settings +cam = bpy.data.objects['Camera'] +bpy.data.cameras['Camera'].type = 'PERSP' +cam.rotation_euler = [0, 0, 0] +cam.location = [0, 0, 0] +cam.data.clip_end = 1e+08 +feet_to_meters = 0.3048 +camera_dist_range = (10 * feet_to_meters, 40 * feet_to_meters) +camera_location_range = (-2.5, 2.5) + + + + +# hdri +bpy.ops.preferences.addon_install(filepath=os.path.join(os.getcwd(), 'Dataset_Tools/hdri-sun-aligner-1_5.zip')) +bpy.ops.preferences.addon_enable(module='hdri-sun-aligner-1_5') +hdri_dataset_path = 'Supplementary_Dataset/HDRI' +hdri_images = [] +for f in os.scandir(hdri_dataset_path): + if os.path.isfile(f.path): + hdri_images.append(f.path) +scene = bpy.context.scene +scene.render.film_transparent = True +# set up hdri nodes +world = bpy.context.scene.world +world.use_nodes = True +node_tree = world.node_tree +links = node_tree.links +background_node = world.node_tree.nodes['Background'] +environment_texture_node = node_tree.nodes.new(type="ShaderNodeTexEnvironment") +mapping_node = node_tree.nodes.new(type="ShaderNodeMapping") +tex_coord_node = node_tree.nodes.new(type="ShaderNodeTexCoord") +node_tree.links.new(tex_coord_node.outputs["Generated"], mapping_node.inputs["Vector"]) +node_tree.links.new(mapping_node.outputs["Vector"], environment_texture_node.inputs["Vector"]) 
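+# Resulting world node chain (for reference): Texture Coordinate (Generated) -> Mapping -> Environment Texture -> Background.
+# Rotating the Mapping node's Z input later in the render loop spins the HDRI, and therefore the light direction, around the scene.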
+node_tree.links.new(environment_texture_node.outputs["Color"], node_tree.nodes["Background"].inputs["Color"]) + + +img_id = 0 +with open(os.path.join(annotations_summary_save_path, 'annotations_summary.csv'), 'w+', newline='') as annotations_summary: + summary_writer = csv.writer(annotations_summary, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) + # don't rewrite the headers when appending new rows. open append (at end), seek to the top and read it all the way until the end, your at the end again for writing. + annotations_summary.seek(os.SEEK_SET) + annotations_summary = [row for row in csv.DictReader(annotations_summary)] + if len(annotations_summary) == 0: + summary_writer.writerow(['Object_Name', 'Image_Path', 'X', 'Y', 'Width', 'Height', 'Img_Width', 'Img_Height']) + + while img_id < num_images: + # random size + random_feet = uniform(1, 2) + percent_inc = random_feet - 1 + new_alphanumeric_scale = (scale_range_dict[shape_name][1] - scale_range_dict[shape_name][0]) * percent_inc + scale_range_dict[shape_name][0] + meters_to_feet(shape, random_feet, shape_x_dim, shape_y_dim, shape_z_dim) + alphanumeric.scale = (new_alphanumeric_scale, new_alphanumeric_scale, new_alphanumeric_scale) + + # random color + base = Colour.random() + color_1 = str(base) + color_2 = str(base.contrast()) + alphanumeric_rgb = Colour.hex_to_rgb(color_1) + shape_rgb = Colour.hex_to_rgb(color_2) + material_name = get_material_name(shape) + bpy.data.materials[material_name].node_tree.nodes["Principled BSDF"].inputs[0].default_value = (alphanumeric_rgb[0], alphanumeric_rgb[1], alphanumeric_rgb[2], 1) + material_name = get_material_name(alphanumeric) + bpy.data.materials[material_name].node_tree.nodes["Principled BSDF"].inputs[0].default_value = (shape_rgb[0], shape_rgb[1], shape_rgb[2], 1) + + # update camera + random_cam_dist = uniform(camera_dist_range[0], camera_dist_range[1]) + percent_inc = (random_cam_dist - camera_dist_range[0]) / camera_dist_range[0] + 1 + percent_inc = random_cam_dist / camera_dist_range[0] + random_cam_location_x = uniform(feet_to_meters*camera_location_range[0] * percent_inc, feet_to_meters*camera_location_range[1] * percent_inc) + random_cam_location_y = uniform(feet_to_meters*camera_location_range[0] * percent_inc, feet_to_meters*camera_location_range[1] * percent_inc) + cam.location = (random_cam_location_x, random_cam_location_y, random_cam_dist) + cam.rotation_euler = [0, 0, uniform(0, 2*math.pi)] + + # render img + fn = dataset_img_path + '/' + object_name + '_img_{}.jpg'.format(img_id) + print(fn) + bpy.data.scenes["Scene"].render.filepath = fn + bpy.ops.render.render(write_still=True) + + # write annotations + x, y, w, h = camera_view_bounds_2d(bpy.context.scene, bpy.context.scene.camera, shape) + img_name = object_name + '_img_{}.jpg'.format(img_id) + + summary_writer.writerow([object_name, img_name, x, y, w, h, res_x, res_y]) + + + # save mask + mask = getBinaryMask(fn, color_matrix[shape_name]) + cv2.imwrite(dataset_mask_path + '/' + object_name + '_img_{}.jpg'.format(img_id), mask) + + # resave img with random background image and hdri + random_hdri = choice(hdri_images) + environment_texture_node.image = bpy.data.images.load(random_hdri) + print('HDRI__', environment_texture_node.image) + # set up sun aligned with hdri + for obj in bpy.data.objects: + if 'HDRI Sun' == obj.name: + bpy.data.objects.remove(bpy.data.objects['HDRI Sun'], do_unlink=True) + context = bpy.context + add_new_sun(context) + calculate_sun_position(context) + add_rotation_driver(context) + 
rotate(context) + sun = bpy.data.objects['HDRI Sun'] + sun.data.energy = uniform(0.5, 1.5) + mapping_node.inputs['Rotation'].default_value = (0.0, 0.0, random.random()*360) + # make sure to rotate sun + context = bpy.context + add_rotation_driver(context) + rotate(context) + + background_img = choice(background_image_paths) + background_img_node.image = bpy.data.images.load(background_img) + fn = dataset_img_path + '/' + object_name + '_img_{}.jpg'.format(img_id) + print(fn) + bpy.data.scenes["Scene"].render.filepath = fn + bpy.ops.render.render(write_still=True) + + # remove hdri and background + bpy.data.images.remove(environment_texture_node.image) + bpy.data.images.remove(background_img_node.image) + cam.location = [0, 0, 0] + img_id += 1 + + + + + + + + + + + + + + diff --git a/Dataset_Tools/hdri-sun-aligner-1_5.zip b/Dataset_Tools/hdri-sun-aligner-1_5.zip new file mode 100644 index 0000000..c5cbedb Binary files /dev/null and b/Dataset_Tools/hdri-sun-aligner-1_5.zip differ diff --git a/Dataset_Tools/hdri_operators.py b/Dataset_Tools/hdri_operators.py new file mode 100755 index 0000000..d49b5ba --- /dev/null +++ b/Dataset_Tools/hdri_operators.py @@ -0,0 +1,321 @@ +# Blender imports +import bpy.types + +# Other imports +import numpy as np +from math import pi, cos, sin +import mathutils + + +def rotate(context): + """Rotate active object in alignment with sun position""" + + bl_idname = "hdrisa.rotate" + bl_label = "Rotate active object in alignment with sun position." + bl_options = {'REGISTER'} + + + def execute(context): + scene = context.scene + object = bpy.data.objects['HDRI Sun'] + + longitude = scene.hdri_sa_props.long_deg * (pi/180) # Convert to radians + latitude = scene.hdri_sa_props.lat_deg * (pi/180) + + # Calculate a vector pointing from the longitude and latitude to origo + # See https://vvvv.org/blog/polar-spherical-and-geographic-coordinates + x = cos(latitude) * cos(longitude) + y = cos(latitude) * sin(longitude) + z = sin(latitude) + + # Define euler rotation according to the vector + vector = mathutils.Vector([x, -y, z]) # "-y" to match Blender coordinate system + up_axis = mathutils.Vector([0.0, 0.0, 1.0]) + angle = vector.angle(up_axis, 0) + axis = up_axis.cross(vector) + euler = mathutils.Matrix.Rotation(angle, 4, axis).to_euler() + + # Store z-rotation value as property, used for driver calculation + scene.hdri_sa_props.z_org = euler.z + + # Rotate selected object + object.rotation_euler = euler + + return {'FINISHED'} + + execute(context) + + +def add_new_sun(context): + """Add a new sun, rotated in alignment with current sun position""" + + bl_idname = "hdrisa.add_new_sun" + bl_label = "Add a new sun, rotated in alignment with current sun position." 
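+    # These functions appear to be the HDRI Sun Aligner add-on operators (hdrisa.*) unwrapped into plain
+    # functions so a background script can call them directly; typical call order (sketch, as used in
+    # get_dataset.py): add_new_sun(bpy.context); calculate_sun_position(bpy.context);
+    # add_rotation_driver(bpy.context); rotate(bpy.context).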
+ bl_options = {'REGISTER'} + + def make_collection(collection_name): + # Check if collection already exists + if collection_name in bpy.data.collections: + return bpy.data.collections[collection_name] + # If not, create new collection + else: + new_collection = bpy.data.collections.new(collection_name) + bpy.context.scene.collection.children.link(new_collection) + return new_collection + + def execute(context): + # Deselect objects + for obj in context.selected_objects: + obj.select_set(state=False) + + # Create new collection if it doesn't exist + new_collection = make_collection("HDRI Sun Aligner") + + # Create new sun object in the collection + sun_data = bpy.data.lights.new(name="HDRI Sun", type='SUN') + sun_object = bpy.data.objects.new(name="HDRI Sun", object_data=sun_data) + new_collection.objects.link(sun_object) + + # Select sun object and rotate + sun_object.select_set(state=True) + context.view_layer.objects.active = sun_object + rotate(context) + + return {'FINISHED'} + + execute(context) + + + + +def add_rotation_driver(context): + """Add a a driver to the active object z-rotation, based on HDRI mapping node""" + + bl_idname = "hdrisa.add_rotation_driver" + bl_label = "Add a a driver to the active object z-rotation, based on HDRI rotation using mapping node." + bl_options = {'REGISTER'} + + def execute(context): + scene = context.scene + object = bpy.data.objects['HDRI Sun'] + + mapping_node = None + world_nodes = scene.world.node_tree.nodes # All nodes for the World + + for node in world_nodes: + # Find the Vector Mapping node + if isinstance(node, bpy.types.ShaderNodeMapping): + mapping_node = node.name + break + + if mapping_node: + # Check for mapping node attributes in Blender 2.80 + if hasattr(world_nodes[mapping_node], 'rotation'): + # Path to HDRI mapping node z-rotation value + data_path = f'node_tree.nodes["{mapping_node}"].rotation[2]' + + # If not, assume Blender 2.81 mapping node attributes + else: + # Path to HDRI mapping node z-rotation value + data_path = f'node_tree.nodes["{mapping_node}"].inputs["Rotation"].default_value[2]' + + # Driver for z rotation + z_rotation_driver = object.driver_add('rotation_euler', 2) + + hdri_z = z_rotation_driver.driver.variables.new() # HDRI mapping node + obj_z = z_rotation_driver.driver.variables.new() # Object original rotation + + hdri_z.name = "hdri_z" + hdri_z.targets[0].id_type = 'WORLD' + hdri_z.targets[0].id = scene.world + hdri_z.targets[0].data_path = data_path + + obj_z.name = "obj_z" + obj_z.targets[0].id_type = 'SCENE' + obj_z.targets[0].id = scene + obj_z.targets[0].data_path = 'hdri_sa_props.z_org' + + z_rotation_driver.driver.expression = obj_z.name + '-' + hdri_z.name + + else: + msg = "No Mapping node defined for HDRI rotation." + bpy.ops.message.messagebox('INVOKE_DEFAULT', message=msg) + return {'CANCELLED'} + + + return {'FINISHED'} + + execute(context) + + + + + +def calculate_sun_position(context): + """Calculate the brightest spot in the HDRI image used for the environment""" + + bl_idname = "hdrisa.calculate_sun_position" + bl_label = "Calculate sun position." 
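+    # Rough outline of what follows: blur the equirectangular HDRI, locate its brightest pixel,
+    # convert that pixel's x/y into longitude/latitude in degrees, and store them on
+    # scene.hdri_sa_props so the 'HDRI Sun' lamp can be rotated to match the light direction.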
+ bl_options = {'REGISTER', 'UNDO'} + + def create_circular_mask(h, w, thickness, center=None, radius=None): + """Create a circular mask used for drawing on the HDRI preview.""" + + if center is None: # use the middle of the image + center = [int(w/2), int(h/2)] + if radius is None: # use the smallest distance between the center and image walls + radius = min(center[0], center[1], w-center[0], h-center[1]) + + Y, X = np.ogrid[:h, :w] + dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2) + + mask = np.logical_and(dist_from_center <= radius, dist_from_center >= (radius - thickness)) + + return mask + + def gaussian_blur(gray_image, sigma): + """ Apply gaussion blur to a grayscale image. + + Input: + - 2D Numpy array + - sigma (gaussian blur radius) + Return: + - 2D Numpy array (blurred image) + See https://scipython.com/book/chapter-6-numpy/examples/blurring-an-image-with-a-two-dimensional-fft/ + """ + + rows, cols = gray_image.shape + + # Take the 2-dimensional DFT and centre the frequencies + ftimage = np.fft.fft2(gray_image) + ftimage = np.fft.fftshift(ftimage) + + # Build and apply a Gaussian filter. + sigmax = sigma + sigmay = sigma + cy, cx = rows/2, cols/2 + y = np.linspace(0, rows, rows) + x = np.linspace(0, cols, cols) + X, Y = np.meshgrid(x, y) + gmask = np.exp(-(((X-cx)/sigmax)**2 + ((Y-cy)/sigmay)**2)) + + ftimagep = ftimage * gmask + + # Take the inverse transform + imagep = np.fft.ifft2(ftimagep) + imagep = np.abs(imagep) + + return imagep + + def process_hdri(image): + """ + Calculate the brightest point in the equirectangular HDRI image (i.e. the sun or brightest light). + A gaussian blur is applied to the image to prevent errors from single bright pixels. + Update the "hdri_preview" image and return the longitude and latitude in degrees. 
+ """ + + # Get a flat Numpy array with the image pixels + hdri_img = np.array(image.pixels[:]) + + width, height = image.size + depth = 4 # RGBA + + # Reshape to RGBA matrix + hdri_img = np.array(hdri_img).reshape([height, width, depth]) + + # Get image dimensions + height, width = hdri_img.shape[:2] + + # Convert to grayscale + gray_img = np.dot(hdri_img[...,:3], [0.299, 0.587, 0.114]) + + # Apply gaussian blur + gray_img = gaussian_blur(gray_img, sigma=100) + + # Find index of maximum value from 2D numpy array + result = np.where(gray_img == np.amax(gray_img)) + + # zip the 2 arrays to get the exact coordinates + list_of_coordinates = list(zip(result[0], result[1])) + + # Assume only one maximum, use the first found + max_loc_new = list_of_coordinates[0] + + # Get x and y coordinates for the brightest pixel + max_x = max_loc_new[1] + max_y = max_loc_new[0] + + # Create masks to indicate sun position + circle_mask = create_circular_mask(height, width, thickness=4, center=[max_x, max_y], radius=50) + point_mask = create_circular_mask(height, width, thickness=4, center=[max_x, max_y], radius=5) + + # Draw circle + hdri_img[:, :, 0][circle_mask] = 1 # Red + hdri_img[:, :, 1][circle_mask] = 0 # Green + hdri_img[:, :, 2][circle_mask] = 0 # Blue + + # Draw center dot + hdri_img[:, :, 0][point_mask] = 1 + hdri_img[:, :, 1][point_mask] = 0 + hdri_img[:, :, 2][point_mask] = 0 + + # Find the point in longitude and latitude (degrees) + long_deg = ((max_x * 360) / width) - 180 + lat_deg = -(((max_y * -180) / height) + 90) + + # Flatten array and update the blender image object + image.pixels = hdri_img.ravel() + + return long_deg, lat_deg + + def invoke(context): + scene = context.scene + screen = context.screen + world_nodes = scene.world.node_tree.nodes # All nodes for the World + image = None + + # Cleanup to prevent duplicate images + for img in bpy.data.images: + name = img.name + if name.startswith("hdri_sa_preview"): + bpy.data.images.remove(img) + + # Check if an environmental image is defined + for node in world_nodes: + # Find the Environment Texture node + if isinstance(node, bpy.types.ShaderNodeTexEnvironment): + image = node.image + + if image: + # Make a copy of the original HDRI + hdri_preview = image.copy() + + hdri_preview.name = "hdri_sa_preview." + image.file_format + + # Get image dimensions + org_width = hdri_preview.size[0] + org_height = hdri_preview.size[1] + + # Scale image if it's larger than 1k for improving performance + if org_width > 1024: + new_width = 1024 + new_height = int(org_height * (new_width / org_width)) + hdri_preview.scale(new_width, new_height) + else: + msg = "Please add an Environment Texture for the world." 
+ bpy.ops.message.messagebox('INVOKE_DEFAULT', message=msg) + return {'CANCELLED'} + + # Calculate longitude, latitude and update HDRI preview image + long_deg, lat_deg = process_hdri(hdri_preview) + + # Update properties + scene.hdri_sa_props.long_deg = long_deg + scene.hdri_sa_props.lat_deg = lat_deg + scene.hdri_sa_props.sun_position_calculated = True + + return {'FINISHED'} + + invoke(context) + + \ No newline at end of file diff --git a/Dataset_Tools/run_get_dataset.py b/Dataset_Tools/run_get_dataset.py new file mode 100644 index 0000000..8c50fa6 --- /dev/null +++ b/Dataset_Tools/run_get_dataset.py @@ -0,0 +1,22 @@ +import argparse +import subprocess +import string +import sys +sys.path.append('') + + +if __name__=='__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--num_images", type=int, required=True, help='number of images per shape letter pair') + parser.add_argument("--blender_path", type=str, required=True, help='path to blender on your device') + args = parser.parse_args() + + shapes_list = ['Half_Circle', 'Circle', 'Heart', 'Plus', 'Square', 'Triangle'] + alphanumeric_list = list(string.ascii_uppercase) + [str(i) for i in range(10)] + + for shape in shapes_list: + for alphanumeric in alphanumeric_list: + command = "{} --background --use-extension 1 -E CYCLES -t 0 -P 'get_dataset.py' -- --shape {} --alphanumeric {} --num_images {}".format(args.blender_path, shape, alphanumeric, args.num_images) + process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True) + output = process.stdout + print(output) \ No newline at end of file diff --git a/Mapping/combine_images.py b/Mapping/combine_images.py new file mode 100644 index 0000000..ec1342c --- /dev/null +++ b/Mapping/combine_images.py @@ -0,0 +1,83 @@ +import utm +import cv2 +import numpy as np + + + +def paste_image(new_img, old_img, new_img_center_lat, new_img_center_long, old_img_center_x: 'utm coords', old_img_center_y: 'utm coords'): + """ + new_img: image you want to add to old image + old_img: one image that is a collection of images that were previously placed together + """ + new_img_h, new_img_w, _ = new_img.shape + new_img_center_x, new_img_center_y, _, _ = utm.from_latlon(new_img_center_lat, new_img_center_long) + new_img_left = new_img_center_x - (new_img_w / 2) + new_img_right = new_img_center_x + (new_img_w / 2) + new_img_top = new_img_center_y + (new_img_h / 2) + new_img_bottom = new_img_center_y - (new_img_h / 2) + + old_img_h, old_img_w, _, = old_img.shape + old_img_left = old_img_center_x - (old_img_w / 2) + old_img_right = old_img_center_x + (old_img_w / 2) + old_img_top = old_img_center_y + (old_img_h / 2) + old_img_bottom = old_img_center_y - (old_img_h / 2) + + final_img_left = min(new_img_left, old_img_left) + final_img_right = max(new_img_right, old_img_right) + final_img_bottom = min(new_img_bottom, old_img_bottom) + final_img_top = max(new_img_top, old_img_top) + final_img_h = final_img_top - final_img_bottom + final_img_w = final_img_right - final_img_left + + new_utm_center_x = final_img_left + (final_img_w / 2) + new_utm_center_y = final_img_top - (final_img_h / 2) + + # move utm coords to positive origin + final_img_right = final_img_right - final_img_left + final_img_top -= final_img_bottom + final_img_left = 0 + final_img_bottom = 0 + + # find new coords of new and old images w.r.t. 
moved final img + new_img_adj_left, new_img_adj_right, new_img_adj_top, new_img_adj_bottom = 0, 0, 0, 0 + old_img_adj_left, old_img_adj_right, old_img_adj_top, old_img_adj_bottom = 0, 0, 0, 0 + if new_img_left > old_img_left: + new_img_adj_left = 0 + new_img_adj_right = new_img_w + old_img_adj_left = final_img_right - old_img_w + old_img_adj_right = final_img_right + else: + new_img_adj_left = final_img_right - new_img_w + new_img_adj_right = final_img_right + old_img_adj_left = 0 + old_img_adj_right = old_img_w + + if new_img_bottom > old_img_bottom: + new_img_adj_bottom = 0 + new_img_adj_top = new_img_h + old_img_adj_bottom = final_img_top - old_img_h + old_img_adj_top = final_img_top + else: + new_img_adj_bottom = final_img_top - new_img_h + new_img_adj_top = final_img_top + old_img_adj_bottom = 0 + old_img_adj_top = old_img_h + + # place two images on final image + final_img = np.zeros((int(final_img_h), int(final_img_w), 3), np.uint8) + final_img[int(new_img_adj_bottom):int(new_img_adj_top), int(new_img_adj_left):int(new_img_adj_right),:3] = new_img + final_img[int(old_img_adj_bottom):int(old_img_adj_top), int(old_img_adj_left):int(old_img_adj_right),:3] = old_img + + cv2.imshow('Final Image', final_img) + cv2.waitKey(0) + + return new_utm_center_x, new_utm_center_y + + +if __name__=='__main__': + # skeleton of what code needs + latlon_dict = {'new_image': (lat, lon)} + utm_center_coords = {'old_image': (x, y)} + new_img = cv2.imread(new_img_path) + old_img = cv2.imread(old_img_path) + paste_image(new_img, old_img, latlon_dict['new_image'][0], latlon_dict['new_image'][1], utm_center_coords['old_image'][0], utm_center_coords['old_image'][1]) diff --git a/Mapping/get_gps_data.py b/Mapping/get_gps_data.py new file mode 100644 index 0000000..6324f36 --- /dev/null +++ b/Mapping/get_gps_data.py @@ -0,0 +1,120 @@ +from PIL import Image +from PIL.ExifTags import TAGS, GPSTAGS +from osgeo import gdal,ogr,osr +import affine + +def get_exif_data(image): + exif_data = {} + info = image._getexif() + if info: + for tag, value in info.items(): + decoded = TAGS.get(tag, tag) + if decoded == "GPSInfo": + gps_data = {} + for t in value: + sub_decoded = GPSTAGS.get(t, t) + gps_data[sub_decoded] = value[t] + + exif_data[decoded] = gps_data + else: + exif_data[decoded] = value + + return exif_data + +def _get_if_exist(data, key): + if key in data: + return data[key] + else: + pass + +def get_decimal_coordinates(info): + for key in ['Latitude', 'Longitude']: + if 'GPS'+key in info and 'GPS'+key+'Ref' in info: + e = info['GPS'+key] + ref = info['GPS'+key+'Ref'] + info[key] = ( e[0][0]/e[0][1] + + e[1][0]/e[1][1] / 60 + + e[2][0]/e[2][1] / 3600 + ) * (-1 if ref in ['S','W'] else 1) + + if 'Latitude' in info and 'Longitude' in info: + return [info['Latitude'], info['Longitude']] + +def get_lat_lon(exif_data): + gps_info = exif_data["GPSInfo"] + lat = None + lon = None + + if "GPSInfo" in exif_data: + gps_info = exif_data["GPSInfo"] + + gps_latitude = _get_if_exist(gps_info, "GPSLatitude") + gps_latitude_ref = _get_if_exist(gps_info, "GPSLatitudeRef") + gps_longitude = _get_if_exist(gps_info, "GPSLongitude") + gps_longitude_ref = _get_if_exist(gps_info, "GPSLongitudeRef") + + if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref: + lat, lon = get_decimal_coordinates(gps_info) + # if gps_latitude_ref != "N": + # lat = 0 - lat + + # if gps_longitude_ref != "E": + # lon = 0 - lon + + return lat, lon + + +def ReprojectCoords(coords,src_srs,tgt_srs): + ''' Reproject a list of x,y coordinates. 
+ + @type geom: C{tuple/list} + @param geom: List of [[x,y],...[x,y]] coordinates + @type src_srs: C{osr.SpatialReference} + @param src_srs: OSR SpatialReference object + @type tgt_srs: C{osr.SpatialReference} + @param tgt_srs: OSR SpatialReference object + @rtype: C{tuple/list} + @return: List of transformed [[x,y],...[x,y]] coordinates + ''' + trans_coords=[] + transform = osr.CoordinateTransformation( src_srs, tgt_srs) + for x,y in coords: + x,y,z = transform.TransformPoint(x,y) + trans_coords.append([x,y]) + return trans_coords + + +def retrieve_pixel_value(geo_coord, data_source): + # https://gis.stackexchange.com/questions/221292/retrieve-pixel-value-with-geographic-coordinate-as-input-with-gdal + """Return floating-point value that corresponds to given point.""" + x, y = geo_coord[0], geo_coord[1] + forward_transform = affine.Affine.from_gdal(*data_source.GetGeoTransform()) + reverse_transform = ~forward_transform + px, py = reverse_transform * (x, y) + px, py = int(px + 0.5), int(py + 0.5) + pixel_coord = px, py + return pixel_coord + + +if __name__ == "__main__": + '''starting point to create image masks for output map for continuously expanding map''' + # get center gps coords of new image + image = Image.open("image-proc_2020-21/Mapping/images/DJI_0025.JPG") + exif_data = get_exif_data(image) + lat, lon = get_lat_lon(exif_data) + print(lat, lon) + + # get projection data from tif file + raster='image-proc_2020-21/odm_orthophoto/odm_orthophoto.tif' + ds=gdal.Open(raster) + src_srs=osr.SpatialReference() + src_srs.ImportFromWkt(ds.GetProjection()) + tgt_srs = src_srs.CloneGeogCS() + + # convert gps to utm + utm = latlon=ReprojectCoords([(lat, lon)],tgt_srs,src_srs) + print(utm) + + # convert utm to pixel coords on tif image + pixel_coord_x, pixel_coord_y = retrieve_pixel_value(utm[0], ds) + print(pixel_coord_x, pixel_coord_y) \ No newline at end of file diff --git a/alphanumeric/0.mtl b/alphanumeric/0.mtl new file mode 100644 index 0000000..ede94b6 --- /dev/null +++ b/alphanumeric/0.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.019 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/0.obj b/alphanumeric/0.obj new file mode 100644 index 0000000..765a76e --- /dev/null +++ b/alphanumeric/0.obj @@ -0,0 +1,487 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 0.mtl +o Curve.009_Curve.099 +v -0.001493 0.000000 -0.008271 +v -0.002174 0.000000 -0.008137 +v -0.002818 0.000000 -0.007949 +v -0.003423 0.000000 -0.007708 +v -0.003988 0.000000 -0.007416 +v -0.004512 0.000000 -0.007072 +v -0.004995 0.000000 -0.006678 +v -0.005435 0.000000 -0.006235 +v -0.005833 0.000000 -0.005742 +v -0.006186 0.000000 -0.005202 +v -0.006494 0.000000 -0.004614 +v -0.006756 0.000000 -0.003979 +v -0.006972 0.000000 -0.003299 +v -0.007048 0.000000 -0.003000 +v -0.007116 0.000000 -0.002686 +v -0.007176 0.000000 -0.002357 +v -0.007229 0.000000 -0.002018 +v -0.007273 0.000000 -0.001668 +v -0.007308 0.000000 -0.001311 +v -0.007335 0.000000 -0.000949 +v -0.007353 0.000000 -0.000583 +v -0.007362 0.000000 -0.000215 +v -0.007362 0.000000 0.000152 +v -0.007351 0.000000 0.000517 +v -0.007332 0.000000 0.000877 +v -0.007279 0.000000 0.001494 +v -0.007203 0.000000 0.002080 +v -0.007105 0.000000 0.002637 +v -0.006983 0.000000 0.003165 +v -0.006837 0.000000 0.003666 +v -0.006667 0.000000 0.004142 +v -0.006471 0.000000 0.004594 
+v -0.006249 0.000000 0.005023 +v -0.006002 0.000000 0.005431 +v -0.005727 0.000000 0.005819 +v -0.005425 0.000000 0.006188 +v -0.005095 0.000000 0.006540 +v -0.004820 0.000000 0.006798 +v -0.004534 0.000000 0.007034 +v -0.004233 0.000000 0.007250 +v -0.003919 0.000000 0.007445 +v -0.003589 0.000000 0.007620 +v -0.003243 0.000000 0.007774 +v -0.002880 0.000000 0.007910 +v -0.002499 0.000000 0.008027 +v -0.002098 0.000000 0.008125 +v -0.001678 0.000000 0.008205 +v -0.001236 0.000000 0.008267 +v -0.000773 0.000000 0.008312 +v -0.000556 0.000000 0.008325 +v -0.000333 0.000000 0.008334 +v -0.000104 0.000000 0.008337 +v 0.000129 0.000000 0.008336 +v 0.000363 0.000000 0.008330 +v 0.000596 0.000000 0.008320 +v 0.000827 0.000000 0.008305 +v 0.001055 0.000000 0.008286 +v 0.001275 0.000000 0.008262 +v 0.001488 0.000000 0.008234 +v 0.001691 0.000000 0.008202 +v 0.001882 0.000000 0.008167 +v 0.002524 0.000000 0.008006 +v 0.003129 0.000000 0.007794 +v 0.003697 0.000000 0.007531 +v 0.004227 0.000000 0.007219 +v 0.004718 0.000000 0.006858 +v 0.005171 0.000000 0.006449 +v 0.005582 0.000000 0.005994 +v 0.005953 0.000000 0.005492 +v 0.006283 0.000000 0.004944 +v 0.006569 0.000000 0.004352 +v 0.006813 0.000000 0.003716 +v 0.007012 0.000000 0.003038 +v 0.007083 0.000000 0.002733 +v 0.007145 0.000000 0.002407 +v 0.007201 0.000000 0.002065 +v 0.007248 0.000000 0.001708 +v 0.007287 0.000000 0.001342 +v 0.007318 0.000000 0.000969 +v 0.007340 0.000000 0.000592 +v 0.007353 0.000000 0.000216 +v 0.007358 0.000000 -0.000156 +v 0.007353 0.000000 -0.000521 +v 0.007338 0.000000 -0.000875 +v 0.007314 0.000000 -0.001214 +v 0.007250 0.000000 -0.001792 +v 0.007168 0.000000 -0.002338 +v 0.007066 0.000000 -0.002853 +v 0.006944 0.000000 -0.003340 +v 0.006801 0.000000 -0.003800 +v 0.006636 0.000000 -0.004237 +v 0.006448 0.000000 -0.004652 +v 0.006236 0.000000 -0.005047 +v 0.005999 0.000000 -0.005425 +v 0.005736 0.000000 -0.005788 +v 0.005447 0.000000 -0.006138 +v 0.005130 0.000000 -0.006476 +v 0.004901 0.000000 -0.006696 +v 0.004664 0.000000 -0.006902 +v 0.004416 0.000000 -0.007094 +v 0.004159 0.000000 -0.007273 +v 0.003893 0.000000 -0.007438 +v 0.003617 0.000000 -0.007589 +v 0.003332 0.000000 -0.007727 +v 0.003037 0.000000 -0.007852 +v 0.002732 0.000000 -0.007964 +v 0.002418 0.000000 -0.008062 +v 0.002094 0.000000 -0.008147 +v 0.001760 0.000000 -0.008219 +v 0.001571 0.000000 -0.008249 +v 0.001339 0.000000 -0.008274 +v 0.001071 0.000000 -0.008296 +v 0.000776 0.000000 -0.008312 +v 0.000464 0.000000 -0.008325 +v 0.000142 0.000000 -0.008332 +v -0.000180 0.000000 -0.008335 +v -0.000493 0.000000 -0.008332 +v -0.000790 0.000000 -0.008325 +v -0.001062 0.000000 -0.008312 +v -0.001299 0.000000 -0.008294 +v 0.001052 0.000000 -0.005349 +v 0.001165 0.000000 -0.005293 +v 0.001277 0.000000 -0.005227 +v 0.001388 0.000000 -0.005154 +v 0.001496 0.000000 -0.005073 +v 0.001601 0.000000 -0.004987 +v 0.001702 0.000000 -0.004894 +v 0.001798 0.000000 -0.004797 +v 0.001889 0.000000 -0.004696 +v 0.001973 0.000000 -0.004592 +v 0.002050 0.000000 -0.004486 +v 0.002119 0.000000 -0.004378 +v 0.002179 0.000000 -0.004269 +v 0.002223 0.000000 -0.004173 +v 0.002268 0.000000 -0.004060 +v 0.002314 0.000000 -0.003931 +v 0.002359 0.000000 -0.003789 +v 0.002405 0.000000 -0.003635 +v 0.002449 0.000000 -0.003470 +v 0.002492 0.000000 -0.003298 +v 0.002532 0.000000 -0.003120 +v 0.002571 0.000000 -0.002936 +v 0.002606 0.000000 -0.002750 +v 0.002638 0.000000 -0.002562 +v 0.002667 0.000000 -0.002376 +v 0.002692 0.000000 -0.002140 +v 0.002713 0.000000 -0.001822 +v 0.002729 
0.000000 -0.001437 +v 0.002740 0.000000 -0.001003 +v 0.002747 0.000000 -0.000536 +v 0.002749 0.000000 -0.000052 +v 0.002747 0.000000 0.000431 +v 0.002740 0.000000 0.000899 +v 0.002729 0.000000 0.001333 +v 0.002713 0.000000 0.001717 +v 0.002692 0.000000 0.002035 +v 0.002667 0.000000 0.002271 +v 0.002584 0.000000 0.002777 +v 0.002485 0.000000 0.003234 +v 0.002368 0.000000 0.003646 +v 0.002234 0.000000 0.004013 +v 0.002081 0.000000 0.004337 +v 0.001907 0.000000 0.004620 +v 0.001712 0.000000 0.004863 +v 0.001493 0.000000 0.005067 +v 0.001252 0.000000 0.005235 +v 0.000985 0.000000 0.005368 +v 0.000692 0.000000 0.005468 +v 0.000372 0.000000 0.005535 +v 0.000167 0.000000 0.005559 +v -0.000041 0.000000 0.005565 +v -0.000249 0.000000 0.005556 +v -0.000455 0.000000 0.005531 +v -0.000657 0.000000 0.005492 +v -0.000853 0.000000 0.005439 +v -0.001041 0.000000 0.005372 +v -0.001218 0.000000 0.005294 +v -0.001382 0.000000 0.005203 +v -0.001532 0.000000 0.005101 +v -0.001664 0.000000 0.004989 +v -0.001778 0.000000 0.004867 +v -0.001882 0.000000 0.004727 +v -0.001981 0.000000 0.004575 +v -0.002074 0.000000 0.004409 +v -0.002162 0.000000 0.004230 +v -0.002244 0.000000 0.004038 +v -0.002320 0.000000 0.003833 +v -0.002391 0.000000 0.003614 +v -0.002457 0.000000 0.003380 +v -0.002517 0.000000 0.003133 +v -0.002572 0.000000 0.002872 +v -0.002622 0.000000 0.002596 +v -0.002667 0.000000 0.002306 +v -0.002699 0.000000 0.002022 +v -0.002725 0.000000 0.001676 +v -0.002745 0.000000 0.001279 +v -0.002760 0.000000 0.000846 +v -0.002768 0.000000 0.000388 +v -0.002771 0.000000 -0.000081 +v -0.002768 0.000000 -0.000550 +v -0.002760 0.000000 -0.001005 +v -0.002745 0.000000 -0.001433 +v -0.002725 0.000000 -0.001823 +v -0.002699 0.000000 -0.002161 +v -0.002667 0.000000 -0.002434 +v -0.002606 0.000000 -0.002807 +v -0.002537 0.000000 -0.003152 +v -0.002460 0.000000 -0.003470 +v -0.002374 0.000000 -0.003762 +v -0.002278 0.000000 -0.004029 +v -0.002172 0.000000 -0.004271 +v -0.002056 0.000000 -0.004489 +v -0.001929 0.000000 -0.004685 +v -0.001792 0.000000 -0.004860 +v -0.001642 0.000000 -0.005013 +v -0.001481 0.000000 -0.005147 +v -0.001307 0.000000 -0.005262 +v -0.001207 0.000000 -0.005318 +v -0.001108 0.000000 -0.005368 +v -0.001009 0.000000 -0.005411 +v -0.000910 0.000000 -0.005447 +v -0.000808 0.000000 -0.005478 +v -0.000702 0.000000 -0.005503 +v -0.000592 0.000000 -0.005522 +v -0.000475 0.000000 -0.005536 +v -0.000350 0.000000 -0.005546 +v -0.000217 0.000000 -0.005551 +v -0.000074 0.000000 -0.005551 +v 0.000081 0.000000 -0.005547 +v 0.000201 0.000000 -0.005544 +v 0.000307 0.000000 -0.005540 +v 0.000401 0.000000 -0.005534 +v 0.000485 0.000000 -0.005527 +v 0.000561 0.000000 -0.005517 +v 0.000632 0.000000 -0.005505 +v 0.000699 0.000000 -0.005489 +v 0.000764 0.000000 -0.005470 +v 0.000830 0.000000 -0.005447 +v 0.000898 0.000000 -0.005420 +v 0.000971 0.000000 -0.005387 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.019 +s off +f 117//1 115//1 116//1 +f 118//1 115//1 117//1 +f 118//1 114//1 115//1 +f 119//1 114//1 118//1 +f 119//1 113//1 114//1 +f 120//1 113//1 119//1 +f 120//1 112//1 113//1 +f 120//1 111//1 112//1 +f 1//1 111//1 120//1 +f 1//1 110//1 111//1 +f 2//1 110//1 1//1 +f 2//1 109//1 110//1 +f 2//1 108//1 109//1 +f 2//1 107//1 108//1 +f 3//1 107//1 2//1 +f 3//1 106//1 107//1 +f 3//1 105//1 106//1 +f 4//1 105//1 3//1 +f 4//1 104//1 105//1 +f 4//1 103//1 104//1 +f 5//1 103//1 4//1 +f 5//1 102//1 103//1 +f 5//1 101//1 102//1 +f 6//1 101//1 5//1 +f 6//1 100//1 101//1 +f 6//1 99//1 100//1 +f 7//1 99//1 6//1 +f 7//1 98//1 99//1 +f 
7//1 97//1 98//1 +f 8//1 97//1 7//1 +f 8//1 96//1 97//1 +f 9//1 96//1 8//1 +f 9//1 95//1 96//1 +f 9//1 94//1 95//1 +f 10//1 226//1 9//1 +f 226//1 227//1 9//1 +f 227//1 228//1 9//1 +f 228//1 94//1 9//1 +f 229//1 94//1 228//1 +f 230//1 94//1 229//1 +f 10//1 225//1 226//1 +f 231//1 94//1 230//1 +f 232//1 94//1 231//1 +f 10//1 224//1 225//1 +f 233//1 94//1 232//1 +f 234//1 94//1 233//1 +f 10//1 223//1 224//1 +f 235//1 94//1 234//1 +f 236//1 94//1 235//1 +f 10//1 222//1 223//1 +f 237//1 94//1 236//1 +f 10//1 221//1 222//1 +f 238//1 94//1 237//1 +f 239//1 94//1 238//1 +f 10//1 220//1 221//1 +f 239//1 93//1 94//1 +f 240//1 93//1 239//1 +f 10//1 219//1 220//1 +f 121//1 93//1 240//1 +f 10//1 218//1 219//1 +f 122//1 93//1 121//1 +f 10//1 217//1 218//1 +f 123//1 93//1 122//1 +f 10//1 216//1 217//1 +f 124//1 93//1 123//1 +f 11//1 216//1 10//1 +f 125//1 93//1 124//1 +f 11//1 215//1 216//1 +f 126//1 93//1 125//1 +f 126//1 92//1 93//1 +f 11//1 214//1 215//1 +f 127//1 92//1 126//1 +f 128//1 92//1 127//1 +f 11//1 213//1 214//1 +f 129//1 92//1 128//1 +f 130//1 92//1 129//1 +f 11//1 212//1 213//1 +f 130//1 91//1 92//1 +f 12//1 212//1 11//1 +f 131//1 91//1 130//1 +f 12//1 211//1 212//1 +f 132//1 91//1 131//1 +f 133//1 91//1 132//1 +f 12//1 210//1 211//1 +f 134//1 91//1 133//1 +f 134//1 90//1 91//1 +f 135//1 90//1 134//1 +f 136//1 90//1 135//1 +f 12//1 209//1 210//1 +f 13//1 209//1 12//1 +f 137//1 90//1 136//1 +f 137//1 89//1 90//1 +f 138//1 89//1 137//1 +f 13//1 208//1 209//1 +f 139//1 89//1 138//1 +f 140//1 89//1 139//1 +f 13//1 207//1 208//1 +f 140//1 88//1 89//1 +f 14//1 207//1 13//1 +f 141//1 88//1 140//1 +f 14//1 206//1 207//1 +f 142//1 88//1 141//1 +f 15//1 206//1 14//1 +f 143//1 88//1 142//1 +f 143//1 87//1 88//1 +f 15//1 205//1 206//1 +f 144//1 87//1 143//1 +f 16//1 205//1 15//1 +f 145//1 87//1 144//1 +f 16//1 204//1 205//1 +f 146//1 87//1 145//1 +f 17//1 204//1 16//1 +f 146//1 86//1 87//1 +f 17//1 203//1 204//1 +f 147//1 86//1 146//1 +f 18//1 203//1 17//1 +f 18//1 202//1 203//1 +f 148//1 86//1 147//1 +f 148//1 85//1 86//1 +f 19//1 202//1 18//1 +f 149//1 85//1 148//1 +f 19//1 201//1 202//1 +f 20//1 201//1 19//1 +f 149//1 84//1 85//1 +f 20//1 200//1 201//1 +f 150//1 84//1 149//1 +f 21//1 200//1 20//1 +f 150//1 83//1 84//1 +f 22//1 200//1 21//1 +f 22//1 199//1 200//1 +f 151//1 83//1 150//1 +f 151//1 82//1 83//1 +f 23//1 199//1 22//1 +f 151//1 81//1 82//1 +f 23//1 198//1 199//1 +f 152//1 81//1 151//1 +f 24//1 198//1 23//1 +f 152//1 80//1 81//1 +f 24//1 197//1 198//1 +f 153//1 80//1 152//1 +f 25//1 197//1 24//1 +f 153//1 79//1 80//1 +f 25//1 196//1 197//1 +f 26//1 196//1 25//1 +f 154//1 79//1 153//1 +f 154//1 78//1 79//1 +f 26//1 195//1 196//1 +f 155//1 78//1 154//1 +f 155//1 77//1 78//1 +f 27//1 195//1 26//1 +f 27//1 194//1 195//1 +f 155//1 76//1 77//1 +f 156//1 76//1 155//1 +f 27//1 193//1 194//1 +f 157//1 76//1 156//1 +f 157//1 75//1 76//1 +f 28//1 193//1 27//1 +f 158//1 75//1 157//1 +f 28//1 192//1 193//1 +f 158//1 74//1 75//1 +f 28//1 191//1 192//1 +f 29//1 191//1 28//1 +f 158//1 73//1 74//1 +f 159//1 73//1 158//1 +f 29//1 190//1 191//1 +f 159//1 72//1 73//1 +f 29//1 189//1 190//1 +f 30//1 189//1 29//1 +f 160//1 72//1 159//1 +f 30//1 188//1 189//1 +f 30//1 187//1 188//1 +f 161//1 72//1 160//1 +f 31//1 187//1 30//1 +f 161//1 71//1 72//1 +f 31//1 186//1 187//1 +f 162//1 71//1 161//1 +f 31//1 185//1 186//1 +f 32//1 185//1 31//1 +f 32//1 184//1 185//1 +f 163//1 71//1 162//1 +f 163//1 70//1 71//1 +f 32//1 183//1 184//1 +f 32//1 182//1 183//1 +f 33//1 182//1 32//1 +f 164//1 70//1 163//1 +f 33//1 181//1 
182//1 +f 165//1 70//1 164//1 +f 33//1 180//1 181//1 +f 165//1 69//1 70//1 +f 33//1 179//1 180//1 +f 34//1 179//1 33//1 +f 166//1 69//1 165//1 +f 34//1 178//1 179//1 +f 34//1 177//1 178//1 +f 167//1 69//1 166//1 +f 34//1 176//1 177//1 +f 168//1 69//1 167//1 +f 34//1 175//1 176//1 +f 35//1 175//1 34//1 +f 35//1 174//1 175//1 +f 169//1 69//1 168//1 +f 169//1 68//1 69//1 +f 35//1 173//1 174//1 +f 35//1 172//1 173//1 +f 170//1 68//1 169//1 +f 35//1 171//1 172//1 +f 171//1 68//1 170//1 +f 35//1 68//1 171//1 +f 36//1 68//1 35//1 +f 36//1 67//1 68//1 +f 37//1 67//1 36//1 +f 37//1 66//1 67//1 +f 38//1 66//1 37//1 +f 39//1 66//1 38//1 +f 39//1 65//1 66//1 +f 40//1 65//1 39//1 +f 40//1 64//1 65//1 +f 41//1 64//1 40//1 +f 42//1 64//1 41//1 +f 42//1 63//1 64//1 +f 43//1 63//1 42//1 +f 44//1 63//1 43//1 +f 44//1 62//1 63//1 +f 45//1 62//1 44//1 +f 45//1 61//1 62//1 +f 46//1 61//1 45//1 +f 47//1 61//1 46//1 +f 47//1 60//1 61//1 +f 47//1 59//1 60//1 +f 48//1 59//1 47//1 +f 48//1 58//1 59//1 +f 48//1 57//1 58//1 +f 49//1 57//1 48//1 +f 49//1 56//1 57//1 +f 49//1 55//1 56//1 +f 50//1 55//1 49//1 +f 50//1 54//1 55//1 +f 51//1 54//1 50//1 +f 51//1 53//1 54//1 +f 52//1 53//1 51//1 diff --git a/alphanumeric/1.mtl b/alphanumeric/1.mtl new file mode 100644 index 0000000..a167568 --- /dev/null +++ b/alphanumeric/1.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.021 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/1.obj b/alphanumeric/1.obj new file mode 100644 index 0000000..80a17c7 --- /dev/null +++ b/alphanumeric/1.obj @@ -0,0 +1,266 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 1.mtl +o Curve.002 +v 0.001865 0.000000 -0.008329 +v 0.003208 0.000000 -0.008324 +v 0.001869 0.000000 -0.008336 +v 0.001856 0.000000 -0.008310 +v 0.004541 0.000000 -0.008305 +v 0.001842 0.000000 -0.008281 +v 0.004559 -0.000000 0.000012 +v 0.001824 0.000000 -0.008244 +v 0.001804 0.000000 -0.008203 +v 0.001782 0.000000 -0.008159 +v 0.001761 0.000000 -0.008115 +v 0.001740 0.000000 -0.008074 +v 0.001722 0.000000 -0.008038 +v 0.001708 0.000000 -0.008009 +v 0.001699 0.000000 -0.007989 +v 0.001695 0.000000 -0.007982 +v 0.001591 0.000000 -0.007781 +v 0.001478 0.000000 -0.007590 +v 0.001356 0.000000 -0.007410 +v 0.001224 0.000000 -0.007239 +v 0.001081 0.000000 -0.007077 +v 0.000926 0.000000 -0.006923 +v 0.000759 0.000000 -0.006777 +v 0.000578 0.000000 -0.006638 +v 0.000384 0.000000 -0.006504 +v 0.000174 0.000000 -0.006375 +v -0.000052 0.000000 -0.006251 +v -0.000294 0.000000 -0.006129 +v -0.000527 0.000000 -0.006025 +v -0.000767 0.000000 -0.005929 +v -0.001016 0.000000 -0.005842 +v -0.001275 0.000000 -0.005761 +v -0.001545 0.000000 -0.005689 +v -0.001828 0.000000 -0.005623 +v -0.002124 0.000000 -0.005566 +v -0.002436 0.000000 -0.005514 +v -0.002764 0.000000 -0.005470 +v 0.000400 0.000000 -0.003192 +v -0.003110 0.000000 -0.005432 +v -0.003474 0.000000 -0.005399 +v -0.003859 0.000000 -0.005373 +v -0.003873 0.000000 -0.005372 +v -0.003911 0.000000 -0.005370 +v -0.003970 0.000000 -0.005366 +v -0.004043 0.000000 -0.005362 +v -0.004127 0.000000 -0.005357 +v -0.004215 0.000000 -0.005351 +v -0.004303 0.000000 -0.005346 +v -0.004387 0.000000 -0.005341 +v -0.004460 0.000000 -0.005337 +v -0.004519 0.000000 -0.005333 +v -0.004558 0.000000 -0.005331 +v -0.004572 0.000000 -0.005330 +v -0.004572 0.000000 -0.004307 +v -0.004572 0.000000 -0.003285 +v 
-0.004293 0.000000 -0.003242 +v -0.004235 0.000000 -0.003237 +v -0.004139 0.000000 -0.003232 +v -0.004010 0.000000 -0.003227 +v -0.003849 0.000000 -0.003222 +v -0.003661 0.000000 -0.003217 +v -0.003447 0.000000 -0.003213 +v -0.003212 0.000000 -0.003209 +v -0.002957 0.000000 -0.003205 +v -0.002686 0.000000 -0.003202 +v -0.002402 0.000000 -0.003200 +v -0.002108 0.000000 -0.003199 +v -0.001807 0.000000 -0.003198 +v -0.001764 0.000000 -0.003198 +v -0.001644 0.000000 -0.003198 +v -0.001235 0.000000 -0.003196 +v -0.001462 0.000000 -0.003197 +v -0.000977 0.000000 -0.003196 +v -0.000704 0.000000 -0.003195 +v -0.000430 0.000000 -0.003194 +v -0.000172 0.000000 -0.003193 +v 0.000055 0.000000 -0.003193 +v 0.000237 0.000000 -0.003192 +v 0.000357 0.000000 -0.003192 +v 0.000443 0.000000 -0.002963 +v 0.000448 0.000000 -0.002880 +v 0.000452 0.000000 -0.002703 +v 0.000457 0.000000 -0.002437 +v 0.000461 0.000000 -0.002090 +v 0.000465 0.000000 -0.001670 +v 0.000469 0.000000 -0.001182 +v 0.000472 0.000000 -0.000634 +v 0.000475 0.000000 -0.000033 +v 0.000477 -0.000000 0.000614 +v 0.004572 -0.000000 0.008336 +v 0.000479 -0.000000 0.001301 +v 0.000480 -0.000000 0.002020 +v 0.000481 -0.000000 0.002764 +v 0.000481 -0.000000 0.002872 +v 0.000481 -0.000000 0.003171 +v 0.000481 -0.000000 0.003622 +v 0.000481 -0.000000 0.004188 +v 0.000481 -0.000000 0.004829 +v 0.000481 -0.000000 0.005510 +v 0.000481 -0.000000 0.006190 +v 0.000481 -0.000000 0.006832 +v 0.000481 -0.000000 0.007397 +v 0.000481 -0.000000 0.007848 +v 0.000481 -0.000000 0.008147 +v 0.000481 -0.000000 0.008255 +v 0.000672 -0.000000 0.008299 +v 0.000716 -0.000000 0.008303 +v 0.000790 -0.000000 0.008308 +v 0.000894 -0.000000 0.008312 +v 0.001024 -0.000000 0.008317 +v 0.001178 -0.000000 0.008321 +v 0.001354 -0.000000 0.008324 +v 0.001549 -0.000000 0.008328 +v 0.001759 -0.000000 0.008331 +v 0.001984 -0.000000 0.008333 +v 0.002221 -0.000000 0.008335 +v 0.002467 -0.000000 0.008336 +v 0.002718 -0.000000 0.008336 +v 0.002755 -0.000000 0.008336 +v 0.002856 -0.000000 0.008336 +v 0.003008 -0.000000 0.008336 +v 0.003199 -0.000000 0.008336 +v 0.003415 -0.000000 0.008336 +v 0.003645 -0.000000 0.008336 +v 0.003875 -0.000000 0.008336 +v 0.004091 -0.000000 0.008336 +v 0.004282 -0.000000 0.008336 +v 0.004434 -0.000000 0.008336 +v 0.004535 -0.000000 0.008336 +vn -0.0000 1.0000 0.0000 +vn -0.0000 1.0000 0.0003 +vn 0.0000 0.0000 1.0000 +vn 0.0000 -1.0000 0.0000 +usemtl SVGMat.021 +s 1 +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 9//1 7//1 8//1 +f 10//1 7//1 9//1 +f 11//1 7//1 10//1 +f 12//1 7//1 11//1 +f 13//1 7//1 12//1 +f 14//1 7//1 13//1 +f 15//1 7//1 14//1 +f 16//1 7//1 15//1 +f 17//1 7//1 16//1 +f 18//1 7//1 17//1 +f 19//1 7//1 18//1 +f 20//1 7//1 19//1 +f 21//1 7//1 20//1 +f 22//1 7//1 21//1 +f 23//1 7//1 22//1 +f 24//1 7//1 23//1 +f 25//1 7//1 24//1 +f 26//1 7//1 25//1 +f 27//1 7//1 26//1 +f 28//1 7//1 27//1 +f 29//1 7//1 28//1 +f 30//1 7//1 29//1 +f 31//1 7//1 30//1 +f 32//1 7//1 31//1 +f 33//1 7//1 32//1 +f 34//1 7//1 33//1 +f 35//1 7//1 34//1 +f 36//1 7//1 35//1 +f 37//1 38//1 36//1 +f 38//1 7//1 36//1 +f 39//1 38//1 37//1 +f 40//1 38//1 39//1 +f 41//1 38//1 40//1 +f 42//1 38//1 41//1 +f 43//1 38//1 42//1 +f 44//1 38//1 43//1 +f 45//1 38//1 44//1 +f 46//1 38//1 45//1 +f 47//1 38//1 46//1 +f 48//1 38//1 47//1 +f 49//1 38//1 48//1 +f 50//1 38//1 49//1 +f 51//1 38//1 50//1 +f 52//1 38//1 51//1 +f 53//1 38//1 52//1 +f 54//1 38//1 53//1 +f 55//1 38//1 54//1 +f 56//1 38//1 55//1 +f 57//1 38//1 56//1 +f 58//1 
38//1 57//1 +f 59//1 38//1 58//1 +f 60//1 38//1 59//1 +f 61//1 38//1 60//1 +f 62//1 38//1 61//1 +f 63//1 38//1 62//1 +f 64//1 38//1 63//1 +f 65//1 38//1 64//1 +f 66//1 38//1 65//1 +f 67//1 68//2 66//1 +f 68//2 38//1 66//1 +f 69//1 38//1 68//2 +f 70//1 71//1 69//1 +f 71//1 38//1 69//1 +f 72//1 71//1 70//1 +f 73//2 38//1 71//1 +f 74//1 38//1 73//2 +f 75//1 38//1 74//1 +f 76//1 38//1 75//1 +f 77//2 38//1 76//1 +f 78//1 38//1 77//2 +f 79//1 38//1 78//1 +f 80//1 7//1 38//1 +f 81//1 7//1 80//1 +f 82//1 7//1 81//1 +f 83//1 7//1 82//1 +f 84//1 7//1 83//1 +f 85//1 7//1 84//1 +f 86//1 7//1 85//1 +f 87//1 7//1 86//1 +f 88//1 7//1 87//1 +f 89//1 7//1 88//1 +f 89//1 90//1 7//1 +f 91//1 90//1 89//1 +f 92//1 90//1 91//1 +f 93//1 90//1 92//1 +f 94//1 90//1 93//1 +f 95//1 90//1 94//1 +f 96//1 90//1 95//1 +f 97//1 90//1 96//1 +f 98//1 90//1 97//1 +f 99//1 90//1 98//1 +f 100//1 90//1 99//1 +f 101//1 90//1 100//1 +f 102//1 90//1 101//1 +f 103//1 90//1 102//1 +f 104//1 90//1 103//1 +f 105//1 90//1 104//1 +f 106//1 90//1 105//1 +f 107//1 90//1 106//1 +f 108//1 90//1 107//1 +f 109//1 90//1 108//1 +f 110//1 90//1 109//1 +f 111//1 90//1 110//1 +f 112//1 90//1 111//1 +f 113//1 90//1 112//1 +f 114//1 90//1 113//1 +f 115//1 90//1 114//1 +f 116//1 90//1 115//1 +f 117//1 90//1 116//1 +f 118//1 90//1 117//1 +f 119//3 90//4 118//3 +f 120//3 90//4 119//3 +f 121//3 90//4 120//3 +f 122//3 90//4 121//3 +f 123//3 90//4 122//3 +f 124//3 90//4 123//3 +f 125//3 90//4 124//3 +f 126//3 90//4 125//3 +f 127//3 90//4 126//3 +f 128//3 90//4 127//3 +f 129//3 90//3 128//3 diff --git a/alphanumeric/1443377.svg b/alphanumeric/1443377.svg new file mode 100644 index 0000000..9a35586 --- /dev/null +++ b/alphanumeric/1443377.svg @@ -0,0 +1,137 @@ + + + + +Created by potrace 1.15, written by Peter Selinger 2001-2017 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/alphanumeric/2.mtl b/alphanumeric/2.mtl new file mode 100644 index 0000000..f3ce920 --- /dev/null +++ b/alphanumeric/2.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.022 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/2.obj b/alphanumeric/2.obj new file mode 100644 index 0000000..6e99396 --- /dev/null +++ b/alphanumeric/2.obj @@ -0,0 +1,638 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 2.mtl +o Curve.001_Curve.011 +v -0.000902 0.000000 -0.008312 +v -0.000208 0.000000 -0.008312 +v -0.000567 0.000000 -0.008314 +v 0.000163 0.000000 -0.008305 +v -0.001202 0.000000 -0.008304 +v 0.000534 0.000000 -0.008293 +v -0.001455 0.000000 -0.008291 +v 0.000893 0.000000 -0.008278 +v -0.001648 0.000000 -0.008272 +v 0.001228 0.000000 -0.008259 +v -0.002163 0.000000 -0.008188 +v 0.001527 0.000000 -0.008236 +v 0.001779 0.000000 -0.008210 +v 0.001970 0.000000 -0.008182 +v -0.002654 0.000000 -0.008081 +v 0.002547 0.000000 -0.008055 +v -0.003118 0.000000 -0.007950 +v 0.003096 0.000000 -0.007898 +v -0.003558 0.000000 -0.007797 +v 0.003616 0.000000 -0.007709 +v -0.003973 0.000000 -0.007621 +v 0.004104 0.000000 -0.007490 +v -0.004361 0.000000 -0.007421 +v 0.004560 0.000000 -0.007243 +v -0.004723 0.000000 -0.007199 +v 0.004981 0.000000 -0.006969 +v -0.005060 0.000000 -0.006955 +v 0.005365 0.000000 -0.006670 +v -0.005370 0.000000 -0.006687 +v -0.005655 0.000000 -0.006397 +v 0.005712 0.000000 -0.006346 +v -0.005912 0.000000 -0.006085 +v 0.006018 0.000000 -0.005999 +v -0.006143 
0.000000 -0.005749 +v -0.000088 0.000000 -0.005770 +v 0.006282 0.000000 -0.005630 +v -0.000417 0.000000 -0.005753 +v 0.000242 0.000000 -0.005762 +v 0.000571 0.000000 -0.005725 +v -0.000740 0.000000 -0.005709 +v -0.006252 0.000000 -0.005560 +v 0.000892 0.000000 -0.005662 +v -0.001052 0.000000 -0.005639 +v 0.001202 0.000000 -0.005571 +v -0.001348 0.000000 -0.005545 +v 0.006502 0.000000 -0.005240 +v 0.001496 0.000000 -0.005451 +v -0.006355 0.000000 -0.005352 +v -0.001624 0.000000 -0.005424 +v 0.001640 0.000000 -0.005378 +v -0.001876 0.000000 -0.005280 +v 0.001777 0.000000 -0.005298 +v -0.006451 0.000000 -0.005128 +v 0.001908 0.000000 -0.005211 +v -0.002098 0.000000 -0.005111 +v 0.006678 0.000000 -0.004831 +v 0.002033 0.000000 -0.005119 +v -0.006539 0.000000 -0.004891 +v 0.002150 0.000000 -0.005022 +v -0.002232 0.000000 -0.004985 +v 0.002260 0.000000 -0.004920 +v -0.002354 0.000000 -0.004853 +v 0.002362 0.000000 -0.004814 +v -0.006617 0.000000 -0.004644 +v -0.002463 0.000000 -0.004714 +v 0.006778 0.000000 -0.004521 +v 0.002456 0.000000 -0.004703 +v -0.002562 0.000000 -0.004568 +v 0.002541 0.000000 -0.004588 +v -0.006687 0.000000 -0.004389 +v 0.002616 0.000000 -0.004471 +v -0.002649 0.000000 -0.004414 +v 0.006855 0.000000 -0.004199 +v 0.002681 0.000000 -0.004351 +v -0.002725 0.000000 -0.004251 +v -0.006746 0.000000 -0.004130 +v 0.002737 0.000000 -0.004229 +v -0.002790 0.000000 -0.004078 +v 0.002766 0.000000 -0.004152 +v 0.006911 0.000000 -0.003868 +v 0.002792 0.000000 -0.004080 +v -0.006795 0.000000 -0.003869 +v 0.002813 0.000000 -0.004010 +v -0.002845 0.000000 -0.003894 +v 0.002831 0.000000 -0.003939 +v 0.002846 0.000000 -0.003866 +v -0.002890 0.000000 -0.003700 +v -0.006832 0.000000 -0.003609 +v 0.006944 0.000000 -0.003531 +v 0.002858 0.000000 -0.003786 +v 0.002867 0.000000 -0.003698 +v -0.002926 0.000000 -0.003494 +v 0.002873 0.000000 -0.003599 +v -0.006857 0.000000 -0.003352 +v 0.002878 0.000000 -0.003486 +v 0.006956 0.000000 -0.003192 +v -0.002951 0.000000 -0.003275 +v 0.002881 0.000000 -0.003356 +v 0.002882 0.000000 -0.003208 +v -0.006868 0.000000 -0.003102 +v -0.002968 0.000000 -0.003044 +v 0.002883 0.000000 -0.003037 +v 0.006947 0.000000 -0.002854 +v -0.006867 0.000000 -0.002861 +v -0.002973 0.000000 -0.002955 +v 0.002883 0.000000 -0.003021 +v 0.002883 0.000000 -0.002977 +v 0.002883 0.000000 -0.002909 +v -0.002980 0.000000 -0.002870 +v 0.002883 0.000000 -0.002825 +v -0.002987 0.000000 -0.002789 +v -0.006866 0.000000 -0.002852 +v 0.006917 0.000000 -0.002521 +v -0.006865 0.000000 -0.002827 +v -0.006864 0.000000 -0.002790 +v 0.002883 0.000000 -0.002728 +v -0.006862 0.000000 -0.002743 +v -0.002995 0.000000 -0.002713 +v -0.006860 0.000000 -0.002689 +v 0.002883 0.000000 -0.002627 +v -0.003003 0.000000 -0.002642 +v -0.006857 0.000000 -0.002633 +v -0.003013 0.000000 -0.002577 +v -0.006855 0.000000 -0.002576 +v 0.002883 0.000000 -0.002525 +v -0.003023 0.000000 -0.002520 +v -0.006853 0.000000 -0.002523 +v 0.002883 0.000000 -0.002429 +v -0.006851 0.000000 -0.002476 +v 0.006866 0.000000 -0.002195 +v -0.003032 0.000000 -0.002470 +v -0.006849 0.000000 -0.002439 +v -0.003043 0.000000 -0.002429 +v -0.006848 0.000000 -0.002414 +v 0.002883 0.000000 -0.002345 +v -0.003052 0.000000 -0.002397 +v -0.006848 0.000000 -0.002405 +v -0.006544 0.000000 -0.002362 +v -0.003062 0.000000 -0.002374 +v -0.003071 0.000000 -0.002362 +v -0.006389 0.000000 -0.002350 +v -0.003150 0.000000 -0.002350 +v -0.006147 0.000000 -0.002339 +v -0.003329 0.000000 -0.002339 +v 0.002883 0.000000 -0.002277 +v -0.005836 0.000000 -0.002332 
+v -0.003591 0.000000 -0.002332 +v -0.005476 0.000000 -0.002326 +v -0.003916 0.000000 -0.002326 +v -0.005085 0.000000 -0.002322 +v -0.004286 0.000000 -0.002322 +v -0.004682 0.000000 -0.002321 +v 0.002883 0.000000 -0.002232 +v 0.002883 0.000000 -0.002216 +v 0.002652 0.000000 -0.001754 +v 0.006794 0.000000 -0.001880 +v 0.006702 0.000000 -0.001580 +v 0.002520 0.000000 -0.001513 +v 0.006590 0.000000 -0.001298 +v 0.002363 0.000000 -0.001278 +v 0.006459 0.000000 -0.001037 +v 0.002174 0.000000 -0.001045 +v 0.001947 0.000000 -0.000808 +v 0.006341 0.000000 -0.000846 +v 0.006207 0.000000 -0.000653 +v 0.001674 0.000000 -0.000561 +v 0.006056 0.000000 -0.000458 +v 0.001349 0.000000 -0.000302 +v 0.005889 0.000000 -0.000263 +v 0.000966 0.000000 -0.000022 +v 0.005706 0.000000 -0.000066 +v 0.005507 -0.000000 0.000131 +v 0.000516 -0.000000 0.000281 +v 0.005292 -0.000000 0.000330 +v -0.000005 -0.000000 0.000613 +v 0.005062 -0.000000 0.000528 +v 0.004817 -0.000000 0.000727 +v -0.000607 -0.000000 0.000981 +v 0.004556 -0.000000 0.000926 +v 0.004280 -0.000000 0.001124 +v -0.001294 -0.000000 0.001388 +v 0.003990 -0.000000 0.001322 +v 0.003891 -0.000000 0.001388 +v -0.002074 -0.000000 0.001839 +v 0.003768 -0.000000 0.001469 +v 0.003622 -0.000000 0.001561 +v 0.003457 -0.000000 0.001665 +v 0.003275 -0.000000 0.001779 +v 0.003079 -0.000000 0.001901 +v -0.002464 -0.000000 0.002065 +v 0.002872 -0.000000 0.002029 +v 0.002656 -0.000000 0.002161 +v -0.002815 -0.000000 0.002269 +v 0.002433 -0.000000 0.002296 +v -0.003129 -0.000000 0.002456 +v 0.002208 -0.000000 0.002433 +v 0.001982 -0.000000 0.002569 +v -0.003411 -0.000000 0.002626 +v 0.001758 -0.000000 0.002703 +v -0.003664 -0.000000 0.002783 +v 0.001334 -0.000000 0.002955 +v -0.003894 -0.000000 0.002930 +v -0.004103 -0.000000 0.003069 +v 0.000965 -0.000000 0.003175 +v -0.004297 -0.000000 0.003203 +v 0.000645 -0.000000 0.003369 +v -0.004479 -0.000000 0.003335 +v -0.004653 -0.000000 0.003467 +v 0.000368 -0.000000 0.003537 +v -0.004823 -0.000000 0.003602 +v 0.000129 -0.000000 0.003686 +v -0.004993 -0.000000 0.003743 +v -0.000079 -0.000000 0.003818 +v -0.005292 -0.000000 0.004007 +v -0.000261 -0.000000 0.003937 +v -0.000423 -0.000000 0.004047 +v -0.005566 -0.000000 0.004279 +v -0.000570 -0.000000 0.004152 +v -0.000710 -0.000000 0.004255 +v -0.000846 -0.000000 0.004361 +v -0.005814 -0.000000 0.004559 +v -0.000985 -0.000000 0.004472 +v -0.001090 -0.000000 0.004559 +v -0.006039 -0.000000 0.004849 +v -0.001195 -0.000000 0.004654 +v -0.001298 -0.000000 0.004753 +v -0.001398 -0.000000 0.004856 +v -0.006241 -0.000000 0.005150 +v -0.001492 -0.000000 0.004959 +v -0.001577 -0.000000 0.005058 +v -0.001653 -0.000000 0.005153 +v -0.006421 -0.000000 0.005465 +v -0.001716 -0.000000 0.005240 +v -0.001765 -0.000000 0.005316 +v -0.001796 -0.000000 0.005380 +v -0.001809 -0.000000 0.005428 +v -0.001800 -0.000000 0.005457 +v -0.001756 -0.000000 0.005464 +v -0.001638 -0.000000 0.005469 +v -0.006579 -0.000000 0.005794 +v -0.001449 -0.000000 0.005475 +v -0.001196 -0.000000 0.005481 +v -0.000883 -0.000000 0.005487 +v -0.000517 -0.000000 0.005492 +v -0.000102 -0.000000 0.005498 +v 0.000356 -0.000000 0.005503 +v 0.000853 -0.000000 0.005507 +v 0.001382 -0.000000 0.005511 +v 0.001939 -0.000000 0.005515 +v 0.002518 -0.000000 0.005518 +v 0.006800 -0.000000 0.005536 +v 0.006800 -0.000000 0.005541 +v 0.006803 -0.000000 0.005555 +v 0.006805 -0.000000 0.005576 +v 0.006809 -0.000000 0.005603 +v 0.006813 -0.000000 0.005633 +v 0.006818 -0.000000 0.005664 +v 0.006823 -0.000000 0.005696 +v 0.006826 -0.000000 
0.005725 +v 0.006830 -0.000000 0.005752 +v 0.006833 -0.000000 0.005773 +v 0.006835 -0.000000 0.005787 +v 0.006836 -0.000000 0.005792 +v 0.006841 -0.000000 0.005837 +v -0.006716 -0.000000 0.006139 +v 0.006845 -0.000000 0.005900 +v 0.006848 -0.000000 0.005982 +v 0.006852 -0.000000 0.006078 +v 0.006854 -0.000000 0.006188 +v -0.006834 -0.000000 0.006501 +v 0.006857 -0.000000 0.006310 +v 0.006858 -0.000000 0.006443 +v 0.006859 -0.000000 0.006584 +v -0.006932 -0.000000 0.006882 +v 0.006859 -0.000000 0.006733 +v 0.006859 -0.000000 0.006886 +v -0.007012 -0.000000 0.007284 +v 0.006857 -0.000000 0.007043 +v 0.006854 -0.000000 0.007203 +v 0.006836 -0.000000 0.008358 +v -0.007073 -0.000000 0.007707 +v -0.007082 -0.000000 0.007782 +v -0.007089 -0.000000 0.007856 +v -0.007096 -0.000000 0.007926 +v -0.007101 -0.000000 0.007993 +v -0.007105 -0.000000 0.008056 +v -0.007108 -0.000000 0.008114 +v -0.007110 -0.000000 0.008166 +v -0.007110 -0.000000 0.008212 +v -0.007109 -0.000000 0.008250 +v -0.007107 -0.000000 0.008281 +v -0.007103 -0.000000 0.008303 +v -0.007098 -0.000000 0.008315 +v -0.007028 -0.000000 0.008321 +v -0.006837 -0.000000 0.008327 +v -0.006533 -0.000000 0.008332 +v -0.006124 -0.000000 0.008337 +v -0.005619 -0.000000 0.008342 +v -0.005027 -0.000000 0.008346 +v -0.004355 -0.000000 0.008349 +v -0.003613 -0.000000 0.008352 +v -0.002809 -0.000000 0.008355 +v -0.001952 -0.000000 0.008357 +v -0.001049 -0.000000 0.008358 +v -0.000109 -0.000000 0.008358 +v 0.000027 -0.000000 0.008358 +v 0.000405 -0.000000 0.008358 +v 0.000976 -0.000000 0.008358 +v 0.001691 -0.000000 0.008358 +v 0.002503 -0.000000 0.008358 +v 0.003363 -0.000000 0.008358 +v 0.004223 -0.000000 0.008358 +v 0.005035 -0.000000 0.008358 +v 0.005751 -0.000000 0.008358 +v 0.006322 -0.000000 0.008358 +v 0.006699 -0.000000 0.008358 +vn -0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.022 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 5//1 6//1 4//1 +f 7//1 6//1 5//1 +f 7//1 8//1 6//1 +f 9//1 8//1 7//1 +f 9//1 10//1 8//1 +f 11//1 10//1 9//1 +f 11//1 12//1 10//1 +f 11//1 13//1 12//1 +f 11//1 14//1 13//1 +f 15//1 14//1 11//1 +f 15//1 16//1 14//1 +f 17//1 16//1 15//1 +f 17//1 18//1 16//1 +f 19//1 18//1 17//1 +f 19//1 20//1 18//1 +f 21//1 20//1 19//1 +f 21//1 22//1 20//1 +f 23//1 22//1 21//1 +f 23//1 24//1 22//1 +f 25//1 24//1 23//1 +f 25//1 26//1 24//1 +f 27//1 26//1 25//1 +f 27//1 28//1 26//1 +f 29//1 28//1 27//1 +f 30//1 28//1 29//1 +f 30//1 31//1 28//1 +f 32//1 31//1 30//1 +f 32//1 33//1 31//1 +f 34//1 33//1 32//1 +f 34//1 35//1 33//1 +f 35//1 36//1 33//1 +f 34//1 37//1 35//1 +f 38//1 36//1 35//1 +f 39//1 36//1 38//1 +f 34//1 40//1 37//1 +f 41//1 40//1 34//1 +f 42//1 36//1 39//1 +f 41//1 43//1 40//1 +f 44//1 36//1 42//1 +f 41//1 45//1 43//1 +f 44//1 46//1 36//1 +f 47//1 46//1 44//1 +f 48//1 45//1 41//1 +f 48//1 49//1 45//1 +f 50//1 46//1 47//1 +f 48//1 51//1 49//1 +f 52//1 46//1 50//1 +f 53//1 51//1 48//1 +f 54//1 46//1 52//1 +f 53//1 55//1 51//1 +f 54//1 56//1 46//1 +f 57//1 56//1 54//1 +f 58//1 55//1 53//1 +f 59//1 56//1 57//1 +f 58//1 60//1 55//1 +f 61//1 56//1 59//1 +f 58//1 62//1 60//1 +f 63//1 56//1 61//1 +f 64//1 62//1 58//1 +f 64//1 65//1 62//1 +f 63//1 66//1 56//1 +f 67//1 66//1 63//1 +f 64//1 68//1 65//1 +f 69//1 66//1 67//1 +f 70//1 68//1 64//1 +f 71//1 66//1 69//1 +f 70//1 72//1 68//1 +f 71//1 73//1 66//1 +f 74//1 73//1 71//1 +f 70//1 75//1 72//1 +f 76//1 75//1 70//1 +f 77//1 73//1 74//1 +f 76//1 78//1 75//1 +f 79//1 73//1 77//1 +f 79//1 80//1 73//1 +f 81//1 80//1 79//1 +f 82//1 78//1 76//1 +f 83//1 80//1 
81//1 +f 82//1 84//1 78//1 +f 85//1 80//1 83//1 +f 86//1 80//1 85//1 +f 82//1 87//1 84//1 +f 88//1 87//1 82//1 +f 86//1 89//1 80//1 +f 90//1 89//1 86//1 +f 91//1 89//1 90//1 +f 88//1 92//1 87//1 +f 93//1 89//1 91//1 +f 94//1 92//1 88//1 +f 95//1 89//1 93//1 +f 95//1 96//1 89//1 +f 94//1 97//1 92//1 +f 98//1 96//1 95//1 +f 99//1 96//1 98//1 +f 100//1 97//1 94//1 +f 100//1 101//1 97//1 +f 102//1 96//1 99//1 +f 102//1 103//1 96//1 +f 104//1 101//1 100//1 +f 104//1 105//1 101//1 +f 106//1 103//1 102//1 +f 107//1 103//1 106//1 +f 108//1 103//1 107//1 +f 104//1 109//1 105//1 +f 110//1 103//1 108//1 +f 104//1 111//1 109//1 +f 112//1 111//1 104//1 +f 110//1 113//1 103//1 +f 114//1 111//1 112//1 +f 115//1 111//1 114//1 +f 116//1 113//1 110//1 +f 117//1 111//1 115//1 +f 117//1 118//1 111//1 +f 119//1 118//1 117//1 +f 120//1 113//1 116//1 +f 119//1 121//1 118//1 +f 122//1 121//1 119//1 +f 122//1 123//1 121//1 +f 124//1 123//1 122//1 +f 125//1 113//1 120//1 +f 124//1 126//1 123//1 +f 127//1 126//1 124//1 +f 128//1 113//1 125//1 +f 129//1 126//1 127//1 +f 128//1 130//1 113//1 +f 129//1 131//1 126//1 +f 132//1 131//1 129//1 +f 132//1 133//1 131//1 +f 134//1 133//1 132//1 +f 135//1 130//1 128//1 +f 134//1 136//1 133//1 +f 137//1 136//1 134//1 +f 138//1 136//1 137//1 +f 138//1 139//1 136//1 +f 138//1 140//1 139//1 +f 141//1 140//1 138//1 +f 141//1 142//1 140//1 +f 143//1 142//1 141//1 +f 143//1 144//1 142//1 +f 145//1 130//1 135//1 +f 146//1 144//1 143//1 +f 146//1 147//1 144//1 +f 148//1 147//1 146//1 +f 148//1 149//1 147//1 +f 150//1 149//1 148//1 +f 150//1 151//1 149//1 +f 152//1 151//1 150//1 +f 153//1 130//1 145//1 +f 154//1 130//1 153//1 +f 155//1 130//1 154//1 +f 155//1 156//1 130//1 +f 155//1 157//1 156//1 +f 158//1 157//1 155//1 +f 158//1 159//1 157//1 +f 160//1 159//1 158//1 +f 160//1 161//1 159//1 +f 162//1 161//1 160//1 +f 163//1 161//1 162//1 +f 163//1 164//1 161//1 +f 163//1 165//1 164//1 +f 166//1 165//1 163//1 +f 166//1 167//1 165//1 +f 168//1 167//1 166//1 +f 168//1 169//1 167//1 +f 170//1 169//1 168//1 +f 170//1 171//1 169//1 +f 170//1 172//1 171//1 +f 173//1 172//1 170//1 +f 173//1 174//1 172//1 +f 175//1 174//1 173//1 +f 175//1 176//1 174//1 +f 175//1 177//1 176//1 +f 178//1 177//1 175//1 +f 178//1 179//1 177//1 +f 178//1 180//1 179//1 +f 181//1 180//1 178//1 +f 181//1 182//1 180//1 +f 181//1 183//1 182//1 +f 184//1 183//1 181//1 +f 184//1 185//1 183//1 +f 184//1 186//1 185//1 +f 184//1 187//1 186//1 +f 184//1 188//1 187//1 +f 184//1 189//1 188//1 +f 190//1 189//1 184//1 +f 190//1 191//1 189//1 +f 190//1 192//1 191//1 +f 193//1 192//1 190//1 +f 193//1 194//1 192//1 +f 195//1 194//1 193//1 +f 195//1 196//1 194//1 +f 195//1 197//1 196//1 +f 198//1 197//1 195//1 +f 198//1 199//1 197//1 +f 200//1 199//1 198//1 +f 200//1 201//1 199//1 +f 202//1 201//1 200//1 +f 203//1 201//1 202//1 +f 203//1 204//1 201//1 +f 205//1 204//1 203//1 +f 205//1 206//1 204//1 +f 207//1 206//1 205//1 +f 208//1 206//1 207//1 +f 208//1 209//1 206//1 +f 210//1 209//1 208//1 +f 210//1 211//1 209//1 +f 212//1 211//1 210//1 +f 212//1 213//1 211//1 +f 214//1 213//1 212//1 +f 214//1 215//1 213//1 +f 214//1 216//1 215//1 +f 217//1 216//1 214//1 +f 217//1 218//1 216//1 +f 217//1 219//1 218//1 +f 217//1 220//1 219//1 +f 221//1 220//1 217//1 +f 221//1 222//1 220//1 +f 221//1 223//1 222//1 +f 224//1 223//1 221//1 +f 224//1 225//1 223//1 +f 224//1 226//1 225//1 +f 224//1 227//1 226//1 +f 228//1 227//1 224//1 +f 228//1 229//1 227//1 +f 228//1 230//1 229//1 +f 228//1 231//1 230//1 +f 232//1 231//1 228//1 +f 232//1 233//1 231//1 +f 
232//1 234//1 233//1 +f 232//1 235//1 234//1 +f 232//1 236//1 235//1 +f 232//1 237//1 236//1 +f 232//1 238//1 237//1 +f 232//1 239//1 238//1 +f 240//1 239//1 232//1 +f 240//1 241//1 239//1 +f 240//1 242//1 241//1 +f 240//1 243//1 242//1 +f 240//1 244//1 243//1 +f 240//1 245//1 244//1 +f 240//1 246//1 245//1 +f 240//1 247//1 246//1 +f 240//1 248//1 247//1 +f 240//1 249//1 248//1 +f 240//1 250//1 249//1 +f 240//1 251//1 250//1 +f 240//1 252//1 251//1 +f 240//1 253//1 252//1 +f 240//1 254//1 253//1 +f 240//1 255//1 254//1 +f 240//1 256//1 255//1 +f 240//1 257//1 256//1 +f 240//1 258//1 257//1 +f 240//1 259//1 258//1 +f 240//1 260//1 259//1 +f 240//1 261//1 260//1 +f 240//1 262//1 261//1 +f 240//1 263//1 262//1 +f 240//1 264//1 263//1 +f 265//1 264//1 240//1 +f 265//1 266//1 264//1 +f 265//1 267//1 266//1 +f 265//1 268//1 267//1 +f 265//1 269//1 268//1 +f 270//1 269//1 265//1 +f 270//1 271//1 269//1 +f 270//1 272//1 271//1 +f 270//1 273//1 272//1 +f 274//1 273//1 270//1 +f 274//1 275//1 273//1 +f 274//1 276//1 275//1 +f 277//1 276//1 274//1 +f 277//1 278//1 276//1 +f 277//1 279//1 278//1 +f 277//1 280//1 279//1 +f 281//1 280//1 277//1 +f 282//1 280//1 281//1 +f 283//1 280//1 282//1 +f 284//1 280//1 283//1 +f 285//1 280//1 284//1 +f 286//1 280//1 285//1 +f 287//1 280//1 286//1 +f 288//1 280//1 287//1 +f 289//1 280//1 288//1 +f 290//1 280//1 289//1 +f 291//1 280//1 290//1 +f 292//1 280//1 291//1 +f 293//1 280//1 292//1 +f 294//1 280//1 293//1 +f 295//1 280//1 294//1 +f 296//1 280//1 295//1 +f 297//1 280//1 296//1 +f 298//1 280//1 297//1 +f 299//1 280//1 298//1 +f 300//1 280//1 299//1 +f 301//1 280//1 300//1 +f 302//1 280//1 301//1 +f 303//1 280//1 302//1 +f 304//1 280//1 303//1 +f 305//1 280//1 304//1 +f 306//2 280//2 305//2 +f 307//2 280//2 306//2 +f 308//2 280//2 307//2 +f 309//2 280//2 308//2 +f 310//2 280//2 309//2 +f 311//2 280//2 310//2 +f 312//2 280//2 311//2 +f 313//2 280//2 312//2 +f 314//2 280//2 313//2 +f 315//2 280//2 314//2 +f 316//2 280//2 315//2 diff --git a/alphanumeric/3.mtl b/alphanumeric/3.mtl new file mode 100644 index 0000000..81f3025 --- /dev/null +++ b/alphanumeric/3.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.023 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/3.obj b/alphanumeric/3.obj new file mode 100644 index 0000000..25634f3 --- /dev/null +++ b/alphanumeric/3.obj @@ -0,0 +1,809 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 3.mtl +o Curve_Curve.020 +v -0.000889 0.000000 -0.008219 +v 0.000194 0.000000 -0.008240 +v -0.000350 0.000000 -0.008246 +v 0.000739 0.000000 -0.008205 +v -0.001416 0.000000 -0.008161 +v 0.001280 0.000000 -0.008141 +v -0.001739 0.000000 -0.008112 +v 0.001812 0.000000 -0.008048 +v -0.002045 0.000000 -0.008060 +v -0.002334 0.000000 -0.008003 +v 0.002330 0.000000 -0.007929 +v -0.002608 0.000000 -0.007943 +v -0.002868 0.000000 -0.007877 +v 0.002829 0.000000 -0.007784 +v -0.003116 0.000000 -0.007807 +v -0.003352 0.000000 -0.007731 +v 0.003303 0.000000 -0.007613 +v -0.003580 0.000000 -0.007649 +v -0.003799 0.000000 -0.007559 +v 0.003748 0.000000 -0.007419 +v -0.004012 0.000000 -0.007464 +v -0.004219 0.000000 -0.007361 +v 0.004158 0.000000 -0.007202 +v -0.004423 0.000000 -0.007251 +v -0.004679 0.000000 -0.007095 +v 0.004529 0.000000 -0.006963 +v -0.004919 0.000000 -0.006929 +v 0.004627 0.000000 -0.006889 +v -0.005145 0.000000 -0.006753 +v 
0.004729 0.000000 -0.006806 +v 0.004833 0.000000 -0.006717 +v -0.005355 0.000000 -0.006566 +v 0.004939 0.000000 -0.006621 +v 0.005044 0.000000 -0.006521 +v -0.005549 0.000000 -0.006368 +v 0.005148 0.000000 -0.006418 +v 0.005249 0.000000 -0.006313 +v -0.005727 0.000000 -0.006162 +v 0.005345 0.000000 -0.006208 +v 0.005435 0.000000 -0.006104 +v -0.005890 0.000000 -0.005945 +v 0.005518 0.000000 -0.006002 +v 0.005593 0.000000 -0.005904 +v -0.006036 0.000000 -0.005719 +v 0.005657 0.000000 -0.005811 +v -0.000318 0.000000 -0.005739 +v 0.005709 0.000000 -0.005724 +v -0.000614 0.000000 -0.005726 +v -0.000016 0.000000 -0.005731 +v 0.000288 0.000000 -0.005700 +v -0.000903 0.000000 -0.005692 +v 0.005761 0.000000 -0.005620 +v -0.006165 0.000000 -0.005484 +v 0.000427 0.000000 -0.005678 +v -0.001181 0.000000 -0.005637 +v 0.000561 0.000000 -0.005651 +v 0.000691 0.000000 -0.005619 +v -0.001447 0.000000 -0.005563 +v 0.005812 0.000000 -0.005503 +v 0.000816 0.000000 -0.005583 +v 0.000937 0.000000 -0.005540 +v -0.001696 0.000000 -0.005472 +v 0.001054 0.000000 -0.005493 +v 0.005863 0.000000 -0.005375 +v 0.001166 0.000000 -0.005440 +v -0.006277 0.000000 -0.005239 +v -0.001927 0.000000 -0.005362 +v 0.001273 0.000000 -0.005383 +v 0.001376 0.000000 -0.005320 +v 0.005911 0.000000 -0.005238 +v -0.002136 0.000000 -0.005236 +v 0.001474 0.000000 -0.005252 +v 0.001568 0.000000 -0.005179 +v -0.006372 0.000000 -0.004986 +v 0.005957 0.000000 -0.005095 +v -0.002321 0.000000 -0.005094 +v 0.001657 0.000000 -0.005101 +v 0.001774 0.000000 -0.004984 +v 0.005999 0.000000 -0.004948 +v -0.002479 0.000000 -0.004937 +v -0.006450 0.000000 -0.004725 +v 0.001878 0.000000 -0.004860 +v 0.006037 0.000000 -0.004799 +v -0.002608 0.000000 -0.004766 +v 0.001970 0.000000 -0.004730 +v 0.006070 0.000000 -0.004652 +v -0.002658 0.000000 -0.004684 +v 0.002049 0.000000 -0.004592 +v -0.006482 0.000000 -0.004588 +v -0.002703 0.000000 -0.004604 +v 0.006097 0.000000 -0.004507 +v -0.002743 0.000000 -0.004523 +v 0.002116 0.000000 -0.004448 +v -0.006511 0.000000 -0.004431 +v -0.002779 0.000000 -0.004441 +v 0.006119 0.000000 -0.004368 +v 0.002170 0.000000 -0.004299 +v -0.002811 0.000000 -0.004355 +v -0.006538 0.000000 -0.004260 +v 0.006133 0.000000 -0.004238 +v -0.002840 0.000000 -0.004263 +v 0.002211 0.000000 -0.004142 +v -0.002866 0.000000 -0.004164 +v -0.006562 0.000000 -0.004080 +v 0.006144 0.000000 -0.003885 +v -0.002890 0.000000 -0.004056 +v 0.002239 0.000000 -0.003980 +v -0.006584 0.000000 -0.003897 +v -0.002913 0.000000 -0.003936 +v 0.002254 0.000000 -0.003813 +v -0.002935 0.000000 -0.003803 +v -0.006601 0.000000 -0.003718 +v 0.006122 0.000000 -0.003546 +v 0.002256 0.000000 -0.003641 +v -0.002957 0.000000 -0.003656 +v -0.006613 0.000000 -0.003547 +v -0.002978 0.000000 -0.003491 +v 0.002245 0.000000 -0.003463 +v -0.006621 0.000000 -0.003390 +v 0.006067 0.000000 -0.003220 +v -0.003049 0.000000 -0.002963 +v 0.002220 0.000000 -0.003280 +v -0.006624 0.000000 -0.003255 +v 0.002152 0.000000 -0.002968 +v -0.006621 0.000000 -0.003145 +v 0.005979 0.000000 -0.002908 +v -0.006612 0.000000 -0.003068 +v -0.006597 0.000000 -0.003027 +v -0.006575 0.000000 -0.003018 +v -0.006529 0.000000 -0.003009 +v -0.006460 0.000000 -0.003000 +v -0.006368 0.000000 -0.002993 +v -0.006252 0.000000 -0.002986 +v -0.006113 0.000000 -0.002980 +v -0.005951 0.000000 -0.002975 +v -0.005765 0.000000 -0.002971 +v -0.005555 0.000000 -0.002968 +v 0.002052 0.000000 -0.002683 +v -0.005323 0.000000 -0.002965 +v -0.005067 0.000000 -0.002963 +v -0.004788 0.000000 -0.002963 +v -0.004754 0.000000 
-0.002963 +v -0.004659 0.000000 -0.002963 +v -0.004516 0.000000 -0.002963 +v -0.004337 0.000000 -0.002963 +v -0.004133 0.000000 -0.002963 +v -0.003918 0.000000 -0.002963 +v -0.003703 0.000000 -0.002963 +v -0.003500 0.000000 -0.002963 +v -0.003321 0.000000 -0.002963 +v -0.003178 0.000000 -0.002963 +v -0.003083 0.000000 -0.002963 +v 0.005857 0.000000 -0.002609 +v 0.001919 0.000000 -0.002426 +v 0.005702 0.000000 -0.002324 +v 0.001754 0.000000 -0.002194 +v 0.005513 0.000000 -0.002050 +v 0.001554 0.000000 -0.001990 +v 0.005290 0.000000 -0.001790 +v 0.001320 0.000000 -0.001810 +v 0.001052 0.000000 -0.001655 +v 0.005034 0.000000 -0.001543 +v 0.000747 0.000000 -0.001525 +v 0.004743 0.000000 -0.001307 +v 0.000406 0.000000 -0.001419 +v 0.000029 0.000000 -0.001337 +v -0.000387 0.000000 -0.001278 +v 0.004418 0.000000 -0.001084 +v -0.000840 0.000000 -0.001242 +v -0.000854 0.000000 -0.001241 +v -0.000894 0.000000 -0.001239 +v -0.000955 0.000000 -0.001236 +v -0.001030 0.000000 -0.001231 +v -0.001116 0.000000 -0.001227 +v -0.001207 0.000000 -0.001222 +v -0.001298 0.000000 -0.001216 +v -0.001384 0.000000 -0.001212 +v -0.001459 0.000000 -0.001207 +v -0.001520 0.000000 -0.001204 +v -0.001560 0.000000 -0.001202 +v -0.001574 0.000000 -0.001201 +v -0.001574 0.000000 -0.000232 +v 0.004059 0.000000 -0.000872 +v 0.004000 0.000000 -0.000839 +v 0.003943 0.000000 -0.000806 +v 0.003890 0.000000 -0.000774 +v 0.003839 0.000000 -0.000742 +v 0.003793 0.000000 -0.000713 +v 0.003752 0.000000 -0.000686 +v 0.003715 0.000000 -0.000660 +v 0.003684 0.000000 -0.000638 +v 0.003659 0.000000 -0.000618 +v 0.003640 0.000000 -0.000601 +v 0.003628 0.000000 -0.000588 +v 0.003625 0.000000 -0.000579 +v 0.003626 0.000000 -0.000571 +v 0.003629 0.000000 -0.000563 +v 0.003635 0.000000 -0.000554 +v 0.003643 0.000000 -0.000545 +v 0.003652 0.000000 -0.000535 +v 0.003663 0.000000 -0.000526 +v 0.003677 0.000000 -0.000516 +v 0.003691 0.000000 -0.000507 +v 0.003706 0.000000 -0.000498 +v 0.003723 0.000000 -0.000489 +v 0.003741 0.000000 -0.000481 +v 0.003760 0.000000 -0.000473 +v 0.003843 0.000000 -0.000437 +v 0.003952 0.000000 -0.000382 +v 0.004082 0.000000 -0.000313 +v 0.004227 0.000000 -0.000232 +v -0.001574 -0.000000 0.000731 +v 0.004384 0.000000 -0.000142 +v 0.004547 0.000000 -0.000045 +v 0.004711 -0.000000 0.000055 +v 0.004872 -0.000000 0.000155 +v 0.005025 -0.000000 0.000253 +v 0.005165 -0.000000 0.000346 +v 0.005287 -0.000000 0.000430 +v 0.005387 -0.000000 0.000502 +v 0.005498 -0.000000 0.000591 +v 0.005608 -0.000000 0.000689 +v 0.005719 -0.000000 0.000793 +v -0.001398 -0.000000 0.000772 +v -0.001366 -0.000000 0.000779 +v -0.001320 -0.000000 0.000786 +v -0.001261 -0.000000 0.000794 +v 0.005827 -0.000000 0.000902 +v -0.001192 -0.000000 0.000802 +v -0.001112 -0.000000 0.000811 +v -0.001023 -0.000000 0.000821 +v -0.000927 -0.000000 0.000830 +v -0.000823 -0.000000 0.000840 +v -0.000715 -0.000000 0.000850 +v -0.000602 -0.000000 0.000860 +v -0.000487 -0.000000 0.000869 +v -0.000370 -0.000000 0.000878 +v -0.000127 -0.000000 0.000896 +v 0.000101 -0.000000 0.000919 +v 0.005932 -0.000000 0.001014 +v 0.000314 -0.000000 0.000947 +v 0.000515 -0.000000 0.000981 +v 0.000703 -0.000000 0.001020 +v 0.006033 -0.000000 0.001130 +v 0.000881 -0.000000 0.001064 +v 0.001049 -0.000000 0.001114 +v 0.001208 -0.000000 0.001171 +v 0.006129 -0.000000 0.001247 +v 0.001360 -0.000000 0.001234 +v 0.001505 -0.000000 0.001304 +v 0.006217 -0.000000 0.001364 +v 0.001644 -0.000000 0.001381 +v 0.006299 -0.000000 0.001479 +v 0.001780 -0.000000 0.001465 +v 0.001894 -0.000000 0.001542 
+v 0.006371 -0.000000 0.001593 +v 0.002000 -0.000000 0.001619 +v 0.006434 -0.000000 0.001702 +v 0.002099 -0.000000 0.001696 +v 0.002191 -0.000000 0.001775 +v 0.006485 -0.000000 0.001806 +v 0.002277 -0.000000 0.001855 +v 0.006578 -0.000000 0.002037 +v 0.002356 -0.000000 0.001938 +v 0.002429 -0.000000 0.002023 +v 0.002497 -0.000000 0.002111 +v 0.006654 -0.000000 0.002284 +v 0.002560 -0.000000 0.002202 +v 0.002618 -0.000000 0.002297 +v 0.006713 -0.000000 0.002545 +v 0.002671 -0.000000 0.002396 +v 0.002720 -0.000000 0.002499 +v 0.002770 -0.000000 0.002621 +v 0.006755 -0.000000 0.002817 +v 0.002811 -0.000000 0.002751 +v 0.002844 -0.000000 0.002889 +v 0.006781 -0.000000 0.003096 +v 0.002868 -0.000000 0.003032 +v 0.002884 -0.000000 0.003178 +v 0.006789 -0.000000 0.003382 +v 0.002891 -0.000000 0.003327 +v 0.002889 -0.000000 0.003477 +v -0.005425 -0.000000 0.003351 +v -0.003154 -0.000000 0.003351 +v -0.005023 -0.000000 0.003351 +v -0.003153 -0.000000 0.003358 +v -0.005765 -0.000000 0.003351 +v -0.006048 -0.000000 0.003353 +v -0.006280 -0.000000 0.003356 +v -0.006465 -0.000000 0.003360 +v -0.003147 -0.000000 0.003380 +v -0.006609 -0.000000 0.003365 +v -0.006716 -0.000000 0.003373 +v -0.006793 -0.000000 0.003383 +v -0.003139 -0.000000 0.003412 +v 0.006781 -0.000000 0.003669 +v -0.006843 -0.000000 0.003395 +v -0.006873 -0.000000 0.003410 +v -0.006887 -0.000000 0.003429 +v -0.003129 -0.000000 0.003453 +v -0.006891 -0.000000 0.003450 +v -0.006890 -0.000000 0.003469 +v -0.003117 -0.000000 0.003499 +v -0.006888 -0.000000 0.003497 +v 0.002879 -0.000000 0.003626 +v -0.006884 -0.000000 0.003531 +v -0.003104 -0.000000 0.003547 +v -0.006880 -0.000000 0.003572 +v -0.003092 -0.000000 0.003596 +v -0.006874 -0.000000 0.003619 +v -0.003080 -0.000000 0.003642 +v -0.006867 -0.000000 0.003671 +v 0.002859 -0.000000 0.003774 +v -0.003070 -0.000000 0.003683 +v 0.006755 -0.000000 0.003957 +v -0.006859 -0.000000 0.003728 +v -0.003062 -0.000000 0.003715 +v -0.003057 -0.000000 0.003736 +v -0.006851 -0.000000 0.003789 +v -0.003055 -0.000000 0.003744 +v -0.002995 -0.000000 0.003948 +v 0.002832 -0.000000 0.003918 +v -0.006841 -0.000000 0.003853 +v -0.006831 -0.000000 0.003919 +v 0.002795 -0.000000 0.004057 +v -0.006820 -0.000000 0.003987 +v -0.002927 -0.000000 0.004143 +v 0.006713 -0.000000 0.004242 +v -0.006808 -0.000000 0.004056 +v -0.006687 -0.000000 0.004611 +v 0.002749 -0.000000 0.004191 +v -0.002850 -0.000000 0.004328 +v 0.002700 -0.000000 0.004302 +v 0.006654 -0.000000 0.004522 +v 0.002639 -0.000000 0.004416 +v -0.002763 -0.000000 0.004503 +v 0.002567 -0.000000 0.004531 +v -0.002669 -0.000000 0.004666 +v 0.006578 -0.000000 0.004793 +v 0.002486 -0.000000 0.004645 +v -0.006510 -0.000000 0.005133 +v 0.002396 -0.000000 0.004758 +v -0.002566 -0.000000 0.004819 +v 0.002299 -0.000000 0.004868 +v 0.006485 -0.000000 0.005054 +v -0.002457 -0.000000 0.004960 +v 0.002196 -0.000000 0.004974 +v -0.002340 -0.000000 0.005088 +v 0.002088 -0.000000 0.005075 +v 0.006291 -0.000000 0.005472 +v 0.001977 -0.000000 0.005169 +v -0.002216 -0.000000 0.005204 +v -0.006279 -0.000000 0.005620 +v 0.001863 -0.000000 0.005256 +v -0.002086 -0.000000 0.005307 +v 0.001748 -0.000000 0.005333 +v -0.001951 -0.000000 0.005396 +v 0.001633 -0.000000 0.005400 +v -0.001809 -0.000000 0.005471 +v 0.001378 -0.000000 0.005521 +v -0.001544 -0.000000 0.005582 +v 0.006055 -0.000000 0.005863 +v 0.001107 -0.000000 0.005621 +v -0.001266 -0.000000 0.005671 +v -0.005995 -0.000000 0.006072 +v 0.000824 -0.000000 0.005701 +v -0.000977 -0.000000 0.005739 +v 0.000531 -0.000000 
0.005760 +v -0.000679 -0.000000 0.005785 +v 0.000231 -0.000000 0.005797 +v -0.000377 -0.000000 0.005810 +v -0.000073 -0.000000 0.005814 +v 0.005781 -0.000000 0.006230 +v -0.005658 -0.000000 0.006488 +v 0.005468 -0.000000 0.006570 +v -0.005270 -0.000000 0.006866 +v 0.005115 -0.000000 0.006884 +v -0.004830 -0.000000 0.007208 +v 0.004724 -0.000000 0.007172 +v 0.004296 -0.000000 0.007433 +v -0.004340 -0.000000 0.007511 +v 0.003830 -0.000000 0.007666 +v -0.003802 -0.000000 0.007775 +v 0.003326 -0.000000 0.007873 +v -0.003214 -0.000000 0.007999 +v 0.002786 -0.000000 0.008052 +v -0.002579 -0.000000 0.008182 +v 0.002210 -0.000000 0.008203 +v -0.001897 -0.000000 0.008325 +v 0.001598 -0.000000 0.008325 +v -0.001680 -0.000000 0.008356 +v 0.001375 -0.000000 0.008356 +v -0.001423 -0.000000 0.008382 +v 0.001114 -0.000000 0.008382 +v -0.001135 -0.000000 0.008401 +v 0.000822 -0.000000 0.008401 +v -0.000823 -0.000000 0.008416 +v 0.000507 -0.000000 0.008416 +v -0.000495 -0.000000 0.008424 +v 0.000178 -0.000000 0.008424 +v -0.000159 -0.000000 0.008426 +vn -0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +vn 0.0000 1.0000 0.0001 +usemtl SVGMat.023 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 5//1 6//1 4//1 +f 7//1 6//1 5//1 +f 7//1 8//1 6//1 +f 9//1 8//1 7//1 +f 10//1 8//1 9//1 +f 10//1 11//1 8//1 +f 12//1 11//1 10//1 +f 13//1 11//1 12//1 +f 13//1 14//1 11//1 +f 15//1 14//1 13//1 +f 16//1 14//1 15//1 +f 16//1 17//1 14//1 +f 18//1 17//1 16//1 +f 19//1 17//1 18//1 +f 19//1 20//1 17//1 +f 21//1 20//1 19//1 +f 22//1 20//1 21//1 +f 22//1 23//1 20//1 +f 24//1 23//1 22//1 +f 25//1 23//1 24//1 +f 25//1 26//1 23//1 +f 27//1 26//1 25//1 +f 27//1 28//1 26//1 +f 29//1 28//1 27//1 +f 29//1 30//1 28//1 +f 29//1 31//1 30//1 +f 32//1 31//1 29//1 +f 32//1 33//1 31//1 +f 32//1 34//1 33//1 +f 35//1 34//1 32//1 +f 35//1 36//1 34//1 +f 35//1 37//1 36//1 +f 38//1 37//1 35//1 +f 38//1 39//1 37//1 +f 38//1 40//1 39//1 +f 41//1 40//1 38//1 +f 41//1 42//1 40//1 +f 41//1 43//1 42//1 +f 44//1 43//1 41//1 +f 44//1 45//1 43//1 +f 44//1 46//1 45//1 +f 46//1 47//1 45//1 +f 44//1 48//1 46//1 +f 49//1 47//1 46//1 +f 50//1 47//1 49//1 +f 44//1 51//1 48//1 +f 50//1 52//1 47//1 +f 53//1 51//1 44//1 +f 54//1 52//1 50//1 +f 53//1 55//1 51//1 +f 56//1 52//1 54//1 +f 57//1 52//1 56//1 +f 53//1 58//1 55//1 +f 57//1 59//1 52//1 +f 60//1 59//1 57//1 +f 61//1 59//1 60//1 +f 53//1 62//1 58//1 +f 63//1 59//1 61//1 +f 63//1 64//1 59//1 +f 65//1 64//1 63//1 +f 66//1 62//1 53//1 +f 66//1 67//1 62//1 +f 68//1 64//1 65//1 +f 69//1 64//1 68//1 +f 69//1 70//1 64//1 +f 66//1 71//1 67//1 +f 72//1 70//1 69//1 +f 73//1 70//1 72//1 +f 74//1 71//1 66//1 +f 73//1 75//1 70//1 +f 74//1 76//1 71//1 +f 77//1 75//1 73//1 +f 78//1 75//1 77//1 +f 78//1 79//1 75//1 +f 74//1 80//1 76//1 +f 81//1 80//1 74//1 +f 82//1 79//1 78//1 +f 82//1 83//1 79//1 +f 81//1 84//1 80//1 +f 85//1 83//1 82//1 +f 85//1 86//1 83//1 +f 81//1 87//1 84//1 +f 88//1 86//1 85//1 +f 89//1 87//1 81//1 +f 89//1 90//1 87//1 +f 88//1 91//1 86//1 +f 89//1 92//1 90//1 +f 93//1 91//1 88//1 +f 94//1 92//1 89//1 +f 94//1 95//1 92//1 +f 93//1 96//1 91//1 +f 97//1 96//1 93//1 +f 94//1 98//1 95//1 +f 99//1 98//1 94//1 +f 97//1 100//1 96//1 +f 99//1 101//1 98//1 +f 102//1 100//1 97//1 +f 99//1 103//1 101//1 +f 104//1 103//1 99//1 +f 102//1 105//1 100//1 +f 104//1 106//1 103//1 +f 107//1 105//1 102//1 +f 108//1 106//1 104//1 +f 108//1 109//1 106//1 +f 110//1 105//1 107//1 +f 108//1 111//1 109//1 +f 112//1 111//1 108//1 +f 110//1 113//1 105//1 +f 114//1 113//1 110//1 +f 112//1 115//1 111//1 +f 116//1 
115//1 112//1 +f 116//1 117//1 115//1 +f 118//1 113//1 114//1 +f 119//1 117//1 116//1 +f 118//1 120//1 113//1 +f 119//1 121//1 117//1 +f 122//1 120//1 118//1 +f 123//1 121//1 119//1 +f 124//1 120//1 122//1 +f 125//1 121//1 123//1 +f 124//1 126//1 120//1 +f 127//1 121//1 125//1 +f 128//1 121//1 127//1 +f 129//1 121//1 128//1 +f 130//1 121//1 129//1 +f 131//1 121//1 130//1 +f 132//1 121//1 131//1 +f 133//1 121//1 132//1 +f 134//1 121//1 133//1 +f 135//1 121//1 134//1 +f 136//1 121//1 135//1 +f 137//1 121//1 136//1 +f 138//1 126//1 124//1 +f 139//1 121//1 137//1 +f 140//1 121//1 139//1 +f 141//1 121//1 140//1 +f 142//2 121//2 141//2 +f 143//2 121//2 142//2 +f 144//2 121//2 143//2 +f 145//2 121//2 144//2 +f 146//2 121//2 145//2 +f 147//2 121//2 146//2 +f 148//2 121//2 147//2 +f 149//2 121//2 148//2 +f 150//2 121//2 149//2 +f 151//2 121//2 150//2 +f 152//2 121//2 151//2 +f 138//1 153//1 126//1 +f 154//1 153//1 138//1 +f 154//1 155//1 153//1 +f 156//1 155//1 154//1 +f 156//1 157//1 155//1 +f 158//1 157//1 156//1 +f 158//1 159//1 157//1 +f 160//1 159//1 158//1 +f 161//1 159//1 160//1 +f 161//1 162//1 159//1 +f 163//1 162//1 161//1 +f 163//1 164//1 162//1 +f 165//1 164//1 163//1 +f 166//1 164//1 165//1 +f 167//1 164//1 166//1 +f 167//1 168//1 164//1 +f 169//1 168//1 167//1 +f 170//1 168//1 169//1 +f 171//1 168//1 170//1 +f 172//1 168//1 171//1 +f 173//1 168//1 172//1 +f 174//1 168//1 173//1 +f 175//1 168//1 174//1 +f 176//1 168//1 175//1 +f 177//1 168//1 176//1 +f 178//1 168//1 177//1 +f 179//1 168//1 178//1 +f 180//1 168//1 179//1 +f 181//1 168//1 180//1 +f 182//1 168//1 181//1 +f 182//1 183//1 168//1 +f 182//1 184//1 183//1 +f 182//1 185//1 184//1 +f 182//1 186//1 185//1 +f 182//1 187//1 186//1 +f 182//1 188//1 187//1 +f 182//1 189//1 188//1 +f 182//1 190//1 189//1 +f 182//1 191//1 190//1 +f 182//1 192//1 191//1 +f 182//1 193//1 192//1 +f 182//1 194//1 193//1 +f 182//1 195//1 194//1 +f 182//1 196//1 195//1 +f 182//1 197//1 196//1 +f 182//1 198//1 197//1 +f 182//1 199//1 198//1 +f 182//1 200//1 199//1 +f 182//1 201//1 200//1 +f 182//1 202//1 201//1 +f 182//1 203//1 202//1 +f 182//1 204//1 203//1 +f 182//1 205//1 204//1 +f 182//1 206//1 205//1 +f 182//1 207//1 206//1 +f 182//1 208//1 207//1 +f 182//1 209//1 208//1 +f 182//1 210//1 209//1 +f 182//1 211//1 210//1 +f 212//1 211//1 182//1 +f 212//1 213//1 211//1 +f 212//1 214//1 213//1 +f 212//1 215//1 214//1 +f 212//1 216//1 215//1 +f 212//1 217//1 216//1 +f 212//1 218//1 217//1 +f 212//1 219//1 218//1 +f 212//1 220//1 219//1 +f 212//1 221//1 220//1 +f 212//1 222//1 221//1 +f 212//1 223//1 222//1 +f 224//1 223//1 212//1 +f 225//1 223//1 224//1 +f 226//1 223//1 225//1 +f 227//1 223//1 226//1 +f 227//1 228//1 223//1 +f 229//1 228//1 227//1 +f 230//1 228//1 229//1 +f 231//1 228//1 230//1 +f 232//1 228//1 231//1 +f 233//1 228//1 232//1 +f 234//1 228//1 233//1 +f 235//1 228//1 234//1 +f 236//1 228//1 235//1 +f 237//1 228//1 236//1 +f 238//1 228//1 237//1 +f 239//1 228//1 238//1 +f 239//1 240//1 228//1 +f 241//1 240//1 239//1 +f 242//1 240//1 241//1 +f 243//1 240//1 242//1 +f 243//1 244//1 240//1 +f 245//1 244//1 243//1 +f 246//1 244//1 245//1 +f 247//1 244//1 246//1 +f 247//1 248//1 244//1 +f 249//1 248//1 247//1 +f 250//1 248//1 249//1 +f 250//1 251//1 248//1 +f 252//1 251//1 250//1 +f 252//1 253//1 251//1 +f 254//1 253//1 252//1 +f 255//1 253//1 254//1 +f 255//1 256//1 253//1 +f 257//1 256//1 255//1 +f 257//1 258//1 256//1 +f 259//1 258//1 257//1 +f 260//1 258//1 259//1 +f 260//1 261//1 258//1 +f 262//1 261//1 260//1 +f 262//1 263//1 261//1 +f 264//1 
263//1 262//1 +f 265//1 263//1 264//1 +f 266//1 263//1 265//1 +f 266//1 267//1 263//1 +f 268//1 267//1 266//1 +f 269//1 267//1 268//1 +f 269//1 270//1 267//1 +f 271//1 270//1 269//1 +f 272//1 270//1 271//1 +f 273//1 270//1 272//1 +f 273//1 274//1 270//1 +f 275//1 274//1 273//1 +f 276//1 274//1 275//1 +f 276//1 277//1 274//1 +f 278//1 277//1 276//1 +f 279//1 277//1 278//1 +f 279//1 280//1 277//1 +f 281//1 280//1 279//1 +f 282//1 280//1 281//1 +f 283//3 284//1 285//1 +f 283//3 286//1 284//1 +f 287//1 286//1 283//3 +f 288//1 286//1 287//1 +f 289//1 286//1 288//1 +f 290//1 286//1 289//1 +f 290//1 291//1 286//1 +f 292//1 291//1 290//1 +f 293//1 291//1 292//1 +f 294//1 291//1 293//1 +f 294//1 295//1 291//1 +f 282//1 296//1 280//1 +f 297//1 295//1 294//1 +f 298//1 295//1 297//1 +f 299//1 295//1 298//1 +f 299//1 300//1 295//1 +f 301//1 300//1 299//1 +f 302//1 300//1 301//1 +f 302//1 303//1 300//1 +f 304//1 303//1 302//1 +f 305//1 296//1 282//1 +f 306//1 303//1 304//1 +f 306//1 307//1 303//1 +f 308//1 307//1 306//1 +f 308//1 309//1 307//1 +f 310//1 309//1 308//1 +f 310//1 311//1 309//1 +f 312//1 311//1 310//1 +f 313//1 296//1 305//1 +f 312//1 314//1 311//1 +f 313//1 315//1 296//1 +f 316//1 314//1 312//1 +f 316//1 317//1 314//1 +f 316//1 318//1 317//1 +f 319//1 318//1 316//1 +f 319//1 320//1 318//1 +f 319//1 321//1 320//1 +f 322//1 315//1 313//1 +f 323//1 321//1 319//1 +f 324//1 321//1 323//1 +f 325//1 315//1 322//1 +f 326//1 321//1 324//1 +f 326//1 327//1 321//1 +f 325//1 328//1 315//1 +f 329//1 327//1 326//1 +f 330//1 327//1 329//1 +f 331//1 328//1 325//1 +f 330//1 332//1 327//1 +f 333//1 328//1 331//1 +f 333//1 334//1 328//1 +f 335//1 334//1 333//1 +f 330//1 336//1 332//1 +f 337//1 334//1 335//1 +f 330//1 338//1 336//1 +f 337//1 339//1 334//1 +f 340//1 339//1 337//1 +f 341//1 338//1 330//1 +f 342//1 339//1 340//1 +f 341//1 343//1 338//1 +f 344//1 339//1 342//1 +f 344//1 345//1 339//1 +f 341//1 346//1 343//1 +f 347//1 345//1 344//1 +f 341//1 348//1 346//1 +f 349//1 345//1 347//1 +f 349//1 350//1 345//1 +f 351//1 350//1 349//1 +f 341//1 352//1 348//1 +f 353//1 352//1 341//1 +f 354//1 350//1 351//1 +f 353//1 355//1 352//1 +f 356//1 350//1 354//1 +f 353//1 357//1 355//1 +f 358//1 350//1 356//1 +f 353//1 359//1 357//1 +f 360//1 350//1 358//1 +f 353//1 361//1 359//1 +f 360//1 362//1 350//1 +f 363//1 362//1 360//1 +f 353//1 364//1 361//1 +f 365//1 364//1 353//1 +f 366//1 362//1 363//1 +f 365//1 367//1 364//1 +f 368//1 362//1 366//1 +f 365//1 369//1 367//1 +f 370//1 362//1 368//1 +f 365//1 371//1 369//1 +f 372//1 362//1 370//1 +f 365//1 372//1 371//1 +f 365//1 362//1 372//1 +f 365//1 373//1 362//1 +f 374//1 373//1 365//1 +f 374//1 375//1 373//1 +f 376//1 375//1 374//1 +f 376//1 377//1 375//1 +f 378//1 377//1 376//1 +f 378//1 379//1 377//1 +f 378//1 380//1 379//1 +f 381//1 380//1 378//1 +f 381//1 382//1 380//1 +f 383//1 382//1 381//1 +f 383//1 384//1 382//1 +f 385//1 384//1 383//1 +f 385//1 386//1 384//1 +f 387//1 386//1 385//1 +f 387//1 388//1 386//1 +f 389//1 388//1 387//1 +f 389//1 390//1 388//1 +f 391//1 390//1 389//1 +f 391//1 392//1 390//1 +f 393//1 392//1 391//1 +f 393//1 394//1 392//1 +f 395//1 394//1 393//1 +f 395//1 396//1 394//1 +f 397//1 396//1 395//1 +f 397//1 398//1 396//1 +f 399//1 398//1 397//1 +f 399//1 400//1 398//1 +f 401//1 400//1 399//1 diff --git a/alphanumeric/34613.svg b/alphanumeric/34613.svg new file mode 100644 index 0000000..633d055 --- /dev/null +++ b/alphanumeric/34613.svg @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + diff --git a/alphanumeric/4.mtl b/alphanumeric/4.mtl 
new file mode 100644 index 0000000..8abd3db --- /dev/null +++ b/alphanumeric/4.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.024 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/4.obj b/alphanumeric/4.obj new file mode 100644 index 0000000..9aa3b20 --- /dev/null +++ b/alphanumeric/4.obj @@ -0,0 +1,518 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 4.mtl +o Curve.004_Curve.034 +v 0.001192 0.000000 -0.008331 +v 0.001847 0.000000 -0.008332 +v 0.001488 0.000000 -0.008332 +v 0.002275 0.000000 -0.008330 +v 0.000953 0.000000 -0.008328 +v 0.002779 0.000000 -0.008328 +v 0.000762 0.000000 -0.008323 +v 0.005138 0.000000 -0.008309 +v 0.000615 0.000000 -0.008316 +v 0.000504 0.000000 -0.008306 +v 0.005139 0.000000 -0.008304 +v 0.000423 0.000000 -0.008293 +v 0.005141 0.000000 -0.008288 +v 0.000365 0.000000 -0.008277 +v 0.005145 0.000000 -0.008265 +v 0.000323 0.000000 -0.008257 +v 0.005149 0.000000 -0.008237 +v 0.000291 0.000000 -0.008235 +v 0.005155 0.000000 -0.008204 +v 0.000243 0.000000 -0.008178 +v 0.005160 0.000000 -0.008170 +v 0.000138 0.000000 -0.008044 +v 0.005165 0.000000 -0.008135 +v 0.005170 0.000000 -0.008103 +v 0.005174 0.000000 -0.008074 +v 0.005178 0.000000 -0.008051 +v 0.005180 0.000000 -0.008036 +v -0.000022 0.000000 -0.007837 +v 0.005181 0.000000 -0.008031 +v 0.005186 0.000000 -0.007948 +v 0.005191 0.000000 -0.007783 +v -0.000232 0.000000 -0.007564 +v 0.005196 0.000000 -0.007541 +v -0.000486 0.000000 -0.007229 +v 0.005201 0.000000 -0.007228 +v -0.000783 0.000000 -0.006838 +v 0.005206 0.000000 -0.006850 +v 0.005210 0.000000 -0.006414 +v -0.001117 0.000000 -0.006398 +v 0.005214 0.000000 -0.005926 +v -0.001483 0.000000 -0.005912 +v 0.005218 0.000000 -0.005392 +v -0.001878 0.000000 -0.005388 +v 0.005221 0.000000 -0.004818 +v -0.002298 0.000000 -0.004830 +v 0.000990 0.000000 -0.005153 +v 0.000980 0.000000 -0.005139 +v 0.001040 0.000000 -0.004725 +v 0.000952 0.000000 -0.005102 +v 0.000908 0.000000 -0.005046 +v 0.000854 0.000000 -0.004976 +v 0.000792 0.000000 -0.004896 +v 0.000727 0.000000 -0.004812 +v -0.002738 0.000000 -0.004244 +v 0.005223 0.000000 -0.004210 +v 0.000662 0.000000 -0.004728 +v 0.000601 0.000000 -0.004648 +v 0.001046 0.000000 -0.004639 +v 0.000547 0.000000 -0.004578 +v 0.001052 0.000000 -0.004500 +v 0.000503 0.000000 -0.004522 +v 0.000475 0.000000 -0.004485 +v 0.001056 0.000000 -0.004313 +v 0.000464 0.000000 -0.004472 +v -0.000078 0.000000 -0.003772 +v 0.001060 0.000000 -0.004082 +v -0.003194 0.000000 -0.003636 +v 0.005224 0.000000 -0.003575 +v 0.001064 0.000000 -0.003811 +v 0.001067 0.000000 -0.003505 +v -0.000633 0.000000 -0.003054 +v -0.003652 0.000000 -0.003024 +v 0.005225 0.000000 -0.002918 +v 0.001069 0.000000 -0.003168 +v 0.001071 0.000000 -0.002804 +v -0.001189 0.000000 -0.002333 +v -0.004099 0.000000 -0.002427 +v 0.005231 -0.000000 0.001916 +v 0.001072 0.000000 -0.002418 +v -0.004530 0.000000 -0.001851 +v 0.001072 0.000000 -0.002013 +v -0.001734 0.000000 -0.001623 +v 0.001072 0.000000 -0.001595 +v -0.004943 0.000000 -0.001302 +v -0.002258 0.000000 -0.000939 +v 0.001071 0.000000 -0.001166 +v -0.005330 0.000000 -0.000785 +v 0.001053 -0.000000 0.001965 +v -0.002749 0.000000 -0.000296 +v -0.005689 0.000000 -0.000309 +v -0.006014 -0.000000 0.000123 +v -0.003195 -0.000000 0.000290 +v -0.006302 -0.000000 0.000503 +v -0.003586 -0.000000 0.000806 +v -0.006546 -0.000000 
0.000826 +v -0.003909 -0.000000 0.001235 +v -0.006743 -0.000000 0.001085 +v -0.006888 -0.000000 0.001275 +v -0.004154 -0.000000 0.001564 +v -0.006977 -0.000000 0.001389 +v -0.006985 -0.000000 0.001400 +v -0.007008 -0.000000 0.001430 +v -0.007042 -0.000000 0.001475 +v -0.007086 -0.000000 0.001531 +v -0.007135 -0.000000 0.001594 +v -0.004310 -0.000000 0.001777 +v -0.007187 -0.000000 0.001662 +v -0.007239 -0.000000 0.001729 +v -0.007288 -0.000000 0.001793 +v -0.004364 -0.000000 0.001860 +v -0.007332 -0.000000 0.001849 +v -0.007367 -0.000000 0.001894 +v -0.004335 -0.000000 0.001882 +v -0.004248 -0.000000 0.001903 +v -0.007389 -0.000000 0.001923 +v -0.004107 -0.000000 0.001922 +v 0.005238 -0.000000 0.001916 +v 0.005259 -0.000000 0.001919 +v 0.005290 -0.000000 0.001922 +v -0.003915 -0.000000 0.001939 +v 0.005329 -0.000000 0.001927 +v -0.007397 -0.000000 0.001934 +v 0.005373 -0.000000 0.001932 +v 0.005420 -0.000000 0.001937 +v -0.007397 -0.000000 0.003253 +v 0.005466 -0.000000 0.001942 +v -0.003673 -0.000000 0.001953 +v 0.005511 -0.000000 0.001948 +v 0.005550 -0.000000 0.001952 +v 0.005580 -0.000000 0.001956 +v -0.003385 -0.000000 0.001966 +v 0.005601 -0.000000 0.001958 +v 0.005609 -0.000000 0.001959 +v 0.005666 -0.000000 0.001963 +v 0.005734 -0.000000 0.001968 +v -0.000836 -0.000000 0.001984 +v -0.003053 -0.000000 0.001976 +v 0.005809 -0.000000 0.001973 +v 0.005892 -0.000000 0.001977 +v -0.002681 -0.000000 0.001983 +v 0.005981 -0.000000 0.001981 +v 0.006074 -0.000000 0.001984 +v -0.002270 -0.000000 0.001988 +v -0.001344 -0.000000 0.001988 +v 0.006170 -0.000000 0.001988 +v 0.006268 -0.000000 0.001991 +v -0.001823 -0.000000 0.001990 +v 0.006367 -0.000000 0.001993 +v 0.006465 -0.000000 0.001995 +v 0.006561 -0.000000 0.001996 +v 0.006655 -0.000000 0.001996 +v 0.007323 -0.000000 0.001996 +v 0.007324 -0.000000 0.002000 +v 0.007326 -0.000000 0.002010 +v 0.007329 -0.000000 0.002026 +v 0.007333 -0.000000 0.002046 +v 0.007337 -0.000000 0.002068 +v 0.007342 -0.000000 0.002092 +v 0.007347 -0.000000 0.002115 +v 0.007351 -0.000000 0.002138 +v 0.007355 -0.000000 0.002158 +v 0.007358 -0.000000 0.002174 +v 0.007360 -0.000000 0.002184 +v 0.007361 -0.000000 0.002188 +v 0.007365 -0.000000 0.002224 +v 0.007370 -0.000000 0.002280 +v 0.007374 -0.000000 0.002352 +v 0.007378 -0.000000 0.002441 +v 0.007382 -0.000000 0.002543 +v 0.007386 -0.000000 0.002658 +v 0.007389 -0.000000 0.002784 +v 0.007392 -0.000000 0.002919 +v 0.007394 -0.000000 0.003062 +v 0.007396 -0.000000 0.003210 +v 0.007397 -0.000000 0.003363 +v -0.007397 -0.000000 0.004577 +v 0.007397 -0.000000 0.003519 +v 0.007397 -0.000000 0.004657 +v -0.007113 -0.000000 0.004620 +v -0.007037 -0.000000 0.004625 +v -0.006895 -0.000000 0.004629 +v -0.006691 -0.000000 0.004634 +v -0.006430 -0.000000 0.004638 +v -0.006118 -0.000000 0.004642 +v -0.005758 -0.000000 0.004646 +v -0.005358 -0.000000 0.004649 +v -0.004921 -0.000000 0.004652 +v -0.004452 -0.000000 0.004654 +v -0.003958 -0.000000 0.004656 +v -0.003442 -0.000000 0.004657 +v -0.002909 -0.000000 0.004657 +v -0.002832 -0.000000 0.004657 +v -0.002619 -0.000000 0.004657 +v -0.002297 -0.000000 0.004657 +v -0.001894 -0.000000 0.004657 +v -0.001436 -0.000000 0.004657 +v -0.000950 -0.000000 0.004657 +v -0.000465 -0.000000 0.004657 +v -0.000007 -0.000000 0.004657 +v 0.000397 -0.000000 0.004657 +v 0.000719 -0.000000 0.004657 +v 0.000932 -0.000000 0.004657 +v 0.001009 -0.000000 0.004657 +v 0.001046 -0.000000 0.004849 +v 0.005231 -0.000000 0.004657 +v 0.005219 -0.000000 0.006496 +v 0.006314 -0.000000 0.004657 +v 0.001051 
-0.000000 0.004890 +v 0.001055 -0.000000 0.004958 +v 0.001060 -0.000000 0.005051 +v 0.001064 -0.000000 0.005166 +v 0.001068 -0.000000 0.005302 +v 0.001072 -0.000000 0.005457 +v 0.001075 -0.000000 0.005628 +v 0.001078 -0.000000 0.005813 +v 0.001080 -0.000000 0.006010 +v 0.001082 -0.000000 0.006217 +v 0.001083 -0.000000 0.006431 +v 0.001083 -0.000000 0.006650 +v 0.005200 -0.000000 0.008340 +v 0.001083 -0.000000 0.006990 +v 0.001084 -0.000000 0.007279 +v 0.001086 -0.000000 0.007520 +v 0.001089 -0.000000 0.007719 +v 0.001093 -0.000000 0.007880 +v 0.001100 -0.000000 0.008007 +v 0.001108 -0.000000 0.008105 +v 0.001119 -0.000000 0.008176 +v 0.001133 -0.000000 0.008227 +v 0.001150 -0.000000 0.008261 +v 0.001170 -0.000000 0.008283 +v 0.001195 -0.000000 0.008297 +v 0.001227 -0.000000 0.008303 +v 0.001292 -0.000000 0.008308 +v 0.001389 -0.000000 0.008314 +v 0.001515 -0.000000 0.008319 +v 0.001666 -0.000000 0.008324 +v 0.001842 -0.000000 0.008328 +v 0.002038 -0.000000 0.008331 +v 0.002254 -0.000000 0.008334 +v 0.002485 -0.000000 0.008336 +v 0.002730 -0.000000 0.008338 +v 0.002985 -0.000000 0.008339 +v 0.003250 -0.000000 0.008340 +v 0.003288 -0.000000 0.008340 +v 0.003394 -0.000000 0.008340 +v 0.003554 -0.000000 0.008340 +v 0.003755 -0.000000 0.008340 +v 0.003984 -0.000000 0.008340 +v 0.004225 -0.000000 0.008340 +v 0.004467 -0.000000 0.008340 +v 0.004695 -0.000000 0.008340 +v 0.004895 -0.000000 0.008340 +v 0.005056 -0.000000 0.008340 +v 0.005162 -0.000000 0.008340 +vn -0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.024 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 5//1 6//1 4//1 +f 7//1 6//1 5//1 +f 7//1 8//1 6//1 +f 9//1 8//1 7//1 +f 10//1 8//1 9//1 +f 10//1 11//1 8//1 +f 12//1 11//1 10//1 +f 12//1 13//1 11//1 +f 14//1 13//1 12//1 +f 14//1 15//1 13//1 +f 16//1 15//1 14//1 +f 16//1 17//1 15//1 +f 18//1 17//1 16//1 +f 18//1 19//1 17//1 +f 20//1 19//1 18//1 +f 20//1 21//1 19//1 +f 22//1 21//1 20//1 +f 22//1 23//1 21//1 +f 22//1 24//1 23//1 +f 22//1 25//1 24//1 +f 22//1 26//1 25//1 +f 22//1 27//1 26//1 +f 28//1 27//1 22//1 +f 28//1 29//1 27//1 +f 28//1 30//1 29//1 +f 28//1 31//1 30//1 +f 32//1 31//1 28//1 +f 32//1 33//1 31//1 +f 34//1 33//1 32//1 +f 34//1 35//1 33//1 +f 36//1 35//1 34//1 +f 36//1 37//1 35//1 +f 36//1 38//1 37//1 +f 39//1 38//1 36//1 +f 39//1 40//1 38//1 +f 41//1 40//1 39//1 +f 41//1 42//1 40//1 +f 43//1 42//1 41//1 +f 43//1 44//1 42//1 +f 45//1 46//1 43//1 +f 46//1 44//1 43//1 +f 45//1 47//1 46//1 +f 48//1 44//1 46//1 +f 45//1 49//1 47//1 +f 45//1 50//1 49//1 +f 45//1 51//1 50//1 +f 45//1 52//1 51//1 +f 45//1 53//1 52//1 +f 54//1 53//1 45//1 +f 48//1 55//1 44//1 +f 54//1 56//1 53//1 +f 54//1 57//1 56//1 +f 58//1 55//1 48//1 +f 54//1 59//1 57//1 +f 60//1 55//1 58//1 +f 54//1 61//1 59//1 +f 54//1 62//1 61//1 +f 63//1 55//1 60//1 +f 54//1 64//1 62//1 +f 54//1 65//1 64//1 +f 66//1 55//1 63//1 +f 67//1 65//1 54//1 +f 66//1 68//1 55//1 +f 69//1 68//1 66//1 +f 70//1 68//1 69//1 +f 67//1 71//1 65//1 +f 72//1 71//1 67//1 +f 70//1 73//1 68//1 +f 74//1 73//1 70//1 +f 75//1 73//1 74//1 +f 72//1 76//1 71//1 +f 77//1 76//1 72//1 +f 75//1 78//1 73//1 +f 79//1 78//1 75//1 +f 80//1 76//1 77//1 +f 81//1 78//1 79//1 +f 80//1 82//1 76//1 +f 83//1 78//1 81//1 +f 84//1 82//1 80//1 +f 84//1 85//1 82//1 +f 86//1 78//1 83//1 +f 87//1 85//1 84//1 +f 88//1 78//1 86//1 +f 87//1 89//1 85//1 +f 90//1 89//1 87//1 +f 91//1 89//1 90//1 +f 91//1 92//1 89//1 +f 93//1 92//1 91//1 +f 93//1 94//1 92//1 +f 95//1 94//1 93//1 +f 95//1 96//1 94//1 +f 97//1 96//1 95//1 +f 98//1 96//1 97//1 +f 98//1 
99//1 96//1 +f 100//1 99//1 98//1 +f 101//1 99//1 100//1 +f 102//1 99//1 101//1 +f 103//1 99//1 102//1 +f 104//1 99//1 103//1 +f 105//1 99//1 104//1 +f 105//1 106//1 99//1 +f 107//1 106//1 105//1 +f 108//1 106//1 107//1 +f 109//1 106//1 108//1 +f 109//1 110//1 106//1 +f 111//1 110//1 109//1 +f 112//1 110//1 111//1 +f 112//1 113//1 110//1 +f 112//1 114//1 113//1 +f 115//1 114//1 112//1 +f 115//1 116//1 114//1 +f 88//1 117//1 78//1 +f 88//1 118//1 117//1 +f 88//1 119//1 118//1 +f 115//1 120//1 116//1 +f 88//1 121//1 119//1 +f 122//1 120//1 115//1 +f 88//1 123//1 121//1 +f 88//1 124//1 123//1 +f 125//1 120//1 122//1 +f 88//1 126//1 124//1 +f 125//1 127//1 120//1 +f 88//1 128//1 126//1 +f 88//1 129//1 128//1 +f 88//1 130//1 129//1 +f 125//1 131//1 127//1 +f 88//1 132//1 130//1 +f 88//1 133//1 132//1 +f 88//1 134//1 133//1 +f 88//1 135//1 134//1 +f 136//1 135//1 88//1 +f 125//1 137//1 131//1 +f 136//1 138//1 135//1 +f 136//1 139//1 138//1 +f 125//1 140//1 137//1 +f 136//1 141//1 139//1 +f 136//1 142//1 141//1 +f 125//1 143//1 140//1 +f 144//1 142//1 136//1 +f 144//1 145//1 142//1 +f 144//1 146//1 145//1 +f 125//1 147//1 143//1 +f 147//1 146//1 144//1 +f 125//1 146//1 147//1 +f 125//1 148//1 146//1 +f 125//1 149//1 148//1 +f 125//1 150//1 149//1 +f 125//1 151//1 150//1 +f 125//1 152//1 151//1 +f 125//1 153//1 152//1 +f 125//1 154//1 153//1 +f 125//1 155//1 154//1 +f 125//1 156//1 155//1 +f 125//1 157//1 156//1 +f 125//1 158//1 157//1 +f 125//1 159//1 158//1 +f 125//1 160//1 159//1 +f 125//1 161//1 160//1 +f 125//1 162//1 161//1 +f 125//1 163//1 162//1 +f 125//1 164//1 163//1 +f 125//1 165//1 164//1 +f 125//1 166//1 165//1 +f 125//1 167//1 166//1 +f 125//1 168//1 167//1 +f 125//1 169//1 168//1 +f 125//1 170//1 169//1 +f 125//1 171//1 170//1 +f 125//1 172//1 171//1 +f 125//1 173//1 172//1 +f 125//1 174//1 173//1 +f 125//1 175//1 174//1 +f 176//1 175//1 125//1 +f 176//1 177//1 175//1 +f 176//1 178//1 177//1 +f 179//1 178//1 176//1 +f 180//1 178//1 179//1 +f 181//1 178//1 180//1 +f 182//1 178//1 181//1 +f 183//1 178//1 182//1 +f 184//1 178//1 183//1 +f 185//1 178//1 184//1 +f 186//1 178//1 185//1 +f 187//1 178//1 186//1 +f 188//1 178//1 187//1 +f 189//1 178//1 188//1 +f 190//1 178//1 189//1 +f 191//1 178//1 190//1 +f 192//2 178//2 191//2 +f 193//2 178//2 192//2 +f 194//2 178//2 193//2 +f 195//2 178//2 194//2 +f 196//2 178//2 195//2 +f 197//2 178//2 196//2 +f 198//2 178//2 197//2 +f 199//2 178//2 198//2 +f 200//2 178//2 199//2 +f 201//2 178//2 200//2 +f 202//2 178//2 201//2 +f 203//2 178//2 202//2 +f 204//1 205//1 203//1 +f 205//2 178//2 203//2 +f 204//1 206//1 205//1 +f 207//2 178//2 205//2 +f 208//1 206//1 204//1 +f 209//1 206//1 208//1 +f 210//1 206//1 209//1 +f 211//1 206//1 210//1 +f 212//1 206//1 211//1 +f 213//1 206//1 212//1 +f 214//1 206//1 213//1 +f 215//1 206//1 214//1 +f 216//1 206//1 215//1 +f 217//1 206//1 216//1 +f 218//1 206//1 217//1 +f 219//1 206//1 218//1 +f 219//1 220//1 206//1 +f 221//1 220//1 219//1 +f 222//1 220//1 221//1 +f 223//1 220//1 222//1 +f 224//1 220//1 223//1 +f 225//1 220//1 224//1 +f 226//1 220//1 225//1 +f 227//1 220//1 226//1 +f 228//1 220//1 227//1 +f 229//1 220//1 228//1 +f 230//1 220//1 229//1 +f 231//1 220//1 230//1 +f 232//1 220//1 231//1 +f 233//1 220//1 232//1 +f 234//1 220//1 233//1 +f 235//1 220//1 234//1 +f 236//1 220//1 235//1 +f 237//1 220//1 236//1 +f 238//1 220//1 237//1 +f 239//1 220//1 238//1 +f 240//1 220//1 239//1 +f 241//1 220//1 240//1 +f 242//1 220//1 241//1 +f 243//1 220//1 242//1 +f 244//1 220//1 243//1 +f 245//2 220//2 244//2 +f 246//2 
220//2 245//2 +f 247//2 220//2 246//2 +f 248//2 220//2 247//2 +f 249//2 220//2 248//2 +f 250//2 220//2 249//2 +f 251//2 220//2 250//2 +f 252//2 220//2 251//2 +f 253//2 220//2 252//2 +f 254//2 220//2 253//2 +f 255//2 220//2 254//2 diff --git a/alphanumeric/5.mtl b/alphanumeric/5.mtl new file mode 100644 index 0000000..5d74c4f --- /dev/null +++ b/alphanumeric/5.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.025 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/5.obj b/alphanumeric/5.obj new file mode 100644 index 0000000..d2a21d3 --- /dev/null +++ b/alphanumeric/5.obj @@ -0,0 +1,624 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 5.mtl +o Curve.005_Curve.045 +v 0.000030 0.000000 -0.008360 +v 0.006148 0.000000 -0.008361 +v 0.000759 0.000000 -0.008361 +v 0.006149 0.000000 -0.008352 +v -0.000671 0.000000 -0.008359 +v -0.001337 0.000000 -0.008357 +v -0.001961 0.000000 -0.008356 +v -0.002536 0.000000 -0.008352 +v 0.006151 0.000000 -0.008329 +v -0.003058 0.000000 -0.008349 +v -0.003517 0.000000 -0.008345 +v -0.003908 0.000000 -0.008341 +v -0.004225 0.000000 -0.008336 +v -0.004460 0.000000 -0.008330 +v -0.004607 0.000000 -0.008325 +v 0.006155 0.000000 -0.008293 +v -0.004660 0.000000 -0.008319 +v -0.004709 0.000000 -0.008121 +v 0.006159 0.000000 -0.008249 +v 0.006164 0.000000 -0.008198 +v 0.006169 0.000000 -0.008145 +v 0.006175 0.000000 -0.008091 +v -0.004816 0.000000 -0.007624 +v 0.006179 0.000000 -0.008041 +v 0.006184 0.000000 -0.007996 +v 0.006187 0.000000 -0.007961 +v 0.006189 0.000000 -0.007937 +v 0.006190 0.000000 -0.007929 +v 0.006195 0.000000 -0.007861 +v 0.006199 0.000000 -0.007779 +v 0.006203 0.000000 -0.007683 +v 0.006205 0.000000 -0.007577 +v -0.004968 0.000000 -0.006887 +v 0.006208 0.000000 -0.007462 +v 0.006211 0.000000 -0.007338 +v 0.006212 0.000000 -0.007208 +v 0.006213 0.000000 -0.007075 +v 0.006213 0.000000 -0.006938 +v 0.006212 0.000000 -0.006801 +v -0.005153 0.000000 -0.005973 +v 0.006211 0.000000 -0.006665 +v 0.006208 0.000000 -0.006531 +v 0.006190 0.000000 -0.005571 +v -0.005360 0.000000 -0.004939 +v -0.002241 0.000000 -0.005541 +v 0.001977 0.000000 -0.005553 +v -0.002242 0.000000 -0.005538 +v -0.002244 0.000000 -0.005531 +v -0.002246 0.000000 -0.005519 +v -0.002249 0.000000 -0.005505 +v -0.002253 0.000000 -0.005489 +v -0.002256 0.000000 -0.005472 +v -0.002260 0.000000 -0.005455 +v -0.002263 0.000000 -0.005439 +v -0.002267 0.000000 -0.005424 +v -0.002269 0.000000 -0.005413 +v -0.002271 0.000000 -0.005405 +v -0.002272 0.000000 -0.005403 +v -0.002277 0.000000 -0.005373 +v -0.002288 0.000000 -0.005318 +v -0.002302 0.000000 -0.005242 +v -0.002321 0.000000 -0.005147 +v -0.002344 0.000000 -0.005032 +v -0.002370 0.000000 -0.004902 +v -0.005577 0.000000 -0.003846 +v -0.002398 0.000000 -0.004757 +v -0.002430 0.000000 -0.004599 +v -0.002463 0.000000 -0.004431 +v -0.002498 0.000000 -0.004253 +v -0.002534 0.000000 -0.004069 +v -0.002572 0.000000 -0.003879 +v -0.002630 0.000000 -0.003573 +v -0.005791 0.000000 -0.002756 +v -0.002682 0.000000 -0.003305 +v -0.002725 0.000000 -0.003074 +v -0.002761 0.000000 -0.002876 +v 0.000309 0.000000 -0.003063 +v 0.001090 0.000000 -0.003064 +v 0.000704 0.000000 -0.003072 +v 0.001466 0.000000 -0.003040 +v 0.000070 0.000000 -0.003051 +v -0.000152 0.000000 -0.003035 +v 0.001833 0.000000 -0.003001 +v -0.000361 0.000000 -0.003015 +v -0.000560 0.000000 
-0.002990 +v 0.002190 0.000000 -0.002945 +v -0.000752 0.000000 -0.002958 +v -0.000940 0.000000 -0.002921 +v 0.002538 0.000000 -0.002874 +v -0.001126 0.000000 -0.002875 +v -0.002790 0.000000 -0.002709 +v -0.001314 0.000000 -0.002822 +v 0.002876 0.000000 -0.002787 +v -0.001506 0.000000 -0.002761 +v 0.003206 0.000000 -0.002683 +v -0.001706 0.000000 -0.002690 +v -0.005991 0.000000 -0.001727 +v -0.002812 0.000000 -0.002572 +v -0.001916 0.000000 -0.002609 +v 0.003527 0.000000 -0.002563 +v -0.002140 0.000000 -0.002517 +v -0.002827 0.000000 -0.002462 +v 0.003839 0.000000 -0.002428 +v -0.002228 0.000000 -0.002480 +v -0.002314 0.000000 -0.002444 +v -0.002836 0.000000 -0.002376 +v -0.002396 0.000000 -0.002411 +v 0.004143 0.000000 -0.002276 +v -0.002473 0.000000 -0.002379 +v -0.002544 0.000000 -0.002349 +v -0.002840 0.000000 -0.002314 +v -0.002609 0.000000 -0.002323 +v -0.002667 0.000000 -0.002299 +v -0.002838 0.000000 -0.002272 +v -0.002716 0.000000 -0.002279 +v -0.002757 0.000000 -0.002263 +v 0.004438 0.000000 -0.002109 +v -0.002830 0.000000 -0.002248 +v -0.002788 0.000000 -0.002251 +v -0.002809 0.000000 -0.002244 +v -0.002817 0.000000 -0.002241 +v 0.004591 0.000000 -0.002010 +v 0.004753 0.000000 -0.001893 +v 0.004922 0.000000 -0.001761 +v 0.005095 0.000000 -0.001616 +v -0.006165 0.000000 -0.000821 +v 0.005268 0.000000 -0.001461 +v 0.005439 0.000000 -0.001300 +v 0.005606 0.000000 -0.001135 +v 0.005763 0.000000 -0.000968 +v 0.005910 0.000000 -0.000804 +v -0.006302 0.000000 -0.000096 +v -0.000723 0.000000 -0.000584 +v -0.000296 0.000000 -0.000609 +v 0.006043 0.000000 -0.000644 +v 0.006160 0.000000 -0.000492 +v 0.000135 0.000000 -0.000591 +v 0.000558 0.000000 -0.000531 +v -0.001139 0.000000 -0.000519 +v 0.000968 0.000000 -0.000428 +v -0.001535 0.000000 -0.000415 +v 0.006256 0.000000 -0.000351 +v 0.001354 0.000000 -0.000280 +v -0.001901 0.000000 -0.000272 +v 0.006384 0.000000 -0.000131 +v 0.001707 0.000000 -0.000087 +v -0.002230 0.000000 -0.000091 +v 0.006500 -0.000000 0.000098 +v -0.006388 -0.000000 0.000387 +v -0.002512 -0.000000 0.000127 +v 0.001873 -0.000000 0.000027 +v 0.002027 -0.000000 0.000145 +v 0.006603 -0.000000 0.000335 +v -0.002739 -0.000000 0.000381 +v 0.002168 -0.000000 0.000269 +v 0.002297 -0.000000 0.000399 +v 0.006695 -0.000000 0.000579 +v -0.002932 -0.000000 0.000651 +v -0.006412 -0.000000 0.000567 +v 0.002415 -0.000000 0.000536 +v 0.002521 -0.000000 0.000681 +v -0.006392 -0.000000 0.000573 +v -0.006341 -0.000000 0.000579 +v 0.006775 -0.000000 0.000830 +v -0.006262 -0.000000 0.000586 +v -0.006158 -0.000000 0.000592 +v -0.006029 -0.000000 0.000598 +v -0.005879 -0.000000 0.000604 +v -0.005711 -0.000000 0.000609 +v -0.005525 -0.000000 0.000614 +v -0.005323 -0.000000 0.000619 +v -0.004654 -0.000000 0.000627 +v -0.004620 -0.000000 0.000627 +v -0.004207 -0.000000 0.000633 +v -0.003059 -0.000000 0.000649 +v -0.005110 -0.000000 0.000622 +v -0.004886 -0.000000 0.000625 +v -0.004526 -0.000000 0.000629 +v -0.004385 -0.000000 0.000631 +v -0.004006 -0.000000 0.000636 +v -0.003793 -0.000000 0.000639 +v -0.003579 -0.000000 0.000642 +v -0.003378 -0.000000 0.000645 +v -0.003201 -0.000000 0.000647 +v -0.002966 -0.000000 0.000650 +v 0.002617 -0.000000 0.000834 +v 0.006842 -0.000000 0.001089 +v 0.002702 -0.000000 0.000995 +v 0.002776 -0.000000 0.001165 +v 0.006897 -0.000000 0.001354 +v 0.002842 -0.000000 0.001345 +v 0.002897 -0.000000 0.001536 +v 0.006939 -0.000000 0.001625 +v 0.002944 -0.000000 0.001737 +v 0.006969 -0.000000 0.001902 +v 0.003013 -0.000000 0.002190 +v 0.006987 -0.000000 0.002184 +v 
0.006991 -0.000000 0.002471 +v 0.003032 -0.000000 0.002627 +v 0.006982 -0.000000 0.002763 +v 0.003000 -0.000000 0.003045 +v 0.006971 -0.000000 0.002961 +v 0.006953 -0.000000 0.003152 +v 0.002920 -0.000000 0.003441 +v 0.006930 -0.000000 0.003338 +v 0.006902 -0.000000 0.003520 +v 0.002793 -0.000000 0.003814 +v 0.006867 -0.000000 0.003697 +v -0.007043 -0.000000 0.003801 +v -0.005116 -0.000000 0.003651 +v -0.007043 -0.000000 0.003639 +v -0.003183 -0.000000 0.003669 +v -0.003182 -0.000000 0.003674 +v -0.003178 -0.000000 0.003687 +v -0.003172 -0.000000 0.003707 +v 0.006826 -0.000000 0.003870 +v -0.003165 -0.000000 0.003731 +v -0.003156 -0.000000 0.003759 +v -0.003148 -0.000000 0.003789 +v -0.003139 -0.000000 0.003819 +v -0.007037 -0.000000 0.003892 +v 0.002622 -0.000000 0.004159 +v -0.003130 -0.000000 0.003846 +v -0.003123 -0.000000 0.003872 +v 0.006780 -0.000000 0.004040 +v -0.003117 -0.000000 0.003891 +v -0.003113 -0.000000 0.003904 +v -0.007026 -0.000000 0.003995 +v -0.003112 -0.000000 0.003909 +v -0.003039 -0.000000 0.004125 +v -0.007008 -0.000000 0.004107 +v 0.006727 -0.000000 0.004208 +v -0.006984 -0.000000 0.004228 +v -0.002953 -0.000000 0.004326 +v 0.002406 -0.000000 0.004476 +v 0.006668 -0.000000 0.004375 +v -0.006956 -0.000000 0.004355 +v -0.002854 -0.000000 0.004512 +v -0.006922 -0.000000 0.004486 +v 0.006603 -0.000000 0.004539 +v 0.002149 -0.000000 0.004760 +v -0.006885 -0.000000 0.004618 +v -0.002741 -0.000000 0.004684 +v 0.006532 -0.000000 0.004704 +v -0.006845 -0.000000 0.004749 +v -0.002614 -0.000000 0.004841 +v 0.006454 -0.000000 0.004869 +v -0.006801 -0.000000 0.004878 +v 0.001851 -0.000000 0.005011 +v -0.002475 -0.000000 0.004983 +v 0.006227 -0.000000 0.005279 +v -0.006755 -0.000000 0.005003 +v -0.002321 -0.000000 0.005111 +v -0.006707 -0.000000 0.005120 +v 0.001514 -0.000000 0.005224 +v -0.002154 -0.000000 0.005224 +v -0.006658 -0.000000 0.005229 +v 0.001139 -0.000000 0.005397 +v -0.001973 -0.000000 0.005323 +v -0.006436 -0.000000 0.005638 +v 0.005967 -0.000000 0.005664 +v -0.001778 -0.000000 0.005408 +v 0.000729 -0.000000 0.005529 +v -0.001570 -0.000000 0.005478 +v -0.001347 -0.000000 0.005535 +v 0.000604 -0.000000 0.005555 +v -0.001203 -0.000000 0.005560 +v 0.000456 -0.000000 0.005577 +v -0.001039 -0.000000 0.005580 +v 0.000290 -0.000000 0.005593 +v -0.000861 -0.000000 0.005595 +v 0.000109 -0.000000 0.005605 +v -0.000672 -0.000000 0.005607 +v -0.000081 -0.000000 0.005612 +v -0.000476 -0.000000 0.005613 +v -0.000278 -0.000000 0.005615 +v -0.006173 -0.000000 0.006021 +v 0.005675 -0.000000 0.006026 +v -0.005869 -0.000000 0.006379 +v 0.005351 -0.000000 0.006362 +v 0.004997 -0.000000 0.006674 +v -0.005527 -0.000000 0.006708 +v 0.004610 -0.000000 0.006961 +v -0.005148 -0.000000 0.007010 +v 0.004194 -0.000000 0.007223 +v -0.004733 -0.000000 0.007282 +v 0.003747 -0.000000 0.007460 +v -0.004284 -0.000000 0.007525 +v 0.003269 -0.000000 0.007670 +v -0.003803 -0.000000 0.007738 +v 0.002762 -0.000000 0.007855 +v -0.003290 -0.000000 0.007919 +v 0.002225 -0.000000 0.008013 +v -0.002746 -0.000000 0.008068 +v 0.001659 -0.000000 0.008145 +v -0.002174 -0.000000 0.008183 +v 0.001455 -0.000000 0.008182 +v 0.001221 -0.000000 0.008215 +v -0.001575 -0.000000 0.008265 +v 0.000963 -0.000000 0.008243 +v 0.000686 -0.000000 0.008267 +v -0.001338 -0.000000 0.008286 +v 0.000395 -0.000000 0.008286 +v 0.000096 -0.000000 0.008300 +v -0.001077 -0.000000 0.008300 +v -0.000206 -0.000000 0.008308 +v -0.000798 -0.000000 0.008309 +v -0.000506 -0.000000 0.008311 +vn 0.0000 1.0000 0.0000 +vn 0.0000 -0.0000 1.0000 
+vn 0.0001 -0.0000 1.0000 +vn -0.0001 -0.0000 1.0000 +usemtl SVGMat.025 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 6//1 4//1 5//1 +f 7//1 4//1 6//1 +f 8//1 4//1 7//1 +f 8//1 9//1 4//1 +f 10//1 9//1 8//1 +f 11//1 9//1 10//1 +f 12//1 9//1 11//1 +f 13//1 9//1 12//1 +f 14//1 9//1 13//1 +f 15//1 9//1 14//1 +f 15//1 16//1 9//1 +f 17//1 16//1 15//1 +f 18//1 16//1 17//1 +f 18//1 19//1 16//1 +f 18//1 20//1 19//1 +f 18//1 21//1 20//1 +f 18//1 22//1 21//1 +f 23//1 22//1 18//1 +f 23//1 24//1 22//1 +f 23//1 25//1 24//1 +f 23//1 26//1 25//1 +f 23//1 27//1 26//1 +f 23//1 28//1 27//1 +f 23//1 29//1 28//1 +f 23//1 30//1 29//1 +f 23//1 31//1 30//1 +f 23//1 32//1 31//1 +f 33//1 32//1 23//1 +f 33//1 34//1 32//1 +f 33//1 35//1 34//1 +f 33//1 36//1 35//1 +f 33//1 37//1 36//1 +f 33//1 38//1 37//1 +f 33//1 39//1 38//1 +f 40//1 39//1 33//1 +f 40//1 41//1 39//1 +f 40//1 42//1 41//1 +f 40//1 43//1 42//1 +f 44//1 45//1 40//1 +f 45//1 43//1 40//1 +f 45//1 46//1 43//1 +f 44//1 47//1 45//1 +f 44//1 48//1 47//1 +f 44//1 49//1 48//1 +f 44//1 50//1 49//1 +f 44//1 51//1 50//1 +f 44//1 52//1 51//1 +f 44//1 53//1 52//1 +f 44//1 54//1 53//1 +f 44//1 55//1 54//1 +f 44//1 56//1 55//1 +f 44//1 57//1 56//1 +f 44//1 58//1 57//1 +f 44//1 59//1 58//1 +f 44//1 60//1 59//1 +f 44//1 61//1 60//1 +f 44//1 62//1 61//1 +f 44//1 63//1 62//1 +f 44//1 64//1 63//1 +f 65//1 64//1 44//1 +f 65//1 66//1 64//1 +f 65//1 67//1 66//1 +f 65//1 68//1 67//1 +f 65//1 69//1 68//1 +f 65//1 70//1 69//1 +f 65//1 71//1 70//1 +f 65//1 72//1 71//1 +f 73//1 72//1 65//1 +f 73//1 74//1 72//1 +f 73//1 75//1 74//1 +f 73//1 76//1 75//1 +f 77//1 78//1 79//1 +f 77//1 80//1 78//1 +f 81//1 80//1 77//1 +f 82//1 80//1 81//1 +f 82//1 83//1 80//1 +f 84//1 83//1 82//1 +f 85//1 83//1 84//1 +f 85//1 86//1 83//1 +f 87//1 86//1 85//1 +f 88//1 86//1 87//1 +f 88//1 89//1 86//1 +f 90//1 89//1 88//1 +f 73//1 91//1 76//1 +f 92//1 89//1 90//1 +f 92//1 93//1 89//1 +f 94//1 93//1 92//1 +f 94//1 95//1 93//1 +f 96//1 95//1 94//1 +f 97//1 91//1 73//1 +f 97//1 98//1 91//1 +f 99//1 95//1 96//1 +f 99//1 100//1 95//1 +f 101//1 100//1 99//1 +f 97//1 102//1 98//1 +f 101//1 103//1 100//1 +f 104//1 103//1 101//1 +f 105//1 103//1 104//1 +f 97//1 106//1 102//1 +f 107//1 103//1 105//1 +f 107//1 108//1 103//1 +f 109//1 108//1 107//1 +f 110//1 108//1 109//1 +f 97//1 111//1 106//1 +f 112//1 108//1 110//1 +f 113//1 108//1 112//1 +f 97//1 114//1 111//1 +f 115//1 108//1 113//1 +f 116//1 108//1 115//1 +f 116//1 117//1 108//1 +f 97//1 118//1 114//1 +f 119//1 117//1 116//1 +f 120//1 117//1 119//1 +f 97//1 121//1 118//1 +f 121//1 117//1 120//1 +f 97//1 117//1 121//1 +f 97//1 122//1 117//1 +f 97//1 123//1 122//1 +f 97//1 124//1 123//1 +f 97//1 125//1 124//1 +f 126//1 125//1 97//1 +f 126//1 127//1 125//1 +f 126//1 128//1 127//1 +f 126//1 129//1 128//1 +f 126//1 130//1 129//1 +f 126//1 131//1 130//1 +f 132//1 133//1 126//1 +f 133//1 134//1 126//1 +f 134//1 131//1 126//1 +f 134//1 135//1 131//1 +f 134//1 136//1 135//1 +f 137//1 136//1 134//1 +f 138//1 136//1 137//1 +f 132//1 139//1 133//1 +f 140//1 136//1 138//1 +f 132//1 141//1 139//1 +f 140//1 142//1 136//1 +f 143//1 142//1 140//1 +f 132//1 144//1 141//1 +f 143//1 145//1 142//1 +f 146//1 145//1 143//1 +f 132//1 147//1 144//1 +f 146//1 148//1 145//1 +f 149//1 147//1 132//1 +f 149//1 150//1 147//1 +f 151//1 148//1 146//1 +f 152//1 148//1 151//1 +f 152//1 153//1 148//1 +f 149//1 154//1 150//1 +f 155//1 153//1 152//1 +f 156//1 153//1 155//1 +f 156//1 157//1 153//1 +f 149//1 158//1 154//1 +f 159//1 158//1 149//1 +f 160//1 157//1 156//1 +f 161//1 
157//1 160//1 +f 162//1 158//1 159//1 +f 163//1 158//1 162//1 +f 161//1 164//1 157//1 +f 165//1 158//1 163//1 +f 166//1 158//1 165//1 +f 167//1 158//1 166//1 +f 168//1 158//1 167//1 +f 169//1 158//1 168//1 +f 170//1 158//1 169//1 +f 171//1 172//1 170//1 +f 172//1 173//1 170//1 +f 173//1 174//1 170//1 +f 174//1 175//1 170//1 +f 175//1 158//1 170//1 +f 176//1 172//1 171//1 +f 177//1 172//1 176//1 +f 178//1 179//1 173//1 +f 179//2 174//2 173//2 +f 180//3 181//3 174//4 +f 181//1 182//1 174//1 +f 182//2 175//2 174//4 +f 183//1 175//1 182//1 +f 184//2 175//3 183//2 +f 185//1 158//1 175//1 +f 186//1 164//1 161//1 +f 186//1 187//1 164//1 +f 188//1 187//1 186//1 +f 189//1 187//1 188//1 +f 189//1 190//1 187//1 +f 191//1 190//1 189//1 +f 192//1 190//1 191//1 +f 192//1 193//1 190//1 +f 194//1 193//1 192//1 +f 194//1 195//1 193//1 +f 196//1 195//1 194//1 +f 196//1 197//1 195//1 +f 196//1 198//1 197//1 +f 199//1 198//1 196//1 +f 199//1 200//1 198//1 +f 201//1 200//1 199//1 +f 201//1 202//1 200//1 +f 201//1 203//1 202//1 +f 204//1 203//1 201//1 +f 204//1 205//1 203//1 +f 204//1 206//1 205//1 +f 207//1 206//1 204//1 +f 207//1 208//1 206//1 +f 209//1 210//1 211//1 +f 209//1 212//1 210//1 +f 209//1 213//1 212//1 +f 209//1 214//1 213//1 +f 209//1 215//1 214//1 +f 207//1 216//1 208//1 +f 209//1 217//1 215//1 +f 209//1 218//1 217//1 +f 209//1 219//1 218//1 +f 209//1 220//1 219//1 +f 221//1 220//1 209//1 +f 222//1 216//1 207//1 +f 221//1 223//1 220//1 +f 221//1 224//1 223//1 +f 222//1 225//1 216//1 +f 221//1 226//1 224//1 +f 221//1 227//1 226//1 +f 228//1 227//1 221//1 +f 228//1 229//1 227//1 +f 228//1 230//1 229//1 +f 231//1 230//1 228//1 +f 222//1 232//1 225//1 +f 233//1 230//1 231//1 +f 233//1 234//1 230//1 +f 235//1 232//1 222//1 +f 235//1 236//1 232//1 +f 237//1 234//1 233//1 +f 237//1 238//1 234//1 +f 239//1 238//1 237//1 +f 235//1 240//1 236//1 +f 241//1 240//1 235//1 +f 242//1 238//1 239//1 +f 242//1 243//1 238//1 +f 241//1 244//1 240//1 +f 245//1 243//1 242//1 +f 245//1 246//1 243//1 +f 241//1 247//1 244//1 +f 248//1 246//1 245//1 +f 249//1 247//1 241//1 +f 248//1 250//1 246//1 +f 249//1 251//1 247//1 +f 252//1 250//1 248//1 +f 252//1 253//1 250//1 +f 254//1 253//1 252//1 +f 255//1 251//1 249//1 +f 254//1 256//1 253//1 +f 257//1 256//1 254//1 +f 258//1 251//1 255//1 +f 257//1 259//1 256//1 +f 260//1 259//1 257//1 +f 258//1 261//1 251//1 +f 260//1 262//1 259//1 +f 263//1 261//1 258//1 +f 260//1 264//1 262//1 +f 260//1 265//1 264//1 +f 266//1 261//1 263//1 +f 260//1 267//1 265//1 +f 268//1 261//1 266//1 +f 260//1 269//1 267//1 +f 270//1 261//1 268//1 +f 260//1 271//1 269//1 +f 272//1 261//1 270//1 +f 260//1 273//1 271//1 +f 274//1 261//1 272//1 +f 260//1 275//1 273//1 +f 276//1 261//1 274//1 +f 260//1 276//1 275//1 +f 260//1 261//1 276//1 +f 277//1 261//1 260//1 +f 277//1 278//1 261//1 +f 279//1 278//1 277//1 +f 279//1 280//1 278//1 +f 279//1 281//1 280//1 +f 282//1 281//1 279//1 +f 282//1 283//1 281//1 +f 284//1 283//1 282//1 +f 284//1 285//1 283//1 +f 286//1 285//1 284//1 +f 286//1 287//1 285//1 +f 288//1 287//1 286//1 +f 288//1 289//1 287//1 +f 290//1 289//1 288//1 +f 290//1 291//1 289//1 +f 292//1 291//1 290//1 +f 292//1 293//1 291//1 +f 294//1 293//1 292//1 +f 294//1 295//1 293//1 +f 296//1 295//1 294//1 +f 296//1 297//1 295//1 +f 296//1 298//1 297//1 +f 299//1 298//1 296//1 +f 299//1 300//1 298//1 +f 299//1 301//1 300//1 +f 302//1 301//1 299//1 +f 302//1 303//1 301//1 +f 302//1 304//1 303//1 +f 305//1 304//1 302//1 +f 305//1 306//1 304//1 +f 307//1 306//1 305//1 +f 307//1 308//1 306//1 diff --git 
a/alphanumeric/6.mtl b/alphanumeric/6.mtl new file mode 100644 index 0000000..633b228 --- /dev/null +++ b/alphanumeric/6.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.026 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/6.obj b/alphanumeric/6.obj new file mode 100644 index 0000000..338e2f8 --- /dev/null +++ b/alphanumeric/6.obj @@ -0,0 +1,657 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 6.mtl +o Curve.003_Curve.053 +v 0.000215 0.000000 -0.008428 +v 0.000654 0.000000 -0.008428 +v 0.000432 0.000000 -0.008430 +v 0.000874 0.000000 -0.008425 +v 0.000005 0.000000 -0.008425 +v 0.001088 0.000000 -0.008420 +v -0.000190 0.000000 -0.008419 +v 0.001291 0.000000 -0.008412 +v -0.000366 0.000000 -0.008411 +v 0.001477 0.000000 -0.008402 +v -0.000518 0.000000 -0.008401 +v 0.001640 0.000000 -0.008391 +v -0.000714 0.000000 -0.008379 +v 0.001778 0.000000 -0.008377 +v -0.000924 0.000000 -0.008347 +v 0.002057 0.000000 -0.008339 +v -0.001144 0.000000 -0.008306 +v 0.002334 0.000000 -0.008290 +v -0.001371 0.000000 -0.008257 +v 0.002607 0.000000 -0.008233 +v -0.001603 0.000000 -0.008200 +v 0.002876 0.000000 -0.008166 +v -0.001837 0.000000 -0.008136 +v 0.003139 0.000000 -0.008091 +v -0.002070 0.000000 -0.008066 +v 0.003396 0.000000 -0.008007 +v -0.002298 0.000000 -0.007991 +v 0.003646 0.000000 -0.007915 +v -0.002518 0.000000 -0.007912 +v 0.003889 0.000000 -0.007815 +v -0.002729 0.000000 -0.007830 +v -0.002927 0.000000 -0.007746 +v 0.004123 0.000000 -0.007708 +v -0.003108 0.000000 -0.007659 +v 0.004348 0.000000 -0.007593 +v -0.003552 0.000000 -0.007412 +v 0.004564 0.000000 -0.007470 +v 0.004768 0.000000 -0.007341 +v -0.003970 0.000000 -0.007128 +v 0.004905 0.000000 -0.007246 +v 0.005042 0.000000 -0.007142 +v 0.005180 0.000000 -0.007029 +v -0.004363 0.000000 -0.006809 +v 0.005316 0.000000 -0.006910 +v 0.005449 0.000000 -0.006786 +v -0.004728 0.000000 -0.006454 +v 0.005577 0.000000 -0.006659 +v 0.005698 0.000000 -0.006530 +v 0.005811 0.000000 -0.006402 +v -0.005065 0.000000 -0.006067 +v 0.005914 0.000000 -0.006276 +v 0.006006 0.000000 -0.006153 +v 0.006086 0.000000 -0.006034 +v -0.005374 0.000000 -0.005648 +v 0.000379 0.000000 -0.005960 +v 0.000555 0.000000 -0.005964 +v 0.006151 0.000000 -0.005923 +v 0.000726 0.000000 -0.005962 +v 0.000892 0.000000 -0.005954 +v 0.000201 0.000000 -0.005950 +v 0.001047 0.000000 -0.005939 +v 0.000024 0.000000 -0.005935 +v 0.001189 0.000000 -0.005917 +v -0.000149 0.000000 -0.005913 +v 0.006194 0.000000 -0.005837 +v 0.001355 0.000000 -0.005880 +v -0.000315 0.000000 -0.005887 +v -0.000472 0.000000 -0.005855 +v 0.001521 0.000000 -0.005830 +v -0.000617 0.000000 -0.005818 +v 0.006240 0.000000 -0.005735 +v 0.001685 0.000000 -0.005768 +v -0.000748 0.000000 -0.005776 +v -0.001074 0.000000 -0.005639 +v 0.001845 0.000000 -0.005696 +v 0.006287 0.000000 -0.005619 +v 0.001999 0.000000 -0.005614 +v -0.005654 0.000000 -0.005198 +v -0.001369 0.000000 -0.005477 +v 0.006334 0.000000 -0.005495 +v 0.002147 0.000000 -0.005524 +v 0.002287 0.000000 -0.005425 +v 0.006381 0.000000 -0.005365 +v -0.001634 0.000000 -0.005288 +v 0.002417 0.000000 -0.005320 +v 0.006425 0.000000 -0.005235 +v 0.002536 0.000000 -0.005210 +v -0.001871 0.000000 -0.005071 +v 0.006465 0.000000 -0.005106 +v 0.002643 0.000000 -0.005094 +v -0.005902 0.000000 -0.004719 +v 0.006502 0.000000 -0.004984 +v 0.002736 0.000000 -0.004975 +v 
-0.002080 0.000000 -0.004826 +v 0.006532 0.000000 -0.004872 +v 0.002814 0.000000 -0.004852 +v 0.006555 0.000000 -0.004774 +v 0.002832 0.000000 -0.004821 +v -0.002261 0.000000 -0.004550 +v 0.002852 0.000000 -0.004791 +v 0.002873 0.000000 -0.004762 +v 0.006570 0.000000 -0.004694 +v 0.002895 0.000000 -0.004734 +v 0.002917 0.000000 -0.004707 +v -0.006121 0.000000 -0.004212 +v 0.002940 0.000000 -0.004683 +v 0.006575 0.000000 -0.004634 +v 0.002963 0.000000 -0.004660 +v 0.002985 0.000000 -0.004641 +v 0.003007 0.000000 -0.004624 +v 0.006567 0.000000 -0.004620 +v 0.003028 0.000000 -0.004610 +v 0.006541 0.000000 -0.004608 +v 0.003048 0.000000 -0.004599 +v 0.006497 0.000000 -0.004598 +v 0.003067 0.000000 -0.004593 +v 0.006431 0.000000 -0.004588 +v 0.003099 0.000000 -0.004589 +v 0.003161 0.000000 -0.004584 +v 0.006342 0.000000 -0.004581 +v 0.003249 0.000000 -0.004580 +v 0.006227 0.000000 -0.004574 +v 0.003362 0.000000 -0.004576 +v 0.003497 0.000000 -0.004572 +v 0.006084 0.000000 -0.004569 +v 0.003652 0.000000 -0.004569 +v 0.003825 0.000000 -0.004566 +v 0.005912 0.000000 -0.004564 +v 0.004013 0.000000 -0.004563 +v 0.005708 0.000000 -0.004562 +v 0.004216 0.000000 -0.004561 +v 0.005471 0.000000 -0.004559 +v 0.004431 0.000000 -0.004559 +v 0.005197 0.000000 -0.004558 +v 0.004654 0.000000 -0.004558 +v 0.004886 0.000000 -0.004558 +v -0.002416 0.000000 -0.004243 +v -0.002545 0.000000 -0.003903 +v -0.006307 0.000000 -0.003677 +v -0.002650 0.000000 -0.003528 +v -0.006461 0.000000 -0.003118 +v -0.002731 0.000000 -0.003118 +v -0.002789 0.000000 -0.002671 +v -0.006581 0.000000 -0.002534 +v -0.002825 0.000000 -0.002186 +v 0.000760 0.000000 -0.002637 +v 0.001416 0.000000 -0.002636 +v 0.001130 0.000000 -0.002640 +v 0.000424 0.000000 -0.002627 +v 0.001674 0.000000 -0.002630 +v 0.001910 0.000000 -0.002620 +v 0.000118 0.000000 -0.002607 +v 0.002126 0.000000 -0.002605 +v -0.000164 0.000000 -0.002577 +v 0.002328 0.000000 -0.002585 +v 0.002518 0.000000 -0.002559 +v -0.000427 0.000000 -0.002533 +v 0.002702 0.000000 -0.002526 +v -0.006651 0.000000 -0.002063 +v -0.000678 0.000000 -0.002476 +v 0.002883 0.000000 -0.002485 +v 0.003065 0.000000 -0.002438 +v -0.000921 0.000000 -0.002404 +v 0.003251 0.000000 -0.002381 +v -0.001163 0.000000 -0.002314 +v 0.003447 0.000000 -0.002315 +v 0.003655 0.000000 -0.002239 +v -0.001408 0.000000 -0.002206 +v 0.003944 0.000000 -0.002119 +v -0.001664 0.000000 -0.002078 +v -0.002860 0.000000 -0.001374 +v 0.004230 0.000000 -0.001977 +v -0.001934 0.000000 -0.001929 +v -0.006705 0.000000 -0.001561 +v 0.004511 0.000000 -0.001817 +v -0.002225 0.000000 -0.001757 +v 0.004783 0.000000 -0.001638 +v -0.002238 0.000000 -0.001749 +v -0.002272 0.000000 -0.001728 +v -0.002324 0.000000 -0.001697 +v -0.002390 0.000000 -0.001658 +v -0.002464 0.000000 -0.001613 +v 0.005047 0.000000 -0.001444 +v -0.002543 0.000000 -0.001565 +v -0.002622 0.000000 -0.001518 +v -0.006744 0.000000 -0.001034 +v -0.002696 0.000000 -0.001473 +v -0.002761 0.000000 -0.001434 +v 0.005298 0.000000 -0.001237 +v -0.002814 0.000000 -0.001402 +v -0.002848 0.000000 -0.001381 +v 0.005535 0.000000 -0.001017 +v -0.006767 0.000000 -0.000490 +v 0.005756 0.000000 -0.000788 +v 0.000377 0.000000 -0.000542 +v 0.000564 0.000000 -0.000543 +v 0.005959 0.000000 -0.000552 +v 0.000753 0.000000 -0.000537 +v 0.006141 0.000000 -0.000309 +v 0.000197 0.000000 -0.000535 +v 0.000943 0.000000 -0.000524 +v 0.000027 0.000000 -0.000520 +v 0.001128 0.000000 -0.000505 +v -0.000131 0.000000 -0.000498 +v 0.001305 0.000000 -0.000480 +v -0.000271 0.000000 -0.000468 +v -0.006776 
-0.000000 0.000063 +v 0.001472 0.000000 -0.000448 +v -0.000605 0.000000 -0.000365 +v 0.001625 0.000000 -0.000411 +v 0.001760 0.000000 -0.000368 +v 0.002062 0.000000 -0.000243 +v -0.000916 0.000000 -0.000232 +v 0.006300 0.000000 -0.000063 +v 0.002336 0.000000 -0.000100 +v -0.001201 0.000000 -0.000072 +v 0.002584 -0.000000 0.000060 +v -0.001461 -0.000000 0.000115 +v 0.006434 -0.000000 0.000185 +v 0.002805 -0.000000 0.000238 +v -0.006770 -0.000000 0.000619 +v -0.001694 -0.000000 0.000327 +v 0.006517 -0.000000 0.000363 +v 0.003000 -0.000000 0.000434 +v -0.001899 -0.000000 0.000563 +v 0.006590 -0.000000 0.000534 +v 0.003168 -0.000000 0.000649 +v 0.006653 -0.000000 0.000701 +v -0.002075 -0.000000 0.000822 +v -0.006749 -0.000000 0.001168 +v 0.003310 -0.000000 0.000882 +v 0.006708 -0.000000 0.000866 +v -0.002222 -0.000000 0.001103 +v 0.006755 -0.000000 0.001031 +v 0.003427 -0.000000 0.001133 +v 0.006793 -0.000000 0.001199 +v -0.002338 -0.000000 0.001404 +v 0.003516 -0.000000 0.001403 +v -0.006714 -0.000000 0.001704 +v 0.006825 -0.000000 0.001373 +v 0.006849 -0.000000 0.001555 +v 0.003581 -0.000000 0.001692 +v -0.002422 -0.000000 0.001724 +v 0.006868 -0.000000 0.001748 +v 0.003619 -0.000000 0.002000 +v -0.006664 -0.000000 0.002219 +v -0.002473 -0.000000 0.002062 +v 0.006881 -0.000000 0.001954 +v 0.006889 -0.000000 0.002176 +v 0.003632 -0.000000 0.002327 +v -0.002490 -0.000000 0.002416 +v 0.006893 -0.000000 0.002416 +v -0.006601 -0.000000 0.002706 +v 0.003602 -0.000000 0.002771 +v -0.002467 -0.000000 0.002809 +v 0.006890 -0.000000 0.002666 +v 0.006882 -0.000000 0.002899 +v -0.006524 -0.000000 0.003156 +v 0.003514 -0.000000 0.003190 +v -0.002400 -0.000000 0.003181 +v 0.006869 -0.000000 0.003118 +v 0.006849 -0.000000 0.003325 +v -0.006434 -0.000000 0.003563 +v -0.002291 -0.000000 0.003530 +v 0.003372 -0.000000 0.003582 +v 0.006823 -0.000000 0.003523 +v 0.006789 -0.000000 0.003714 +v -0.002142 -0.000000 0.003854 +v -0.006302 -0.000000 0.004013 +v 0.003181 -0.000000 0.003942 +v 0.006747 -0.000000 0.003900 +v -0.001953 -0.000000 0.004152 +v 0.006697 -0.000000 0.004083 +v 0.002942 -0.000000 0.004268 +v -0.006145 -0.000000 0.004444 +v 0.006637 -0.000000 0.004267 +v -0.001729 -0.000000 0.004420 +v 0.006568 -0.000000 0.004453 +v 0.002660 -0.000000 0.004556 +v -0.001470 -0.000000 0.004657 +v -0.005961 -0.000000 0.004855 +v 0.006488 -0.000000 0.004643 +v 0.002340 -0.000000 0.004804 +v 0.006398 -0.000000 0.004840 +v -0.001178 -0.000000 0.004861 +v 0.001984 -0.000000 0.005007 +v 0.006198 -0.000000 0.005228 +v -0.005752 -0.000000 0.005246 +v -0.000856 -0.000000 0.005031 +v 0.001596 -0.000000 0.005163 +v -0.000505 -0.000000 0.005164 +v 0.001179 -0.000000 0.005267 +v -0.000126 -0.000000 0.005258 +v 0.005969 -0.000000 0.005596 +v -0.005517 -0.000000 0.005615 +v 0.000277 -0.000000 0.005311 +v 0.000738 -0.000000 0.005318 +v 0.005715 -0.000000 0.005942 +v -0.005259 -0.000000 0.005962 +v 0.005434 -0.000000 0.006265 +v -0.004977 -0.000000 0.006286 +v 0.005127 -0.000000 0.006566 +v -0.004673 -0.000000 0.006587 +v 0.004797 -0.000000 0.006844 +v -0.004346 -0.000000 0.006863 +v 0.004441 -0.000000 0.007099 +v -0.003997 -0.000000 0.007113 +v 0.004063 -0.000000 0.007329 +v -0.003628 -0.000000 0.007338 +v 0.003661 -0.000000 0.007535 +v -0.003237 -0.000000 0.007536 +v 0.003237 -0.000000 0.007716 +v -0.002828 -0.000000 0.007708 +v -0.002399 -0.000000 0.007857 +v 0.002791 -0.000000 0.007871 +v -0.001953 -0.000000 0.007982 +v 0.002325 -0.000000 0.008000 +v -0.001493 -0.000000 0.008082 +v 0.001862 -0.000000 0.008099 +v -0.001023 
-0.000000 0.008159 +v 0.001388 -0.000000 0.008173 +v -0.000544 -0.000000 0.008211 +v 0.000908 -0.000000 0.008220 +v -0.000061 -0.000000 0.008239 +v 0.000424 -0.000000 0.008242 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.026 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 5//1 6//1 4//1 +f 7//1 6//1 5//1 +f 7//1 8//1 6//1 +f 9//1 8//1 7//1 +f 9//1 10//1 8//1 +f 11//1 10//1 9//1 +f 11//1 12//1 10//1 +f 13//1 12//1 11//1 +f 13//1 14//1 12//1 +f 15//1 14//1 13//1 +f 15//1 16//1 14//1 +f 17//1 16//1 15//1 +f 17//1 18//1 16//1 +f 19//1 18//1 17//1 +f 19//1 20//1 18//1 +f 21//1 20//1 19//1 +f 21//1 22//1 20//1 +f 23//1 22//1 21//1 +f 23//1 24//1 22//1 +f 25//1 24//1 23//1 +f 25//1 26//1 24//1 +f 27//1 26//1 25//1 +f 27//1 28//1 26//1 +f 29//1 28//1 27//1 +f 29//1 30//1 28//1 +f 31//1 30//1 29//1 +f 32//1 30//1 31//1 +f 32//1 33//1 30//1 +f 34//1 33//1 32//1 +f 34//1 35//1 33//1 +f 36//1 35//1 34//1 +f 36//1 37//1 35//1 +f 36//1 38//1 37//1 +f 39//1 38//1 36//1 +f 39//1 40//1 38//1 +f 39//1 41//1 40//1 +f 39//1 42//1 41//1 +f 43//1 42//1 39//1 +f 43//1 44//1 42//1 +f 43//1 45//1 44//1 +f 46//1 45//1 43//1 +f 46//1 47//1 45//1 +f 46//1 48//1 47//1 +f 46//1 49//1 48//1 +f 50//1 49//1 46//1 +f 50//1 51//1 49//1 +f 50//1 52//1 51//1 +f 50//1 53//1 52//1 +f 54//1 55//1 50//1 +f 55//1 56//1 50//1 +f 56//1 53//1 50//1 +f 56//1 57//1 53//1 +f 58//1 57//1 56//1 +f 59//1 57//1 58//1 +f 54//1 60//1 55//1 +f 61//1 57//1 59//1 +f 54//1 62//1 60//1 +f 63//1 57//1 61//1 +f 54//1 64//1 62//1 +f 63//1 65//1 57//1 +f 66//1 65//1 63//1 +f 54//1 67//1 64//1 +f 54//1 68//1 67//1 +f 69//1 65//1 66//1 +f 54//1 70//1 68//1 +f 69//1 71//1 65//1 +f 72//1 71//1 69//1 +f 54//1 73//1 70//1 +f 54//1 74//1 73//1 +f 75//1 71//1 72//1 +f 75//1 76//1 71//1 +f 77//1 76//1 75//1 +f 78//1 74//1 54//1 +f 78//1 79//1 74//1 +f 77//1 80//1 76//1 +f 81//1 80//1 77//1 +f 82//1 80//1 81//1 +f 82//1 83//1 80//1 +f 78//1 84//1 79//1 +f 85//1 83//1 82//1 +f 85//1 86//1 83//1 +f 87//1 86//1 85//1 +f 78//1 88//1 84//1 +f 87//1 89//1 86//1 +f 90//1 89//1 87//1 +f 91//1 88//1 78//1 +f 90//1 92//1 89//1 +f 93//1 92//1 90//1 +f 91//1 94//1 88//1 +f 93//1 95//1 92//1 +f 96//1 95//1 93//1 +f 96//1 97//1 95//1 +f 98//1 97//1 96//1 +f 91//1 99//1 94//1 +f 100//1 97//1 98//1 +f 101//1 97//1 100//1 +f 101//1 102//1 97//1 +f 103//1 102//1 101//1 +f 104//1 102//1 103//1 +f 105//1 99//1 91//1 +f 106//1 102//1 104//1 +f 106//1 107//1 102//1 +f 108//1 107//1 106//1 +f 109//1 107//1 108//1 +f 110//1 107//1 109//1 +f 110//1 111//1 107//1 +f 112//1 111//1 110//1 +f 112//1 113//1 111//1 +f 114//1 113//1 112//1 +f 114//1 115//1 113//1 +f 116//1 115//1 114//1 +f 116//1 117//1 115//1 +f 118//1 117//1 116//1 +f 119//1 117//1 118//1 +f 119//1 120//1 117//1 +f 121//1 120//1 119//1 +f 121//1 122//1 120//1 +f 123//1 122//1 121//1 +f 124//1 122//1 123//1 +f 124//1 125//1 122//1 +f 126//1 125//1 124//1 +f 127//1 125//1 126//1 +f 127//1 128//1 125//1 +f 129//1 128//1 127//1 +f 129//1 130//1 128//1 +f 131//1 130//1 129//1 +f 131//1 132//1 130//1 +f 133//1 132//1 131//1 +f 133//1 134//1 132//1 +f 135//1 134//1 133//1 +f 135//1 136//1 134//1 +f 105//1 137//1 99//1 +f 105//1 138//1 137//1 +f 139//1 138//1 105//1 +f 139//1 140//1 138//1 +f 141//1 140//1 139//1 +f 141//1 142//1 140//1 +f 141//1 143//1 142//1 +f 144//1 143//1 141//1 +f 144//1 145//1 143//1 +f 146//1 147//1 148//1 +f 149//1 147//1 146//1 +f 149//1 150//1 147//1 +f 149//1 151//1 150//1 +f 152//1 151//1 149//1 +f 152//1 153//1 151//1 +f 154//1 153//1 152//1 +f 154//1 155//1 153//1 +f 154//1 156//1 155//1 +f 
157//1 156//1 154//1 +f 157//1 158//1 156//1 +f 159//1 145//1 144//1 +f 160//1 158//1 157//1 +f 160//1 161//1 158//1 +f 160//1 162//1 161//1 +f 163//1 162//1 160//1 +f 163//1 164//1 162//1 +f 165//1 164//1 163//1 +f 165//1 166//1 164//1 +f 165//1 167//1 166//1 +f 168//1 167//1 165//1 +f 168//1 169//1 167//1 +f 170//1 169//1 168//1 +f 159//1 171//1 145//1 +f 170//1 172//1 169//1 +f 173//1 172//1 170//1 +f 174//1 171//1 159//1 +f 173//1 175//1 172//1 +f 176//1 175//1 173//1 +f 176//1 177//1 175//1 +f 178//1 177//1 176//1 +f 179//1 177//1 178//1 +f 180//1 177//1 179//1 +f 181//1 177//1 180//1 +f 182//1 177//1 181//1 +f 182//1 183//1 177//1 +f 184//1 183//1 182//1 +f 185//1 183//1 184//1 +f 186//1 171//1 174//1 +f 187//1 183//1 185//1 +f 188//1 183//1 187//1 +f 188//1 189//1 183//1 +f 190//1 189//1 188//1 +f 191//1 189//1 190//1 +f 171//1 189//1 191//1 +f 186//1 189//1 171//1 +f 186//1 192//1 189//1 +f 193//1 192//1 186//1 +f 193//1 194//1 192//1 +f 193//1 195//1 194//1 +f 195//1 196//1 194//1 +f 196//1 197//1 194//1 +f 196//1 198//1 197//1 +f 198//1 199//1 197//1 +f 193//1 200//1 195//1 +f 201//1 199//1 198//1 +f 193//1 202//1 200//1 +f 203//1 199//1 201//1 +f 193//1 204//1 202//1 +f 205//1 199//1 203//1 +f 193//1 206//1 204//1 +f 207//1 206//1 193//1 +f 208//1 199//1 205//1 +f 207//1 209//1 206//1 +f 210//1 199//1 208//1 +f 211//1 199//1 210//1 +f 212//1 199//1 211//1 +f 207//1 213//1 209//1 +f 212//1 214//1 199//1 +f 215//1 214//1 212//1 +f 207//1 216//1 213//1 +f 217//1 214//1 215//1 +f 207//1 218//1 216//1 +f 217//1 219//1 214//1 +f 220//1 219//1 217//1 +f 221//1 218//1 207//1 +f 221//1 222//1 218//1 +f 220//1 223//1 219//1 +f 224//1 223//1 220//1 +f 221//1 225//1 222//1 +f 224//1 226//1 223//1 +f 227//1 226//1 224//1 +f 227//1 228//1 226//1 +f 221//1 229//1 225//1 +f 230//1 229//1 221//1 +f 231//1 228//1 227//1 +f 231//1 232//1 228//1 +f 230//1 233//1 229//1 +f 231//1 234//1 232//1 +f 235//1 234//1 231//1 +f 235//1 236//1 234//1 +f 230//1 237//1 233//1 +f 238//1 236//1 235//1 +f 239//1 237//1 230//1 +f 238//1 240//1 236//1 +f 238//1 241//1 240//1 +f 242//1 241//1 238//1 +f 239//1 243//1 237//1 +f 242//1 244//1 241//1 +f 245//1 244//1 242//1 +f 246//1 243//1 239//1 +f 246//1 247//1 243//1 +f 245//1 248//1 244//1 +f 245//1 249//1 248//1 +f 250//1 249//1 245//1 +f 246//1 251//1 247//1 +f 250//1 252//1 249//1 +f 253//1 251//1 246//1 +f 254//1 252//1 250//1 +f 253//1 255//1 251//1 +f 254//1 256//1 252//1 +f 254//1 257//1 256//1 +f 258//1 255//1 253//1 +f 259//1 257//1 254//1 +f 258//1 260//1 255//1 +f 259//1 261//1 257//1 +f 259//1 262//1 261//1 +f 263//1 260//1 258//1 +f 263//1 264//1 260//1 +f 265//1 262//1 259//1 +f 265//1 266//1 262//1 +f 265//1 267//1 266//1 +f 263//1 268//1 264//1 +f 269//1 268//1 263//1 +f 270//1 267//1 265//1 +f 270//1 271//1 267//1 +f 269//1 272//1 268//1 +f 270//1 273//1 271//1 +f 274//1 273//1 270//1 +f 275//1 272//1 269//1 +f 274//1 276//1 273//1 +f 275//1 277//1 272//1 +f 274//1 278//1 276//1 +f 279//1 278//1 274//1 +f 275//1 280//1 277//1 +f 281//1 280//1 275//1 +f 279//1 282//1 278//1 +f 283//1 282//1 279//1 +f 283//1 284//1 282//1 +f 281//1 285//1 280//1 +f 286//1 284//1 283//1 +f 286//1 287//1 284//1 +f 288//1 285//1 281//1 +f 288//1 289//1 285//1 +f 290//1 287//1 286//1 +f 288//1 291//1 289//1 +f 292//1 287//1 290//1 +f 288//1 293//1 291//1 +f 292//1 294//1 287//1 +f 295//1 293//1 288//1 +f 295//1 296//1 293//1 +f 297//1 294//1 292//1 +f 295//1 297//1 296//1 +f 295//1 294//1 297//1 +f 295//1 298//1 294//1 +f 299//1 298//1 295//1 +f 299//1 300//1 298//1 +f 
301//1 300//1 299//1 +f 301//1 302//1 300//1 +f 303//1 302//1 301//1 +f 303//1 304//1 302//1 +f 305//1 304//1 303//1 +f 305//1 306//1 304//1 +f 307//1 306//1 305//1 +f 307//1 308//1 306//1 +f 309//1 308//1 307//1 +f 309//1 310//1 308//1 +f 311//1 310//1 309//1 +f 311//1 312//1 310//1 +f 313//1 312//1 311//1 +f 314//1 312//1 313//1 +f 314//1 315//1 312//1 +f 316//1 315//1 314//1 +f 316//1 317//1 315//1 +f 318//1 317//1 316//1 +f 318//1 319//1 317//1 +f 320//1 319//1 318//1 +f 320//1 321//1 319//1 +f 322//1 321//1 320//1 +f 322//1 323//1 321//1 +f 324//1 323//1 322//1 +f 324//1 325//1 323//1 diff --git a/alphanumeric/7.mtl b/alphanumeric/7.mtl new file mode 100644 index 0000000..644e723 --- /dev/null +++ b/alphanumeric/7.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.027 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/7.obj b/alphanumeric/7.obj new file mode 100644 index 0000000..ef63a2f --- /dev/null +++ b/alphanumeric/7.obj @@ -0,0 +1,373 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 7.mtl +o Curve.008_Curve.068 +v -0.007380 0.000000 -0.008312 +v -0.000037 0.000000 -0.008340 +v -0.007380 0.000000 -0.008340 +v 0.007306 0.000000 -0.008340 +v 0.007307 0.000000 -0.008336 +v 0.007309 0.000000 -0.008326 +v 0.007312 0.000000 -0.008310 +v -0.007380 0.000000 -0.008235 +v 0.007316 0.000000 -0.008290 +v 0.007320 0.000000 -0.008268 +v 0.007325 0.000000 -0.008244 +v 0.007329 0.000000 -0.008220 +v -0.007380 0.000000 -0.008117 +v 0.007334 0.000000 -0.008198 +v 0.007338 0.000000 -0.008178 +v 0.007341 0.000000 -0.008162 +v 0.007343 0.000000 -0.008151 +v 0.007343 0.000000 -0.008148 +v 0.007348 0.000000 -0.008112 +v -0.007380 0.000000 -0.007970 +v 0.007353 0.000000 -0.008058 +v 0.007357 0.000000 -0.007987 +v 0.007361 0.000000 -0.007902 +v -0.007380 0.000000 -0.007804 +v 0.007365 0.000000 -0.007804 +v -0.007380 0.000000 -0.007627 +v 0.007369 0.000000 -0.007693 +v 0.007372 0.000000 -0.007572 +v -0.007380 0.000000 -0.007450 +v 0.007375 0.000000 -0.007443 +v -0.007380 0.000000 -0.007284 +v 0.007377 0.000000 -0.007306 +v 0.007379 0.000000 -0.007165 +v -0.007380 0.000000 -0.007137 +v 0.007380 0.000000 -0.007019 +v -0.007380 0.000000 -0.007020 +v -0.007380 0.000000 -0.006942 +v 0.007380 0.000000 -0.006871 +v -0.007380 0.000000 -0.006914 +v -0.007380 0.000000 -0.006722 +v 0.007380 0.000000 -0.005792 +v -0.007380 0.000000 -0.006536 +v -0.007379 0.000000 -0.006360 +v -0.007378 0.000000 -0.006195 +v -0.007377 0.000000 -0.006042 +v -0.007376 0.000000 -0.005904 +v -0.007374 0.000000 -0.005782 +v 0.007371 0.000000 -0.005783 +v 0.007347 0.000000 -0.005758 +v -0.007372 0.000000 -0.005678 +v 0.007309 0.000000 -0.005722 +v 0.007261 0.000000 -0.005676 +v -0.007370 0.000000 -0.005593 +v 0.007208 0.000000 -0.005624 +v 0.007151 0.000000 -0.005568 +v -0.007367 0.000000 -0.005530 +v 0.007094 0.000000 -0.005513 +v -0.007365 0.000000 -0.005490 +v 0.007041 0.000000 -0.005461 +v -0.007362 0.000000 -0.005475 +v -0.007313 0.000000 -0.005473 +v -0.007174 0.000000 -0.005471 +v -0.006950 0.000000 -0.005467 +v -0.006649 0.000000 -0.005464 +v -0.006277 0.000000 -0.005460 +v 0.006993 0.000000 -0.005415 +v -0.005839 0.000000 -0.005455 +v -0.005341 0.000000 -0.005450 +v -0.004792 0.000000 -0.005445 +v -0.004195 0.000000 -0.005440 +v -0.003558 0.000000 -0.005435 +v -0.002888 0.000000 -0.005430 +v -0.002189 0.000000 -0.005425 +v 
-0.002088 0.000000 -0.005425 +v -0.001807 0.000000 -0.005423 +v -0.001383 0.000000 -0.005421 +v -0.000852 0.000000 -0.005417 +v -0.000248 0.000000 -0.005414 +v 0.006955 0.000000 -0.005378 +v 0.000391 0.000000 -0.005410 +v 0.001030 0.000000 -0.005407 +v 0.001633 0.000000 -0.005403 +v 0.002164 0.000000 -0.005400 +v 0.002588 0.000000 -0.005397 +v 0.002869 0.000000 -0.005395 +v 0.002971 0.000000 -0.005395 +v 0.002252 0.000000 -0.004620 +v 0.006930 0.000000 -0.005354 +v 0.006921 0.000000 -0.005345 +v 0.006634 0.000000 -0.005063 +v 0.006339 0.000000 -0.004763 +v 0.006038 0.000000 -0.004449 +v 0.001307 0.000000 -0.003575 +v 0.005735 0.000000 -0.004124 +v 0.005433 0.000000 -0.003792 +v 0.005133 0.000000 -0.003455 +v 0.000459 0.000000 -0.002577 +v 0.004839 0.000000 -0.003117 +v 0.004554 0.000000 -0.002781 +v 0.004280 0.000000 -0.002450 +v -0.000300 0.000000 -0.001617 +v 0.004020 0.000000 -0.002127 +v 0.003777 0.000000 -0.001816 +v 0.003554 0.000000 -0.001519 +v -0.000976 0.000000 -0.000681 +v 0.003178 0.000000 -0.000995 +v 0.002822 0.000000 -0.000467 +v -0.001574 -0.000000 0.000242 +v 0.002486 -0.000000 0.000065 +v 0.002170 -0.000000 0.000600 +v -0.002102 -0.000000 0.001164 +v 0.001875 -0.000000 0.001139 +v 0.001599 -0.000000 0.001683 +v -0.002564 -0.000000 0.002095 +v 0.001344 -0.000000 0.002230 +v -0.002969 -0.000000 0.003047 +v 0.001108 -0.000000 0.002781 +v 0.000893 -0.000000 0.003337 +v -0.003321 -0.000000 0.004033 +v 0.000697 -0.000000 0.003897 +v 0.000522 -0.000000 0.004461 +v -0.003627 -0.000000 0.005064 +v 0.000366 -0.000000 0.005029 +v 0.000307 -0.000000 0.005271 +v -0.003893 -0.000000 0.006150 +v 0.000248 -0.000000 0.005532 +v 0.000190 -0.000000 0.005805 +v 0.000134 -0.000000 0.006086 +v 0.000081 -0.000000 0.006369 +v -0.004124 -0.000000 0.007305 +v 0.000032 -0.000000 0.006648 +v -0.000011 -0.000000 0.006918 +v -0.000049 -0.000000 0.007174 +v -0.000081 -0.000000 0.007408 +v -0.004146 -0.000000 0.007430 +v -0.000104 -0.000000 0.007618 +v -0.004166 -0.000000 0.007551 +v -0.004184 -0.000000 0.007666 +v -0.000119 -0.000000 0.007796 +v -0.004201 -0.000000 0.007776 +v -0.004215 -0.000000 0.007877 +v -0.000124 -0.000000 0.007937 +v -0.004228 -0.000000 0.007969 +v -0.000125 -0.000000 0.007971 +v -0.004239 -0.000000 0.008051 +v -0.000126 -0.000000 0.008006 +v -0.000129 -0.000000 0.008040 +v -0.000132 -0.000000 0.008073 +v -0.004246 -0.000000 0.008121 +v -0.000136 -0.000000 0.008106 +v -0.000141 -0.000000 0.008136 +v -0.004252 -0.000000 0.008179 +v -0.000146 -0.000000 0.008165 +v -0.000151 -0.000000 0.008191 +v -0.004254 -0.000000 0.008223 +v -0.000158 -0.000000 0.008215 +v -0.000165 -0.000000 0.008236 +v -0.004253 -0.000000 0.008253 +v -0.000172 -0.000000 0.008253 +v -0.004248 -0.000000 0.008265 +v -0.000180 -0.000000 0.008265 +v -0.004224 -0.000000 0.008272 +v -0.000197 -0.000000 0.008280 +v -0.004166 -0.000000 0.008278 +v -0.004074 -0.000000 0.008284 +v -0.000234 -0.000000 0.008292 +v -0.003954 -0.000000 0.008290 +v -0.003807 -0.000000 0.008296 +v -0.000291 -0.000000 0.008303 +v -0.003634 -0.000000 0.008301 +v -0.003440 -0.000000 0.008306 +v -0.000373 -0.000000 0.008312 +v -0.003226 -0.000000 0.008311 +v -0.002994 -0.000000 0.008316 +v -0.000481 -0.000000 0.008319 +v -0.002748 -0.000000 0.008320 +v -0.000618 -0.000000 0.008325 +v -0.002489 -0.000000 0.008324 +v -0.002220 -0.000000 0.008328 +v -0.000789 -0.000000 0.008328 +v -0.001847 -0.000000 0.008330 +v -0.000994 -0.000000 0.008331 +v -0.001520 -0.000000 0.008332 +v -0.001237 -0.000000 0.008332 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.027 +s 
1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 8//1 7//1 1//1 +f 8//1 9//1 7//1 +f 8//1 10//1 9//1 +f 8//1 11//1 10//1 +f 8//1 12//1 11//1 +f 13//1 12//1 8//1 +f 13//1 14//1 12//1 +f 13//1 15//1 14//1 +f 13//1 16//1 15//1 +f 13//1 17//1 16//1 +f 13//1 18//1 17//1 +f 13//1 19//1 18//1 +f 20//1 19//1 13//1 +f 20//1 21//1 19//1 +f 20//1 22//1 21//1 +f 20//1 23//1 22//1 +f 24//1 23//1 20//1 +f 24//1 25//1 23//1 +f 26//1 25//1 24//1 +f 26//1 27//1 25//1 +f 26//1 28//1 27//1 +f 29//1 28//1 26//1 +f 29//1 30//1 28//1 +f 31//1 30//1 29//1 +f 31//1 32//1 30//1 +f 31//1 33//1 32//1 +f 34//1 33//1 31//1 +f 34//1 35//1 33//1 +f 36//1 35//1 34//1 +f 37//1 35//1 36//1 +f 37//1 38//1 35//1 +f 39//1 38//1 37//1 +f 40//1 38//1 39//1 +f 40//1 41//1 38//1 +f 42//1 41//1 40//1 +f 43//1 41//1 42//1 +f 44//1 41//1 43//1 +f 45//1 41//1 44//1 +f 46//1 41//1 45//1 +f 47//1 41//1 46//1 +f 47//1 48//1 41//1 +f 47//1 49//1 48//1 +f 50//1 49//1 47//1 +f 50//1 51//1 49//1 +f 50//1 52//1 51//1 +f 53//1 52//1 50//1 +f 53//1 54//1 52//1 +f 53//1 55//1 54//1 +f 56//1 55//1 53//1 +f 56//1 57//1 55//1 +f 58//1 57//1 56//1 +f 58//1 59//1 57//1 +f 60//1 59//1 58//1 +f 61//1 59//1 60//1 +f 62//1 59//1 61//1 +f 63//1 59//1 62//1 +f 64//1 59//1 63//1 +f 65//1 59//1 64//1 +f 65//1 66//1 59//1 +f 67//1 66//1 65//1 +f 68//1 66//1 67//1 +f 69//1 66//1 68//1 +f 70//1 66//1 69//1 +f 71//1 66//1 70//1 +f 72//1 66//1 71//1 +f 73//1 66//1 72//1 +f 74//1 66//1 73//1 +f 75//1 66//1 74//1 +f 76//1 66//1 75//1 +f 77//1 66//1 76//1 +f 78//1 66//1 77//1 +f 78//1 79//1 66//1 +f 80//1 79//1 78//1 +f 81//1 79//1 80//1 +f 82//1 79//1 81//1 +f 83//1 79//1 82//1 +f 84//1 79//1 83//1 +f 85//1 79//1 84//1 +f 86//1 79//1 85//1 +f 87//1 79//1 86//1 +f 87//1 88//1 79//1 +f 87//1 89//1 88//1 +f 87//1 90//1 89//1 +f 87//1 91//1 90//1 +f 87//1 92//1 91//1 +f 93//1 92//1 87//1 +f 93//1 94//1 92//1 +f 93//1 95//1 94//1 +f 93//1 96//1 95//1 +f 97//1 96//1 93//1 +f 97//1 98//1 96//1 +f 97//1 99//1 98//1 +f 97//1 100//1 99//1 +f 101//1 100//1 97//1 +f 101//1 102//1 100//1 +f 101//1 103//1 102//1 +f 101//1 104//1 103//1 +f 105//1 104//1 101//1 +f 105//1 106//1 104//1 +f 105//1 107//1 106//1 +f 108//1 107//1 105//1 +f 108//1 109//1 107//1 +f 108//1 110//1 109//1 +f 111//1 110//1 108//1 +f 111//1 112//1 110//1 +f 111//1 113//1 112//1 +f 114//1 113//1 111//1 +f 114//1 115//1 113//1 +f 116//1 115//1 114//1 +f 116//1 117//1 115//1 +f 116//1 118//1 117//1 +f 119//1 118//1 116//1 +f 119//1 120//1 118//1 +f 119//1 121//1 120//1 +f 122//1 121//1 119//1 +f 122//1 123//1 121//1 +f 122//1 124//1 123//1 +f 125//1 124//1 122//1 +f 125//1 126//1 124//1 +f 125//1 127//1 126//1 +f 125//1 128//1 127//1 +f 125//1 129//1 128//1 +f 130//1 129//1 125//1 +f 130//1 131//1 129//1 +f 130//1 132//1 131//1 +f 130//1 133//1 132//1 +f 130//1 134//1 133//1 +f 135//1 134//1 130//1 +f 135//1 136//1 134//1 +f 137//1 136//1 135//1 +f 138//1 136//1 137//1 +f 138//1 139//1 136//1 +f 140//1 139//1 138//1 +f 141//1 139//1 140//1 +f 141//1 142//1 139//1 +f 143//1 142//1 141//1 +f 143//1 144//1 142//1 +f 145//1 144//1 143//1 +f 145//1 146//1 144//1 +f 145//1 147//1 146//1 +f 145//1 148//1 147//1 +f 149//1 148//1 145//1 +f 149//1 150//1 148//1 +f 149//1 151//1 150//1 +f 152//1 151//1 149//1 +f 152//1 153//1 151//1 +f 152//1 154//1 153//1 +f 155//1 154//1 152//1 +f 155//1 156//1 154//1 +f 155//1 157//1 156//1 +f 158//1 157//1 155//1 +f 158//1 159//1 157//1 +f 160//1 159//1 158//1 +f 160//1 161//1 159//1 +f 162//1 161//1 160//1 +f 162//1 163//1 161//1 +f 
164//1 163//1 162//1 +f 165//1 163//1 164//1 +f 165//1 166//1 163//1 +f 167//1 166//1 165//1 +f 168//1 166//1 167//1 +f 168//1 169//1 166//1 +f 170//1 169//1 168//1 +f 171//1 169//1 170//1 +f 171//1 172//1 169//1 +f 173//1 172//1 171//1 +f 174//1 172//1 173//1 +f 174//1 175//1 172//1 +f 176//1 175//1 174//1 +f 176//1 177//1 175//1 +f 178//1 177//1 176//1 +f 179//1 177//1 178//1 +f 179//1 180//1 177//1 +f 181//1 180//1 179//1 +f 181//1 182//1 180//1 +f 183//1 182//1 181//1 +f 183//1 184//1 182//1 diff --git a/alphanumeric/8.mtl b/alphanumeric/8.mtl new file mode 100644 index 0000000..230e6fa --- /dev/null +++ b/alphanumeric/8.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.028 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/8.obj b/alphanumeric/8.obj new file mode 100644 index 0000000..bf1aebf --- /dev/null +++ b/alphanumeric/8.obj @@ -0,0 +1,897 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 8.mtl +o Curve.007_Curve.077 +v -0.000144 0.000000 -0.008372 +v 0.000385 0.000000 -0.008370 +v 0.000122 0.000000 -0.008373 +v -0.000409 0.000000 -0.008367 +v 0.000639 0.000000 -0.008363 +v -0.000668 0.000000 -0.008358 +v 0.000878 0.000000 -0.008351 +v -0.000914 0.000000 -0.008346 +v 0.001096 0.000000 -0.008336 +v -0.001142 0.000000 -0.008330 +v 0.001289 0.000000 -0.008316 +v -0.001348 0.000000 -0.008310 +v 0.001836 0.000000 -0.008231 +v -0.001524 0.000000 -0.008287 +v -0.001951 0.000000 -0.008210 +v 0.002369 0.000000 -0.008112 +v -0.002367 0.000000 -0.008115 +v -0.002772 0.000000 -0.008001 +v 0.002883 0.000000 -0.007961 +v -0.003163 0.000000 -0.007871 +v 0.003376 0.000000 -0.007781 +v -0.003538 0.000000 -0.007724 +v 0.003844 0.000000 -0.007573 +v -0.003896 0.000000 -0.007561 +v 0.004284 0.000000 -0.007340 +v -0.004233 0.000000 -0.007384 +v -0.004549 0.000000 -0.007194 +v 0.004691 0.000000 -0.007083 +v -0.004842 0.000000 -0.006991 +v 0.005062 0.000000 -0.006805 +v -0.005110 0.000000 -0.006776 +v 0.005393 0.000000 -0.006508 +v -0.005350 0.000000 -0.006551 +v -0.005561 0.000000 -0.006315 +v 0.005681 0.000000 -0.006194 +v -0.005695 0.000000 -0.006138 +v 0.005923 0.000000 -0.005864 +v -0.005818 0.000000 -0.005948 +v 0.000066 0.000000 -0.005909 +v -0.005931 0.000000 -0.005748 +v -0.000177 0.000000 -0.005902 +v 0.000307 0.000000 -0.005904 +v 0.000542 0.000000 -0.005888 +v -0.000420 0.000000 -0.005884 +v 0.000771 0.000000 -0.005861 +v -0.000660 0.000000 -0.005855 +v 0.006114 0.000000 -0.005522 +v 0.000989 0.000000 -0.005821 +v -0.000894 0.000000 -0.005817 +v 0.001201 0.000000 -0.005769 +v -0.001119 0.000000 -0.005767 +v 0.001408 0.000000 -0.005705 +v -0.001333 0.000000 -0.005709 +v -0.006034 0.000000 -0.005539 +v -0.001534 0.000000 -0.005640 +v 0.001608 0.000000 -0.005630 +v -0.001718 0.000000 -0.005563 +v 0.001800 0.000000 -0.005544 +v -0.002026 0.000000 -0.005393 +v 0.001983 0.000000 -0.005449 +v -0.006124 0.000000 -0.005324 +v 0.006165 0.000000 -0.005404 +v 0.002154 0.000000 -0.005346 +v 0.006212 0.000000 -0.005276 +v -0.002291 0.000000 -0.005198 +v 0.002313 0.000000 -0.005234 +v -0.006202 0.000000 -0.005105 +v 0.006256 0.000000 -0.005140 +v 0.002458 0.000000 -0.005116 +v -0.002513 0.000000 -0.004979 +v 0.006295 0.000000 -0.004998 +v 0.002589 0.000000 -0.004992 +v -0.006267 0.000000 -0.004883 +v 0.006329 0.000000 -0.004851 +v 0.002703 0.000000 -0.004862 +v -0.002689 0.000000 -0.004741 +v -0.006317 
0.000000 -0.004661 +v 0.002800 0.000000 -0.004729 +v 0.006358 0.000000 -0.004701 +v -0.002819 0.000000 -0.004486 +v 0.002878 0.000000 -0.004592 +v 0.006382 0.000000 -0.004551 +v -0.006354 0.000000 -0.004440 +v 0.002971 0.000000 -0.004361 +v 0.006400 0.000000 -0.004401 +v -0.002904 0.000000 -0.004219 +v -0.006374 0.000000 -0.004223 +v 0.006412 0.000000 -0.004255 +v 0.003027 0.000000 -0.004125 +v 0.006418 0.000000 -0.004113 +v -0.006379 0.000000 -0.004012 +v -0.002941 0.000000 -0.003942 +v 0.003049 0.000000 -0.003888 +v 0.006416 0.000000 -0.003977 +v -0.006367 0.000000 -0.003809 +v 0.006408 0.000000 -0.003851 +v -0.002931 0.000000 -0.003661 +v 0.003036 0.000000 -0.003651 +v 0.006367 0.000000 -0.003560 +v -0.006327 0.000000 -0.003526 +v -0.002872 0.000000 -0.003376 +v 0.002990 0.000000 -0.003417 +v 0.006303 0.000000 -0.003279 +v -0.006266 0.000000 -0.003256 +v 0.002912 0.000000 -0.003190 +v -0.002763 0.000000 -0.003093 +v 0.006216 0.000000 -0.003010 +v -0.006183 0.000000 -0.002998 +v 0.002803 0.000000 -0.002970 +v -0.002605 0.000000 -0.002815 +v 0.006106 0.000000 -0.002752 +v -0.006078 0.000000 -0.002752 +v 0.002664 0.000000 -0.002762 +v -0.002395 0.000000 -0.002545 +v 0.002496 0.000000 -0.002567 +v 0.005974 0.000000 -0.002506 +v -0.005951 0.000000 -0.002518 +v 0.002300 0.000000 -0.002389 +v -0.002267 0.000000 -0.002414 +v -0.005801 0.000000 -0.002295 +v 0.005820 0.000000 -0.002273 +v -0.002123 0.000000 -0.002295 +v 0.002078 0.000000 -0.002229 +v -0.001966 0.000000 -0.002186 +v -0.005629 0.000000 -0.002082 +v 0.005645 0.000000 -0.002053 +v 0.001830 0.000000 -0.002091 +v -0.001794 0.000000 -0.002087 +v 0.001688 0.000000 -0.002025 +v -0.001609 0.000000 -0.002000 +v -0.005434 0.000000 -0.001880 +v 0.005449 0.000000 -0.001846 +v 0.001549 0.000000 -0.001965 +v -0.001411 0.000000 -0.001924 +v 0.001412 0.000000 -0.001913 +v -0.001200 0.000000 -0.001859 +v 0.001275 0.000000 -0.001867 +v -0.005216 0.000000 -0.001688 +v 0.001138 0.000000 -0.001827 +v -0.000977 0.000000 -0.001805 +v 0.005232 0.000000 -0.001654 +v 0.000998 0.000000 -0.001794 +v -0.000743 0.000000 -0.001764 +v 0.000853 0.000000 -0.001767 +v 0.000704 0.000000 -0.001746 +v -0.000498 0.000000 -0.001733 +v 0.000548 0.000000 -0.001729 +v -0.000242 0.000000 -0.001715 +v 0.000383 0.000000 -0.001718 +v 0.000209 0.000000 -0.001711 +v 0.000024 0.000000 -0.001709 +v -0.004974 0.000000 -0.001505 +v 0.004995 0.000000 -0.001477 +v -0.004709 0.000000 -0.001332 +v 0.004738 0.000000 -0.001314 +v -0.004419 0.000000 -0.001168 +v 0.004461 0.000000 -0.001168 +v -0.004351 0.000000 -0.001131 +v 0.004385 0.000000 -0.001131 +v 0.004313 0.000000 -0.001095 +v -0.004286 0.000000 -0.001095 +v 0.004245 0.000000 -0.001061 +v -0.004224 0.000000 -0.001059 +v 0.004181 0.000000 -0.001027 +v -0.004166 0.000000 -0.001025 +v 0.004122 0.000000 -0.000995 +v -0.004114 0.000000 -0.000992 +v 0.004069 0.000000 -0.000965 +v -0.004065 0.000000 -0.000962 +v 0.004022 0.000000 -0.000939 +v -0.004023 0.000000 -0.000934 +v 0.003983 0.000000 -0.000914 +v -0.003987 0.000000 -0.000908 +v 0.003951 0.000000 -0.000894 +v -0.003958 0.000000 -0.000887 +v 0.003927 0.000000 -0.000877 +v -0.003937 0.000000 -0.000868 +v 0.003912 0.000000 -0.000864 +v -0.003923 0.000000 -0.000854 +v 0.003907 0.000000 -0.000856 +v 0.003910 0.000000 -0.000849 +v -0.003919 0.000000 -0.000844 +v 0.003917 0.000000 -0.000840 +v -0.003923 0.000000 -0.000835 +v 0.003928 0.000000 -0.000829 +v -0.003934 0.000000 -0.000823 +v 0.003943 0.000000 -0.000817 +v -0.003953 0.000000 -0.000807 +v 0.003962 0.000000 -0.000803 +v -0.003978 
0.000000 -0.000789 +v 0.003984 0.000000 -0.000788 +v -0.004009 0.000000 -0.000768 +v 0.004010 0.000000 -0.000773 +v 0.004038 0.000000 -0.000756 +v -0.004045 0.000000 -0.000745 +v 0.004069 0.000000 -0.000739 +v -0.004086 0.000000 -0.000720 +v 0.004101 0.000000 -0.000721 +v 0.004136 0.000000 -0.000703 +v -0.004131 0.000000 -0.000694 +v 0.004172 0.000000 -0.000686 +v -0.004180 0.000000 -0.000666 +v 0.004442 0.000000 -0.000547 +v -0.004232 0.000000 -0.000638 +v -0.004286 0.000000 -0.000609 +v -0.004343 0.000000 -0.000580 +v -0.004614 0.000000 -0.000437 +v 0.004710 0.000000 -0.000394 +v -0.004875 0.000000 -0.000287 +v 0.004973 0.000000 -0.000229 +v -0.005123 0.000000 -0.000130 +v 0.005228 0.000000 -0.000055 +v -0.005359 -0.000000 0.000032 +v 0.005473 -0.000000 0.000127 +v -0.005581 -0.000000 0.000200 +v 0.005705 -0.000000 0.000314 +v -0.005788 -0.000000 0.000373 +v -0.000029 -0.000000 0.000403 +v 0.005922 -0.000000 0.000504 +v -0.005980 -0.000000 0.000549 +v -0.000229 -0.000000 0.000404 +v 0.000173 -0.000000 0.000409 +v -0.000423 -0.000000 0.000412 +v 0.000375 -0.000000 0.000422 +v -0.000610 -0.000000 0.000427 +v 0.000574 -0.000000 0.000442 +v -0.000785 -0.000000 0.000450 +v 0.000768 -0.000000 0.000468 +v -0.000948 -0.000000 0.000479 +v 0.000954 -0.000000 0.000501 +v -0.001157 -0.000000 0.000533 +v 0.001128 -0.000000 0.000540 +v 0.006122 -0.000000 0.000696 +v -0.001362 -0.000000 0.000600 +v 0.001289 -0.000000 0.000585 +v -0.006155 -0.000000 0.000729 +v 0.001464 -0.000000 0.000647 +v -0.001560 -0.000000 0.000678 +v 0.001636 -0.000000 0.000719 +v -0.001750 -0.000000 0.000767 +v 0.006300 -0.000000 0.000887 +v 0.001804 -0.000000 0.000800 +v -0.006313 -0.000000 0.000911 +v -0.001932 -0.000000 0.000867 +v 0.001966 -0.000000 0.000889 +v -0.002104 -0.000000 0.000976 +v 0.006457 -0.000000 0.001075 +v 0.002121 -0.000000 0.000986 +v -0.006453 -0.000000 0.001094 +v -0.002264 -0.000000 0.001094 +v 0.002268 -0.000000 0.001090 +v 0.006588 -0.000000 0.001258 +v 0.002405 -0.000000 0.001199 +v -0.002412 -0.000000 0.001219 +v -0.006573 -0.000000 0.001278 +v 0.002532 -0.000000 0.001313 +v -0.002546 -0.000000 0.001352 +v 0.006690 -0.000000 0.001433 +v -0.006673 -0.000000 0.001462 +v 0.002647 -0.000000 0.001431 +v -0.002665 -0.000000 0.001491 +v 0.002748 -0.000000 0.001551 +v 0.006763 -0.000000 0.001578 +v -0.006742 -0.000000 0.001607 +v -0.002768 -0.000000 0.001636 +v 0.002835 -0.000000 0.001674 +v 0.006826 -0.000000 0.001713 +v -0.006803 -0.000000 0.001749 +v -0.002854 -0.000000 0.001785 +v 0.002907 -0.000000 0.001797 +v 0.006879 -0.000000 0.001841 +v -0.006856 -0.000000 0.001890 +v -0.002931 -0.000000 0.001952 +v 0.002981 -0.000000 0.001959 +v 0.006924 -0.000000 0.001965 +v -0.006902 -0.000000 0.002032 +v -0.002995 -0.000000 0.002126 +v 0.003043 -0.000000 0.002130 +v 0.006960 -0.000000 0.002089 +v -0.006940 -0.000000 0.002174 +v 0.006990 -0.000000 0.002217 +v -0.003049 -0.000000 0.002307 +v 0.003094 -0.000000 0.002310 +v -0.006970 -0.000000 0.002321 +v 0.007013 -0.000000 0.002350 +v -0.003090 -0.000000 0.002493 +v 0.003133 -0.000000 0.002496 +v -0.006994 -0.000000 0.002472 +v 0.007030 -0.000000 0.002495 +v -0.007011 -0.000000 0.002630 +v -0.003120 -0.000000 0.002682 +v 0.007042 -0.000000 0.002652 +v 0.003161 -0.000000 0.002686 +v -0.007022 -0.000000 0.002796 +v 0.007050 -0.000000 0.002827 +v -0.003137 -0.000000 0.002873 +v 0.003178 -0.000000 0.002879 +v -0.007027 -0.000000 0.002971 +v 0.007054 -0.000000 0.003021 +v -0.003142 -0.000000 0.003065 +v 0.003182 -0.000000 0.003073 +v -0.007027 -0.000000 0.003157 +v 
0.007055 -0.000000 0.003239 +v -0.003136 -0.000000 0.003255 +v 0.003174 -0.000000 0.003266 +v -0.007020 -0.000000 0.003357 +v 0.007053 -0.000000 0.003409 +v -0.003116 -0.000000 0.003443 +v 0.003155 -0.000000 0.003456 +v -0.007003 -0.000000 0.003644 +v 0.007051 -0.000000 0.003559 +v -0.003085 -0.000000 0.003626 +v 0.003124 -0.000000 0.003642 +v 0.007047 -0.000000 0.003690 +v -0.003040 -0.000000 0.003804 +v 0.003080 -0.000000 0.003821 +v -0.006974 -0.000000 0.003917 +v 0.007042 -0.000000 0.003806 +v -0.002983 -0.000000 0.003974 +v 0.007034 -0.000000 0.003909 +v 0.003025 -0.000000 0.003992 +v 0.007024 -0.000000 0.004003 +v -0.006930 -0.000000 0.004178 +v -0.002938 -0.000000 0.004081 +v 0.002974 -0.000000 0.004110 +v 0.007011 -0.000000 0.004090 +v -0.002883 -0.000000 0.004191 +v 0.006995 -0.000000 0.004174 +v 0.002910 -0.000000 0.004230 +v 0.006975 -0.000000 0.004257 +v -0.006872 -0.000000 0.004427 +v -0.002819 -0.000000 0.004304 +v 0.002834 -0.000000 0.004351 +v 0.006951 -0.000000 0.004343 +v -0.002748 -0.000000 0.004417 +v 0.006923 -0.000000 0.004434 +v 0.002748 -0.000000 0.004471 +v -0.002670 -0.000000 0.004529 +v -0.006799 -0.000000 0.004667 +v 0.006891 -0.000000 0.004533 +v 0.002653 -0.000000 0.004590 +v -0.002586 -0.000000 0.004638 +v 0.006832 -0.000000 0.004700 +v 0.002549 -0.000000 0.004706 +v -0.002499 -0.000000 0.004744 +v -0.006710 -0.000000 0.004899 +v 0.006769 -0.000000 0.004859 +v 0.002440 -0.000000 0.004817 +v -0.002409 -0.000000 0.004844 +v 0.002324 -0.000000 0.004922 +v -0.002318 -0.000000 0.004936 +v 0.006701 -0.000000 0.005012 +v -0.006604 -0.000000 0.005125 +v 0.002205 -0.000000 0.005020 +v -0.002226 -0.000000 0.005020 +v 0.006629 -0.000000 0.005160 +v 0.002083 -0.000000 0.005109 +v -0.002136 -0.000000 0.005094 +v -0.002048 -0.000000 0.005157 +v 0.001960 -0.000000 0.005189 +v -0.006482 -0.000000 0.005347 +v -0.002006 -0.000000 0.005181 +v 0.006551 -0.000000 0.005303 +v -0.001957 -0.000000 0.005208 +v 0.001836 -0.000000 0.005257 +v -0.001903 -0.000000 0.005236 +v -0.001844 -0.000000 0.005264 +v 0.001689 -0.000000 0.005327 +v -0.001780 -0.000000 0.005294 +v -0.001714 -0.000000 0.005323 +v 0.006466 -0.000000 0.005444 +v -0.001645 -0.000000 0.005353 +v 0.001547 -0.000000 0.005390 +v -0.006341 -0.000000 0.005566 +v -0.001575 -0.000000 0.005383 +v -0.001505 -0.000000 0.005410 +v 0.001408 -0.000000 0.005444 +v -0.001435 -0.000000 0.005437 +v -0.001366 -0.000000 0.005463 +v 0.006375 -0.000000 0.005582 +v 0.001269 -0.000000 0.005490 +v -0.001300 -0.000000 0.005486 +v -0.001203 -0.000000 0.005516 +v 0.001129 -0.000000 0.005530 +v -0.001113 -0.000000 0.005542 +v 0.000985 -0.000000 0.005563 +v -0.001029 -0.000000 0.005565 +v 0.000836 -0.000000 0.005589 +v -0.000946 -0.000000 0.005583 +v -0.006183 -0.000000 0.005783 +v 0.006276 -0.000000 0.005718 +v -0.000863 -0.000000 0.005599 +v 0.000679 -0.000000 0.005609 +v -0.000777 -0.000000 0.005611 +v 0.000513 -0.000000 0.005624 +v -0.000687 -0.000000 0.005621 +v -0.000588 -0.000000 0.005628 +v 0.000335 -0.000000 0.005634 +v -0.000479 -0.000000 0.005633 +v -0.000357 -0.000000 0.005637 +v 0.000143 -0.000000 0.005638 +v -0.000220 -0.000000 0.005638 +v -0.000065 -0.000000 0.005639 +v 0.006168 -0.000000 0.005855 +v -0.006005 -0.000000 0.006001 +v 0.006052 -0.000000 0.005992 +v 0.005926 -0.000000 0.006132 +v -0.005808 -0.000000 0.006221 +v 0.005790 -0.000000 0.006274 +v -0.005511 -0.000000 0.006517 +v 0.005653 -0.000000 0.006411 +v 0.005526 -0.000000 0.006533 +v -0.005195 -0.000000 0.006788 +v 0.005407 -0.000000 0.006644 +v 0.005291 -0.000000 
0.006744 +v 0.005178 -0.000000 0.006836 +v -0.004860 -0.000000 0.007035 +v 0.005064 -0.000000 0.006922 +v 0.004947 -0.000000 0.007003 +v 0.004824 -0.000000 0.007081 +v -0.004504 -0.000000 0.007260 +v 0.004692 -0.000000 0.007160 +v 0.004549 -0.000000 0.007239 +v 0.004392 -0.000000 0.007322 +v -0.004127 -0.000000 0.007461 +v 0.004219 -0.000000 0.007410 +v 0.003870 -0.000000 0.007573 +v -0.003727 -0.000000 0.007640 +v 0.003502 -0.000000 0.007720 +v -0.003304 -0.000000 0.007797 +v 0.003117 -0.000000 0.007851 +v -0.002855 -0.000000 0.007933 +v 0.002718 -0.000000 0.007966 +v -0.002381 -0.000000 0.008047 +v 0.002306 -0.000000 0.008064 +v -0.001880 -0.000000 0.008141 +v 0.001882 -0.000000 0.008146 +v -0.001352 -0.000000 0.008215 +v 0.001449 -0.000000 0.008210 +v 0.001009 -0.000000 0.008258 +v -0.000794 -0.000000 0.008269 +v 0.000563 -0.000000 0.008288 +v -0.000341 -0.000000 0.008293 +v 0.000112 -0.000000 0.008299 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.028 +s 1 +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 8//1 9//1 7//1 +f 10//1 9//1 8//1 +f 10//1 11//1 9//1 +f 12//1 11//1 10//1 +f 12//1 13//1 11//1 +f 14//1 13//1 12//1 +f 15//1 13//1 14//1 +f 15//1 16//1 13//1 +f 17//1 16//1 15//1 +f 18//1 16//1 17//1 +f 18//1 19//1 16//1 +f 20//1 19//1 18//1 +f 20//1 21//1 19//1 +f 22//1 21//1 20//1 +f 22//1 23//1 21//1 +f 24//1 23//1 22//1 +f 24//1 25//1 23//1 +f 26//1 25//1 24//1 +f 27//1 25//1 26//1 +f 27//1 28//1 25//1 +f 29//1 28//1 27//1 +f 29//1 30//1 28//1 +f 31//1 30//1 29//1 +f 31//1 32//1 30//1 +f 33//1 32//1 31//1 +f 34//1 32//1 33//1 +f 34//1 35//1 32//1 +f 36//1 35//1 34//1 +f 36//1 37//1 35//1 +f 38//1 39//1 36//1 +f 39//1 37//1 36//1 +f 40//1 39//1 38//1 +f 40//1 41//1 39//1 +f 42//1 37//1 39//1 +f 43//1 37//1 42//1 +f 40//1 44//1 41//1 +f 45//1 37//1 43//1 +f 40//1 46//1 44//1 +f 45//1 47//1 37//1 +f 48//1 47//1 45//1 +f 40//1 49//1 46//1 +f 50//1 47//1 48//1 +f 40//1 51//1 49//1 +f 52//1 47//1 50//1 +f 40//1 53//1 51//1 +f 54//1 53//1 40//1 +f 54//1 55//1 53//1 +f 56//1 47//1 52//1 +f 54//1 57//1 55//1 +f 58//1 47//1 56//1 +f 54//1 59//1 57//1 +f 60//1 47//1 58//1 +f 61//1 59//1 54//1 +f 60//1 62//1 47//1 +f 63//1 62//1 60//1 +f 63//1 64//1 62//1 +f 61//1 65//1 59//1 +f 66//1 64//1 63//1 +f 67//1 65//1 61//1 +f 66//1 68//1 64//1 +f 69//1 68//1 66//1 +f 67//1 70//1 65//1 +f 69//1 71//1 68//1 +f 72//1 71//1 69//1 +f 73//1 70//1 67//1 +f 72//1 74//1 71//1 +f 75//1 74//1 72//1 +f 73//1 76//1 70//1 +f 77//1 76//1 73//1 +f 78//1 74//1 75//1 +f 78//1 79//1 74//1 +f 77//1 80//1 76//1 +f 81//1 79//1 78//1 +f 81//1 82//1 79//1 +f 83//1 80//1 77//1 +f 84//1 82//1 81//1 +f 84//1 85//1 82//1 +f 83//1 86//1 80//1 +f 87//1 86//1 83//1 +f 84//1 88//1 85//1 +f 89//1 88//1 84//1 +f 89//1 90//1 88//1 +f 91//1 86//1 87//1 +f 91//1 92//1 86//1 +f 93//1 90//1 89//1 +f 93//1 94//1 90//1 +f 95//1 92//1 91//1 +f 93//1 96//1 94//1 +f 95//1 97//1 92//1 +f 98//1 96//1 93//1 +f 98//1 99//1 96//1 +f 100//1 97//1 95//1 +f 100//1 101//1 97//1 +f 102//1 99//1 98//1 +f 102//1 103//1 99//1 +f 104//1 101//1 100//1 +f 105//1 103//1 102//1 +f 104//1 106//1 101//1 +f 105//1 107//1 103//1 +f 108//1 106//1 104//1 +f 109//1 107//1 105//1 +f 108//1 110//1 106//1 +f 109//1 111//1 107//1 +f 112//1 110//1 108//1 +f 113//1 111//1 109//1 +f 112//1 114//1 110//1 +f 115//1 111//1 113//1 +f 115//1 116//1 111//1 +f 117//1 114//1 112//1 +f 118//1 116//1 115//1 +f 117//1 119//1 114//1 +f 120//1 119//1 117//1 +f 118//1 121//1 116//1 +f 120//1 122//1 119//1 +f 123//1 121//1 118//1 +f 
120//1 124//1 122//1 +f 125//1 124//1 120//1 +f 123//1 126//1 121//1 +f 127//1 126//1 123//1 +f 125//1 128//1 124//1 +f 129//1 126//1 127//1 +f 125//1 130//1 128//1 +f 131//1 130//1 125//1 +f 129//1 132//1 126//1 +f 133//1 132//1 129//1 +f 131//1 134//1 130//1 +f 135//1 132//1 133//1 +f 131//1 136//1 134//1 +f 137//1 132//1 135//1 +f 138//1 136//1 131//1 +f 139//1 132//1 137//1 +f 138//1 140//1 136//1 +f 139//1 141//1 132//1 +f 142//1 141//1 139//1 +f 138//1 143//1 140//1 +f 144//1 141//1 142//1 +f 145//1 141//1 144//1 +f 138//1 146//1 143//1 +f 147//1 141//1 145//1 +f 138//1 148//1 146//1 +f 149//1 141//1 147//1 +f 150//1 141//1 149//1 +f 138//1 151//1 148//1 +f 151//1 141//1 150//1 +f 138//1 141//1 151//1 +f 152//1 141//1 138//1 +f 152//1 153//1 141//1 +f 154//1 153//1 152//1 +f 154//1 155//1 153//1 +f 156//1 155//1 154//1 +f 156//1 157//1 155//1 +f 158//1 157//1 156//1 +f 158//1 159//1 157//1 +f 158//1 160//1 159//1 +f 161//1 160//1 158//1 +f 161//1 162//1 160//1 +f 163//1 162//1 161//1 +f 163//1 164//1 162//1 +f 165//1 164//1 163//1 +f 165//1 166//1 164//1 +f 167//1 166//1 165//1 +f 167//1 168//1 166//1 +f 169//1 168//1 167//1 +f 169//1 170//1 168//1 +f 171//1 170//1 169//1 +f 171//1 172//1 170//1 +f 173//1 172//1 171//1 +f 173//1 174//1 172//1 +f 175//1 174//1 173//1 +f 175//1 176//1 174//1 +f 177//1 176//1 175//1 +f 177//1 178//1 176//1 +f 179//1 178//1 177//1 +f 179//1 180//1 178//1 +f 179//1 181//1 180//1 +f 182//1 181//1 179//1 +f 182//1 183//1 181//1 +f 184//1 183//1 182//1 +f 184//1 185//1 183//1 +f 186//1 185//1 184//1 +f 186//1 187//1 185//1 +f 188//1 187//1 186//1 +f 188//1 189//1 187//1 +f 190//1 189//1 188//1 +f 190//1 191//1 189//1 +f 192//1 191//1 190//1 +f 192//1 193//1 191//1 +f 192//1 194//1 193//1 +f 195//1 194//1 192//1 +f 195//1 196//1 194//1 +f 197//1 196//1 195//1 +f 197//1 198//1 196//1 +f 197//1 199//1 198//1 +f 200//1 199//1 197//1 +f 200//1 201//1 199//1 +f 202//1 201//1 200//1 +f 202//1 203//1 201//1 +f 204//1 203//1 202//1 +f 205//1 203//1 204//1 +f 206//1 203//1 205//1 +f 207//1 203//1 206//1 +f 207//1 208//1 203//1 +f 209//1 208//1 207//1 +f 209//1 210//1 208//1 +f 211//1 210//1 209//1 +f 211//1 212//1 210//1 +f 213//1 212//1 211//1 +f 213//1 214//1 212//1 +f 215//1 214//1 213//1 +f 215//1 216//1 214//1 +f 217//1 216//1 215//1 +f 217//1 218//1 216//1 +f 218//1 219//1 216//1 +f 220//1 221//1 217//1 +f 221//1 218//1 217//1 +f 222//1 219//1 218//1 +f 220//1 223//1 221//1 +f 224//1 219//1 222//1 +f 220//1 225//1 223//1 +f 226//1 219//1 224//1 +f 220//1 227//1 225//1 +f 228//1 219//1 226//1 +f 220//1 229//1 227//1 +f 230//1 219//1 228//1 +f 220//1 231//1 229//1 +f 232//1 219//1 230//1 +f 232//1 233//1 219//1 +f 220//1 234//1 231//1 +f 235//1 233//1 232//1 +f 236//1 234//1 220//1 +f 237//1 233//1 235//1 +f 236//1 238//1 234//1 +f 239//1 233//1 237//1 +f 236//1 240//1 238//1 +f 239//1 241//1 233//1 +f 242//1 241//1 239//1 +f 243//1 240//1 236//1 +f 243//1 244//1 240//1 +f 245//1 241//1 242//1 +f 243//1 246//1 244//1 +f 245//1 247//1 241//1 +f 248//1 247//1 245//1 +f 249//1 246//1 243//1 +f 249//1 250//1 246//1 +f 251//1 247//1 248//1 +f 251//1 252//1 247//1 +f 253//1 252//1 251//1 +f 249//1 254//1 250//1 +f 255//1 254//1 249//1 +f 256//1 252//1 253//1 +f 255//1 257//1 254//1 +f 256//1 258//1 252//1 +f 259//1 257//1 255//1 +f 260//1 258//1 256//1 +f 259//1 261//1 257//1 +f 262//1 258//1 260//1 +f 262//1 263//1 258//1 +f 264//1 261//1 259//1 +f 264//1 265//1 261//1 +f 266//1 263//1 262//1 +f 266//1 267//1 263//1 +f 268//1 265//1 264//1 +f 268//1 269//1 265//1 +f 
270//1 267//1 266//1 +f 270//1 271//1 267//1 +f 272//1 269//1 268//1 +f 272//1 273//1 269//1 +f 274//1 271//1 270//1 +f 274//1 275//1 271//1 +f 276//1 273//1 272//1 +f 276//1 277//1 273//1 +f 278//1 275//1 274//1 +f 278//1 279//1 275//1 +f 280//1 277//1 276//1 +f 278//1 281//1 279//1 +f 280//1 282//1 277//1 +f 283//1 281//1 278//1 +f 284//1 282//1 280//1 +f 283//1 285//1 281//1 +f 284//1 286//1 282//1 +f 287//1 285//1 283//1 +f 288//1 286//1 284//1 +f 287//1 289//1 285//1 +f 290//1 286//1 288//1 +f 290//1 291//1 286//1 +f 287//1 292//1 289//1 +f 293//1 292//1 287//1 +f 294//1 291//1 290//1 +f 293//1 295//1 292//1 +f 294//1 296//1 291//1 +f 297//1 295//1 293//1 +f 298//1 296//1 294//1 +f 297//1 299//1 295//1 +f 298//1 300//1 296//1 +f 301//1 299//1 297//1 +f 302//1 300//1 298//1 +f 301//1 303//1 299//1 +f 302//1 304//1 300//1 +f 305//1 303//1 301//1 +f 306//1 304//1 302//1 +f 305//1 307//1 303//1 +f 306//1 308//1 304//1 +f 309//1 307//1 305//1 +f 310//1 308//1 306//1 +f 309//1 311//1 307//1 +f 310//1 312//1 308//1 +f 313//1 311//1 309//1 +f 313//1 314//1 311//1 +f 310//1 315//1 312//1 +f 316//1 314//1 313//1 +f 317//1 315//1 310//1 +f 316//1 318//1 314//1 +f 317//1 319//1 315//1 +f 316//1 320//1 318//1 +f 321//1 320//1 316//1 +f 321//1 322//1 320//1 +f 323//1 319//1 317//1 +f 323//1 324//1 319//1 +f 325//1 322//1 321//1 +f 325//1 326//1 322//1 +f 323//1 327//1 324//1 +f 325//1 328//1 326//1 +f 329//1 328//1 325//1 +f 329//1 330//1 328//1 +f 331//1 327//1 323//1 +f 331//1 332//1 327//1 +f 333//1 330//1 329//1 +f 333//1 334//1 330//1 +f 331//1 335//1 332//1 +f 333//1 336//1 334//1 +f 337//1 336//1 333//1 +f 331//1 338//1 335//1 +f 339//1 338//1 331//1 +f 337//1 340//1 336//1 +f 341//1 340//1 337//1 +f 339//1 342//1 338//1 +f 341//1 343//1 340//1 +f 344//1 343//1 341//1 +f 339//1 345//1 342//1 +f 346//1 345//1 339//1 +f 344//1 347//1 343//1 +f 348//1 347//1 344//1 +f 346//1 349//1 345//1 +f 350//1 347//1 348//1 +f 346//1 351//1 349//1 +f 350//1 352//1 347//1 +f 353//1 351//1 346//1 +f 354//1 352//1 350//1 +f 353//1 355//1 351//1 +f 354//1 356//1 352//1 +f 357//1 356//1 354//1 +f 353//1 358//1 355//1 +f 353//1 359//1 358//1 +f 360//1 356//1 357//1 +f 361//1 359//1 353//1 +f 361//1 362//1 359//1 +f 360//1 363//1 356//1 +f 361//1 364//1 362//1 +f 365//1 363//1 360//1 +f 361//1 366//1 364//1 +f 361//1 367//1 366//1 +f 368//1 363//1 365//1 +f 361//1 369//1 367//1 +f 361//1 370//1 369//1 +f 368//1 371//1 363//1 +f 361//1 372//1 370//1 +f 373//1 371//1 368//1 +f 374//1 372//1 361//1 +f 374//1 375//1 372//1 +f 374//1 376//1 375//1 +f 377//1 371//1 373//1 +f 374//1 378//1 376//1 +f 374//1 379//1 378//1 +f 377//1 380//1 371//1 +f 381//1 380//1 377//1 +f 374//1 382//1 379//1 +f 374//1 383//1 382//1 +f 384//1 380//1 381//1 +f 374//1 385//1 383//1 +f 386//1 380//1 384//1 +f 374//1 387//1 385//1 +f 388//1 380//1 386//1 +f 374//1 389//1 387//1 +f 390//1 389//1 374//1 +f 388//1 391//1 380//1 +f 390//1 392//1 389//1 +f 393//1 391//1 388//1 +f 390//1 394//1 392//1 +f 395//1 391//1 393//1 +f 390//1 396//1 394//1 +f 390//1 397//1 396//1 +f 398//1 391//1 395//1 +f 390//1 399//1 397//1 +f 390//1 400//1 399//1 +f 401//1 391//1 398//1 +f 390//1 402//1 400//1 +f 403//1 391//1 401//1 +f 390//1 403//1 402//1 +f 390//1 391//1 403//1 +f 390//1 404//1 391//1 +f 405//1 404//1 390//1 +f 405//1 406//1 404//1 +f 405//1 407//1 406//1 +f 408//1 407//1 405//1 +f 408//1 409//1 407//1 +f 410//1 409//1 408//1 +f 410//1 411//1 409//1 +f 410//1 412//1 411//1 +f 413//1 412//1 410//1 +f 413//1 414//1 412//1 +f 413//1 415//1 414//1 +f 
413//1 416//1 415//1 +f 417//1 416//1 413//1 +f 417//1 418//1 416//1 +f 417//1 419//1 418//1 +f 417//1 420//1 419//1 +f 421//1 420//1 417//1 +f 421//1 422//1 420//1 +f 421//1 423//1 422//1 +f 421//1 424//1 423//1 +f 425//1 424//1 421//1 +f 425//1 426//1 424//1 +f 425//1 427//1 426//1 +f 428//1 427//1 425//1 +f 428//1 429//1 427//1 +f 430//1 429//1 428//1 +f 430//1 431//1 429//1 +f 432//1 431//1 430//1 +f 432//1 433//1 431//1 +f 434//1 433//1 432//1 +f 434//1 435//1 433//1 +f 436//1 435//1 434//1 +f 436//1 437//1 435//1 +f 438//1 437//1 436//1 +f 438//1 439//1 437//1 +f 438//1 440//1 439//1 +f 441//1 440//1 438//1 +f 441//1 442//1 440//1 +f 443//1 442//1 441//1 +f 443//1 444//1 442//1 diff --git a/alphanumeric/9.mtl b/alphanumeric/9.mtl new file mode 100644 index 0000000..5fecbcc --- /dev/null +++ b/alphanumeric/9.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.029 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/9.obj b/alphanumeric/9.obj new file mode 100644 index 0000000..428fcea --- /dev/null +++ b/alphanumeric/9.obj @@ -0,0 +1,657 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib 9.mtl +o Curve.006_Curve.086 +v -0.000578 0.000000 -0.008333 +v -0.000373 0.000000 -0.008333 +v -0.000479 0.000000 -0.008333 +v -0.000263 0.000000 -0.008332 +v -0.000670 0.000000 -0.008332 +v -0.000149 0.000000 -0.008332 +v -0.000754 0.000000 -0.008331 +v -0.000032 0.000000 -0.008330 +v -0.000827 0.000000 -0.008329 +v 0.000085 0.000000 -0.008327 +v -0.000889 0.000000 -0.008326 +v 0.000631 0.000000 -0.008298 +v -0.000938 0.000000 -0.008324 +v -0.000973 0.000000 -0.008321 +v -0.001380 0.000000 -0.008267 +v 0.001156 0.000000 -0.008240 +v -0.001765 0.000000 -0.008203 +v 0.001661 0.000000 -0.008151 +v -0.002130 0.000000 -0.008130 +v 0.002146 0.000000 -0.008032 +v -0.002478 0.000000 -0.008046 +v -0.002810 0.000000 -0.007951 +v 0.002612 0.000000 -0.007882 +v -0.003127 0.000000 -0.007845 +v 0.003058 0.000000 -0.007702 +v -0.003430 0.000000 -0.007726 +v -0.003723 0.000000 -0.007595 +v 0.003486 0.000000 -0.007491 +v -0.004006 0.000000 -0.007450 +v 0.003894 0.000000 -0.007248 +v -0.004281 0.000000 -0.007291 +v -0.004549 0.000000 -0.007117 +v 0.004285 0.000000 -0.006975 +v -0.004813 0.000000 -0.006928 +v 0.004657 0.000000 -0.006671 +v -0.004965 0.000000 -0.006806 +v -0.005112 0.000000 -0.006681 +v -0.005254 0.000000 -0.006550 +v 0.005011 0.000000 -0.006336 +v -0.005389 0.000000 -0.006415 +v -0.005519 0.000000 -0.006275 +v 0.005348 0.000000 -0.005970 +v -0.005642 0.000000 -0.006130 +v -0.005760 0.000000 -0.005981 +v -0.005872 0.000000 -0.005826 +v -0.000268 0.000000 -0.005818 +v -0.000060 0.000000 -0.005813 +v 0.005607 0.000000 -0.005632 +v -0.005979 0.000000 -0.005666 +v -0.000471 0.000000 -0.005817 +v -0.000665 0.000000 -0.005808 +v 0.000146 0.000000 -0.005802 +v -0.000845 0.000000 -0.005793 +v 0.000348 0.000000 -0.005784 +v -0.001006 0.000000 -0.005770 +v 0.000538 0.000000 -0.005760 +v -0.001144 0.000000 -0.005740 +v 0.000713 0.000000 -0.005729 +v -0.001438 0.000000 -0.005640 +v 0.000867 0.000000 -0.005693 +v 0.000997 0.000000 -0.005652 +v -0.006080 0.000000 -0.005500 +v 0.001296 0.000000 -0.005523 +v -0.001712 0.000000 -0.005510 +v 0.005845 0.000000 -0.005257 +v 0.001570 0.000000 -0.005373 +v -0.001964 0.000000 -0.005352 +v -0.006175 0.000000 -0.005329 +v 0.001820 0.000000 -0.005203 +v -0.002194 0.000000 -0.005167 
+v -0.006265 0.000000 -0.005152 +v 0.006060 0.000000 -0.004847 +v 0.002045 0.000000 -0.005014 +v -0.002400 0.000000 -0.004957 +v -0.006343 0.000000 -0.004988 +v 0.002244 0.000000 -0.004806 +v -0.006413 0.000000 -0.004831 +v -0.002583 0.000000 -0.004723 +v 0.006251 0.000000 -0.004405 +v -0.006476 0.000000 -0.004680 +v 0.002417 0.000000 -0.004578 +v -0.002740 0.000000 -0.004466 +v -0.006532 0.000000 -0.004533 +v 0.002565 0.000000 -0.004333 +v -0.006582 0.000000 -0.004389 +v -0.002871 0.000000 -0.004188 +v 0.006419 0.000000 -0.003933 +v -0.006626 0.000000 -0.004245 +v 0.002686 0.000000 -0.004070 +v -0.006664 0.000000 -0.004101 +v -0.002974 0.000000 -0.003890 +v -0.006697 0.000000 -0.003954 +v 0.002782 0.000000 -0.003790 +v -0.006725 0.000000 -0.003803 +v 0.006563 0.000000 -0.003432 +v -0.003050 0.000000 -0.003573 +v -0.006750 0.000000 -0.003645 +v 0.002851 0.000000 -0.003492 +v -0.006771 0.000000 -0.003481 +v -0.003096 0.000000 -0.003239 +v 0.002892 0.000000 -0.003178 +v -0.006788 0.000000 -0.003307 +v 0.006681 0.000000 -0.002905 +v -0.006812 0.000000 -0.002733 +v -0.003113 0.000000 -0.002889 +v 0.002907 0.000000 -0.002848 +v 0.006774 0.000000 -0.002353 +v -0.003109 0.000000 -0.002673 +v 0.002880 0.000000 -0.002394 +v -0.006782 0.000000 -0.002172 +v -0.003095 0.000000 -0.002467 +v -0.003073 0.000000 -0.002270 +v 0.002799 0.000000 -0.001969 +v 0.006842 0.000000 -0.001780 +v -0.003041 0.000000 -0.002080 +v -0.006700 0.000000 -0.001628 +v -0.002998 0.000000 -0.001898 +v 0.002666 0.000000 -0.001575 +v -0.002945 0.000000 -0.001722 +v 0.006883 0.000000 -0.001187 +v -0.002881 0.000000 -0.001550 +v -0.006567 0.000000 -0.001102 +v 0.002483 0.000000 -0.001215 +v -0.002806 0.000000 -0.001382 +v -0.002719 0.000000 -0.001217 +v -0.002620 0.000000 -0.001053 +v 0.002254 0.000000 -0.000891 +v 0.006896 0.000000 -0.000576 +v -0.006385 0.000000 -0.000598 +v -0.002509 0.000000 -0.000890 +v 0.001981 0.000000 -0.000606 +v -0.002384 0.000000 -0.000726 +v -0.002273 0.000000 -0.000599 +v 0.001667 0.000000 -0.000362 +v -0.002153 0.000000 -0.000481 +v -0.006156 0.000000 -0.000120 +v 0.006882 -0.000000 0.000050 +v -0.002023 0.000000 -0.000373 +v -0.001883 0.000000 -0.000275 +v 0.001316 0.000000 -0.000163 +v -0.001733 0.000000 -0.000188 +v -0.001574 0.000000 -0.000110 +v 0.000929 0.000000 -0.000009 +v -0.005881 -0.000000 0.000330 +v -0.001405 0.000000 -0.000041 +v -0.001226 -0.000000 0.000017 +v 0.000510 -0.000000 0.000096 +v -0.001037 -0.000000 0.000065 +v 0.006836 -0.000000 0.000774 +v -0.000840 -0.000000 0.000103 +v 0.000061 -0.000000 0.000150 +v -0.000632 -0.000000 0.000132 +v -0.000414 -0.000000 0.000150 +v -0.005563 -0.000000 0.000749 +v -0.005203 -0.000000 0.001133 +v 0.002906 -0.000000 0.001259 +v 0.006765 -0.000000 0.001464 +v -0.004801 -0.000000 0.001480 +v 0.002885 -0.000000 0.001263 +v 0.002919 -0.000000 0.001261 +v 0.002940 -0.000000 0.001301 +v 0.002858 -0.000000 0.001274 +v 0.002825 -0.000000 0.001291 +v 0.002786 -0.000000 0.001313 +v 0.002954 -0.000000 0.001383 +v 0.002743 -0.000000 0.001341 +v 0.002696 -0.000000 0.001374 +v 0.002645 -0.000000 0.001412 +v 0.002961 -0.000000 0.001503 +v 0.002591 -0.000000 0.001454 +v 0.002534 -0.000000 0.001500 +v 0.006669 -0.000000 0.002121 +v -0.004361 -0.000000 0.001786 +v 0.002475 -0.000000 0.001549 +v 0.002962 -0.000000 0.001652 +v 0.002414 -0.000000 0.001602 +v 0.002205 -0.000000 0.001773 +v 0.002956 -0.000000 0.001824 +v 0.001995 -0.000000 0.001928 +v -0.003883 -0.000000 0.002049 +v 0.002945 -0.000000 0.002014 +v 0.001780 -0.000000 0.002066 +v 0.002929 -0.000000 
0.002213 +v -0.003634 -0.000000 0.002165 +v 0.001559 -0.000000 0.002189 +v 0.006549 -0.000000 0.002743 +v -0.003382 -0.000000 0.002268 +v 0.001331 -0.000000 0.002298 +v 0.002908 -0.000000 0.002416 +v -0.003129 -0.000000 0.002360 +v 0.001094 -0.000000 0.002392 +v -0.002872 -0.000000 0.002441 +v 0.000848 -0.000000 0.002472 +v 0.002884 -0.000000 0.002617 +v -0.002611 -0.000000 0.002509 +v 0.000589 -0.000000 0.002539 +v -0.002345 -0.000000 0.002567 +v 0.000317 -0.000000 0.002593 +v -0.002074 -0.000000 0.002613 +v 0.000031 -0.000000 0.002635 +v -0.001795 -0.000000 0.002648 +v 0.002855 -0.000000 0.002808 +v -0.000271 -0.000000 0.002665 +v -0.001508 -0.000000 0.002673 +v -0.000591 -0.000000 0.002683 +v -0.001212 -0.000000 0.002687 +v -0.000907 -0.000000 0.002690 +v 0.006404 -0.000000 0.003332 +v 0.002824 -0.000000 0.002983 +v 0.002790 -0.000000 0.003136 +v 0.002679 -0.000000 0.003514 +v 0.006235 -0.000000 0.003886 +v 0.002549 -0.000000 0.003861 +v 0.002399 -0.000000 0.004177 +v 0.006041 -0.000000 0.004407 +v -0.005533 -0.000000 0.004174 +v -0.005100 -0.000000 0.004174 +v -0.005323 -0.000000 0.004174 +v -0.005728 -0.000000 0.004175 +v -0.004867 -0.000000 0.004175 +v -0.005906 -0.000000 0.004176 +v -0.004624 -0.000000 0.004177 +v -0.006063 -0.000000 0.004177 +v -0.002837 -0.000000 0.004194 +v -0.006199 -0.000000 0.004179 +v 0.002229 -0.000000 0.004463 +v -0.006310 -0.000000 0.004181 +v -0.006395 -0.000000 0.004183 +v -0.006452 -0.000000 0.004186 +v -0.006477 -0.000000 0.004189 +v -0.006495 -0.000000 0.004208 +v -0.002836 -0.000000 0.004198 +v -0.002833 -0.000000 0.004208 +v -0.006507 -0.000000 0.004245 +v -0.002828 -0.000000 0.004222 +v -0.002822 -0.000000 0.004240 +v -0.002815 -0.000000 0.004260 +v -0.006512 -0.000000 0.004300 +v -0.002808 -0.000000 0.004283 +v -0.002800 -0.000000 0.004304 +v -0.006511 -0.000000 0.004369 +v -0.002794 -0.000000 0.004325 +v -0.002787 -0.000000 0.004343 +v -0.002783 -0.000000 0.004358 +v -0.002779 -0.000000 0.004367 +v -0.002778 -0.000000 0.004371 +v -0.006505 -0.000000 0.004451 +v -0.002752 -0.000000 0.004436 +v 0.005822 -0.000000 0.004893 +v -0.002714 -0.000000 0.004511 +v -0.006493 -0.000000 0.004543 +v 0.002038 -0.000000 0.004718 +v -0.002666 -0.000000 0.004593 +v -0.006475 -0.000000 0.004645 +v -0.002612 -0.000000 0.004679 +v -0.006454 -0.000000 0.004753 +v -0.002551 -0.000000 0.004768 +v 0.001826 -0.000000 0.004944 +v -0.006427 -0.000000 0.004866 +v -0.002485 -0.000000 0.004857 +v -0.002417 -0.000000 0.004945 +v -0.006396 -0.000000 0.004981 +v 0.005578 -0.000000 0.005345 +v 0.001593 -0.000000 0.005140 +v -0.002347 -0.000000 0.005029 +v -0.006362 -0.000000 0.005097 +v -0.002278 -0.000000 0.005107 +v -0.006324 -0.000000 0.005212 +v -0.002211 -0.000000 0.005176 +v 0.001338 -0.000000 0.005307 +v -0.002148 -0.000000 0.005235 +v -0.006255 -0.000000 0.005382 +v -0.002090 -0.000000 0.005282 +v -0.001970 -0.000000 0.005358 +v 0.001061 -0.000000 0.005446 +v 0.005310 -0.000000 0.005762 +v -0.001829 -0.000000 0.005429 +v -0.006166 -0.000000 0.005560 +v -0.001671 -0.000000 0.005493 +v 0.000761 -0.000000 0.005556 +v -0.001500 -0.000000 0.005549 +v -0.001316 -0.000000 0.005598 +v 0.000438 -0.000000 0.005639 +v -0.006060 -0.000000 0.005743 +v -0.001123 -0.000000 0.005640 +v 0.000091 -0.000000 0.005693 +v -0.000923 -0.000000 0.005673 +v -0.000719 -0.000000 0.005696 +v -0.000105 -0.000000 0.005710 +v -0.000513 -0.000000 0.005711 +v -0.000307 -0.000000 0.005716 +v -0.005937 -0.000000 0.005929 +v 0.005018 -0.000000 0.006145 +v -0.005801 -0.000000 0.006116 +v -0.005652 -0.000000 
0.006302 +v 0.004701 -0.000000 0.006493 +v -0.005493 -0.000000 0.006485 +v -0.005324 -0.000000 0.006663 +v 0.004379 -0.000000 0.006795 +v -0.005150 -0.000000 0.006832 +v 0.004040 -0.000000 0.007068 +v -0.004970 -0.000000 0.006993 +v -0.004786 -0.000000 0.007140 +v 0.003686 -0.000000 0.007314 +v -0.004601 -0.000000 0.007275 +v -0.004341 -0.000000 0.007439 +v 0.003313 -0.000000 0.007533 +v -0.004062 -0.000000 0.007590 +v 0.002921 -0.000000 0.007725 +v -0.003763 -0.000000 0.007729 +v 0.002509 -0.000000 0.007890 +v -0.003448 -0.000000 0.007853 +v -0.003117 -0.000000 0.007965 +v 0.002077 -0.000000 0.008028 +v -0.002770 -0.000000 0.008062 +v 0.001622 -0.000000 0.008141 +v -0.002409 -0.000000 0.008146 +v 0.001146 -0.000000 0.008228 +v -0.002036 -0.000000 0.008214 +v -0.001650 -0.000000 0.008268 +v 0.000645 -0.000000 0.008290 +v -0.001253 -0.000000 0.008307 +v 0.000119 -0.000000 0.008326 +v -0.000847 -0.000000 0.008331 +v -0.000432 -0.000000 0.008339 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.029 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 5//1 6//1 4//1 +f 7//1 6//1 5//1 +f 7//1 8//1 6//1 +f 9//1 8//1 7//1 +f 9//1 10//1 8//1 +f 11//1 10//1 9//1 +f 11//1 12//1 10//1 +f 13//1 12//1 11//1 +f 14//1 12//1 13//1 +f 15//1 12//1 14//1 +f 15//1 16//1 12//1 +f 17//1 16//1 15//1 +f 17//1 18//1 16//1 +f 19//1 18//1 17//1 +f 19//1 20//1 18//1 +f 21//1 20//1 19//1 +f 22//1 20//1 21//1 +f 22//1 23//1 20//1 +f 24//1 23//1 22//1 +f 24//1 25//1 23//1 +f 26//1 25//1 24//1 +f 27//1 25//1 26//1 +f 27//1 28//1 25//1 +f 29//1 28//1 27//1 +f 29//1 30//1 28//1 +f 31//1 30//1 29//1 +f 32//1 30//1 31//1 +f 32//1 33//1 30//1 +f 34//1 33//1 32//1 +f 34//1 35//1 33//1 +f 36//1 35//1 34//1 +f 37//1 35//1 36//1 +f 38//1 35//1 37//1 +f 38//1 39//1 35//1 +f 40//1 39//1 38//1 +f 41//1 39//1 40//1 +f 41//1 42//1 39//1 +f 43//1 42//1 41//1 +f 44//1 42//1 43//1 +f 45//1 42//1 44//1 +f 45//1 46//1 42//1 +f 46//1 47//1 42//1 +f 47//1 48//1 42//1 +f 49//1 50//1 45//1 +f 50//1 46//1 45//1 +f 49//1 51//1 50//1 +f 52//1 48//1 47//1 +f 49//1 53//1 51//1 +f 54//1 48//1 52//1 +f 49//1 55//1 53//1 +f 56//1 48//1 54//1 +f 49//1 57//1 55//1 +f 58//1 48//1 56//1 +f 49//1 59//1 57//1 +f 60//1 48//1 58//1 +f 61//1 48//1 60//1 +f 62//1 59//1 49//1 +f 63//1 48//1 61//1 +f 62//1 64//1 59//1 +f 63//1 65//1 48//1 +f 66//1 65//1 63//1 +f 62//1 67//1 64//1 +f 68//1 67//1 62//1 +f 69//1 65//1 66//1 +f 68//1 70//1 67//1 +f 71//1 70//1 68//1 +f 69//1 72//1 65//1 +f 73//1 72//1 69//1 +f 71//1 74//1 70//1 +f 75//1 74//1 71//1 +f 76//1 72//1 73//1 +f 77//1 74//1 75//1 +f 77//1 78//1 74//1 +f 76//1 79//1 72//1 +f 80//1 78//1 77//1 +f 81//1 79//1 76//1 +f 80//1 82//1 78//1 +f 83//1 82//1 80//1 +f 84//1 79//1 81//1 +f 85//1 82//1 83//1 +f 85//1 86//1 82//1 +f 84//1 87//1 79//1 +f 88//1 86//1 85//1 +f 89//1 87//1 84//1 +f 90//1 86//1 88//1 +f 90//1 91//1 86//1 +f 92//1 91//1 90//1 +f 93//1 87//1 89//1 +f 94//1 91//1 92//1 +f 93//1 95//1 87//1 +f 94//1 96//1 91//1 +f 97//1 96//1 94//1 +f 98//1 95//1 93//1 +f 99//1 96//1 97//1 +f 99//1 100//1 96//1 +f 101//1 95//1 98//1 +f 102//1 100//1 99//1 +f 101//1 103//1 95//1 +f 104//1 100//1 102//1 +f 104//1 105//1 100//1 +f 106//1 103//1 101//1 +f 106//1 107//1 103//1 +f 104//1 108//1 105//1 +f 109//1 107//1 106//1 +f 110//1 108//1 104//1 +f 110//1 111//1 108//1 +f 110//1 112//1 111//1 +f 113//1 107//1 109//1 +f 113//1 114//1 107//1 +f 110//1 115//1 112//1 +f 116//1 115//1 110//1 +f 116//1 117//1 115//1 +f 118//1 114//1 113//1 +f 116//1 119//1 117//1 +f 118//1 120//1 114//1 +f 116//1 121//1 119//1 +f 122//1 
121//1 116//1 +f 123//1 120//1 118//1 +f 122//1 124//1 121//1 +f 122//1 125//1 124//1 +f 122//1 126//1 125//1 +f 127//1 120//1 123//1 +f 127//1 128//1 120//1 +f 129//1 126//1 122//1 +f 129//1 130//1 126//1 +f 131//1 128//1 127//1 +f 129//1 132//1 130//1 +f 129//1 133//1 132//1 +f 134//1 128//1 131//1 +f 129//1 135//1 133//1 +f 136//1 135//1 129//1 +f 134//1 137//1 128//1 +f 136//1 138//1 135//1 +f 136//1 139//1 138//1 +f 140//1 137//1 134//1 +f 136//1 141//1 139//1 +f 136//1 142//1 141//1 +f 143//1 137//1 140//1 +f 144//1 142//1 136//1 +f 144//1 145//1 142//1 +f 144//1 146//1 145//1 +f 147//1 137//1 143//1 +f 144//1 148//1 146//1 +f 147//1 149//1 137//1 +f 144//1 150//1 148//1 +f 151//1 149//1 147//1 +f 144//1 152//1 150//1 +f 144//1 153//1 152//1 +f 144//1 151//1 153//1 +f 144//1 149//1 151//1 +f 154//1 149//1 144//1 +f 155//1 149//1 154//1 +f 155//1 156//1 149//1 +f 156//1 157//1 149//1 +f 158//1 156//1 155//1 +f 158//1 159//1 156//1 +f 160//1 157//1 156//1 +f 161//1 157//1 160//1 +f 158//1 162//1 159//1 +f 158//1 163//1 162//1 +f 158//1 164//1 163//1 +f 165//1 157//1 161//1 +f 158//1 166//1 164//1 +f 158//1 167//1 166//1 +f 158//1 168//1 167//1 +f 169//1 157//1 165//1 +f 158//1 170//1 168//1 +f 158//1 171//1 170//1 +f 169//1 172//1 157//1 +f 173//1 171//1 158//1 +f 173//1 174//1 171//1 +f 175//1 172//1 169//1 +f 173//1 176//1 174//1 +f 173//1 177//1 176//1 +f 178//1 172//1 175//1 +f 173//1 179//1 177//1 +f 180//1 179//1 173//1 +f 181//1 172//1 178//1 +f 180//1 182//1 179//1 +f 183//1 172//1 181//1 +f 184//1 182//1 180//1 +f 184//1 185//1 182//1 +f 183//1 186//1 172//1 +f 187//1 185//1 184//1 +f 187//1 188//1 185//1 +f 189//1 186//1 183//1 +f 190//1 188//1 187//1 +f 190//1 191//1 188//1 +f 192//1 191//1 190//1 +f 192//1 193//1 191//1 +f 194//1 186//1 189//1 +f 195//1 193//1 192//1 +f 195//1 196//1 193//1 +f 197//1 196//1 195//1 +f 197//1 198//1 196//1 +f 199//1 198//1 197//1 +f 199//1 200//1 198//1 +f 201//1 200//1 199//1 +f 202//1 186//1 194//1 +f 201//1 203//1 200//1 +f 204//1 203//1 201//1 +f 204//1 205//1 203//1 +f 206//1 205//1 204//1 +f 206//1 207//1 205//1 +f 202//1 208//1 186//1 +f 209//1 208//1 202//1 +f 210//1 208//1 209//1 +f 211//1 208//1 210//1 +f 211//1 212//1 208//1 +f 213//1 212//1 211//1 +f 214//1 212//1 213//1 +f 214//1 215//1 212//1 +f 216//1 217//1 218//1 +f 219//1 217//1 216//1 +f 219//1 220//1 217//1 +f 221//1 220//1 219//1 +f 221//1 222//1 220//1 +f 223//1 222//1 221//1 +f 223//1 224//1 222//1 +f 225//1 224//1 223//1 +f 226//1 215//1 214//1 +f 227//1 224//1 225//1 +f 228//1 224//1 227//1 +f 229//1 224//1 228//1 +f 230//1 224//1 229//1 +f 231//1 224//1 230//1 +f 231//1 232//1 224//1 +f 231//1 233//1 232//1 +f 234//1 233//1 231//1 +f 234//1 235//1 233//1 +f 234//1 236//1 235//1 +f 234//1 237//1 236//1 +f 238//1 237//1 234//1 +f 238//1 239//1 237//1 +f 238//1 240//1 239//1 +f 241//1 240//1 238//1 +f 241//1 242//1 240//1 +f 241//1 243//1 242//1 +f 241//1 244//1 243//1 +f 241//1 245//1 244//1 +f 241//1 246//1 245//1 +f 247//1 246//1 241//1 +f 247//1 248//1 246//1 +f 226//1 249//1 215//1 +f 247//1 250//1 248//1 +f 251//1 250//1 247//1 +f 252//1 249//1 226//1 +f 251//1 253//1 250//1 +f 254//1 253//1 251//1 +f 254//1 255//1 253//1 +f 256//1 255//1 254//1 +f 256//1 257//1 255//1 +f 258//1 249//1 252//1 +f 259//1 257//1 256//1 +f 259//1 260//1 257//1 +f 259//1 261//1 260//1 +f 262//1 261//1 259//1 +f 258//1 263//1 249//1 +f 264//1 263//1 258//1 +f 262//1 265//1 261//1 +f 266//1 265//1 262//1 +f 266//1 267//1 265//1 +f 268//1 267//1 266//1 +f 268//1 269//1 267//1 +f 270//1 
263//1 264//1 +f 268//1 271//1 269//1 +f 272//1 271//1 268//1 +f 272//1 273//1 271//1 +f 272//1 274//1 273//1 +f 275//1 263//1 270//1 +f 275//1 276//1 263//1 +f 272//1 277//1 274//1 +f 278//1 277//1 272//1 +f 278//1 279//1 277//1 +f 280//1 276//1 275//1 +f 278//1 281//1 279//1 +f 278//1 282//1 281//1 +f 283//1 276//1 280//1 +f 284//1 282//1 278//1 +f 284//1 285//1 282//1 +f 286//1 276//1 283//1 +f 284//1 287//1 285//1 +f 284//1 288//1 287//1 +f 289//1 276//1 286//1 +f 284//1 290//1 288//1 +f 291//1 276//1 289//1 +f 284//1 291//1 290//1 +f 284//1 276//1 291//1 +f 292//1 276//1 284//1 +f 292//1 293//1 276//1 +f 294//1 293//1 292//1 +f 295//1 293//1 294//1 +f 295//1 296//1 293//1 +f 297//1 296//1 295//1 +f 298//1 296//1 297//1 +f 298//1 299//1 296//1 +f 300//1 299//1 298//1 +f 300//1 301//1 299//1 +f 302//1 301//1 300//1 +f 303//1 301//1 302//1 +f 303//1 304//1 301//1 +f 305//1 304//1 303//1 +f 306//1 304//1 305//1 +f 306//1 307//1 304//1 +f 308//1 307//1 306//1 +f 308//1 309//1 307//1 +f 310//1 309//1 308//1 +f 310//1 311//1 309//1 +f 312//1 311//1 310//1 +f 313//1 311//1 312//1 +f 313//1 314//1 311//1 +f 315//1 314//1 313//1 +f 315//1 316//1 314//1 +f 317//1 316//1 315//1 +f 317//1 318//1 316//1 +f 319//1 318//1 317//1 +f 320//1 318//1 319//1 +f 320//1 321//1 318//1 +f 322//1 321//1 320//1 +f 322//1 323//1 321//1 +f 324//1 323//1 322//1 +f 324//1 325//1 323//1 diff --git a/alphanumeric/A.mtl b/alphanumeric/A.mtl new file mode 100644 index 0000000..6a19955 --- /dev/null +++ b/alphanumeric/A.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.033 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/A.obj b/alphanumeric/A.obj new file mode 100644 index 0000000..9b31465 --- /dev/null +++ b/alphanumeric/A.obj @@ -0,0 +1,364 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib A.mtl +o A +v -0.002804 0.000000 -0.008154 +v -0.002727 0.000000 -0.008213 +v -0.002782 0.000000 -0.008213 +v -0.002576 0.000000 -0.008213 +v -0.002348 0.000000 -0.008213 +v -0.002062 0.000000 -0.008213 +v -0.001738 0.000000 -0.008213 +v -0.001394 0.000000 -0.008213 +v -0.001050 0.000000 -0.008213 +v -0.000726 0.000000 -0.008213 +v -0.000440 0.000000 -0.008213 +v -0.000212 0.000000 -0.008213 +v -0.000061 0.000000 -0.008213 +v -0.000006 0.000000 -0.008213 +v 0.000394 0.000000 -0.008212 +v 0.000772 0.000000 -0.008209 +v 0.001125 0.000000 -0.008203 +v 0.001452 0.000000 -0.008197 +v 0.001749 0.000000 -0.008189 +v 0.002016 0.000000 -0.008178 +v 0.002250 0.000000 -0.008166 +v 0.002448 0.000000 -0.008154 +v 0.002608 0.000000 -0.008139 +v -0.002865 0.000000 -0.007989 +v 0.002729 0.000000 -0.008124 +v 0.002809 0.000000 -0.008106 +v 0.002844 0.000000 -0.008087 +v 0.002981 0.000000 -0.007745 +v -0.002959 0.000000 -0.007741 +v 0.003322 0.000000 -0.006858 +v -0.003075 0.000000 -0.007431 +v -0.003208 0.000000 -0.007079 +v -0.003348 0.000000 -0.006706 +v 0.003826 0.000000 -0.005536 +v -0.003488 0.000000 -0.006333 +v -0.003620 0.000000 -0.005981 +v -0.003737 0.000000 -0.005671 +v -0.003830 0.000000 -0.005424 +v 0.004452 0.000000 -0.003888 +v -0.003892 0.000000 -0.005260 +v -0.003914 0.000000 -0.005201 +v -0.004085 0.000000 -0.004744 +v -0.004283 0.000000 -0.004213 +v -0.004506 0.000000 -0.003619 +v -0.000042 0.000000 -0.003732 +v 0.005158 0.000000 -0.002021 +v -0.000060 0.000000 -0.003730 +v -0.000026 0.000000 -0.003717 +v -0.000083 0.000000 
-0.003699 +v -0.000006 0.000000 -0.003695 +v -0.000115 0.000000 -0.003630 +v 0.000011 0.000000 -0.003661 +v 0.000042 0.000000 -0.003578 +v -0.000159 0.000000 -0.003508 +v -0.004749 0.000000 -0.002971 +v 0.000087 0.000000 -0.003452 +v -0.000219 0.000000 -0.003328 +v 0.000144 0.000000 -0.003284 +v -0.000298 0.000000 -0.003074 +v 0.000212 0.000000 -0.003080 +v 0.000291 0.000000 -0.002840 +v -0.000402 0.000000 -0.002736 +v -0.005009 0.000000 -0.002278 +v 0.000378 0.000000 -0.002570 +v -0.000532 0.000000 -0.002306 +v 0.000474 0.000000 -0.002273 +v -0.000694 0.000000 -0.001770 +v -0.005282 0.000000 -0.001552 +v 0.000577 0.000000 -0.001952 +v 0.005904 0.000000 -0.000045 +v 0.000685 0.000000 -0.001611 +v -0.000890 0.000000 -0.001119 +v 0.000798 0.000000 -0.001254 +v -0.005563 0.000000 -0.000801 +v 0.000915 0.000000 -0.000883 +v -0.000986 0.000000 -0.000804 +v 0.001749 -0.000000 0.001867 +v -0.001079 0.000000 -0.000496 +v -0.005850 0.000000 -0.000036 +v -0.001170 0.000000 -0.000198 +v -0.001257 -0.000000 0.000087 +v 0.006649 -0.000000 0.001932 +v -0.006139 -0.000000 0.000735 +v -0.001339 -0.000000 0.000356 +v -0.001416 -0.000000 0.000605 +v -0.001485 -0.000000 0.000832 +v -0.006425 -0.000000 0.001499 +v -0.001547 -0.000000 0.001034 +v -0.001600 -0.000000 0.001206 +v -0.001644 -0.000000 0.001346 +v -0.001677 -0.000000 0.001452 +v -0.001699 -0.000000 0.001518 +v -0.006705 -0.000000 0.002249 +v -0.001811 -0.000000 0.001867 +v -0.000031 -0.000000 0.001867 +v 0.007351 -0.000000 0.003801 +v -0.006976 -0.000000 0.002974 +v -0.007239 -0.000000 0.003669 +v -0.007491 -0.000000 0.004337 +v 0.007970 -0.000000 0.005453 +v -0.007731 -0.000000 0.004973 +v -0.007955 -0.000000 0.005569 +v -0.002918 -0.000000 0.005475 +v -0.000044 -0.000000 0.005500 +v 0.002831 -0.000000 0.005537 +v 0.008465 -0.000000 0.006781 +v -0.002923 -0.000000 0.005492 +v -0.002937 -0.000000 0.005537 +v -0.002957 -0.000000 0.005606 +v 0.003254 -0.000000 0.006881 +v -0.008161 -0.000000 0.006120 +v -0.002983 -0.000000 0.005691 +v -0.003012 -0.000000 0.005788 +v -0.003043 -0.000000 0.005892 +v -0.003074 -0.000000 0.005995 +v -0.003103 -0.000000 0.006092 +v -0.003128 -0.000000 0.006179 +v -0.008348 -0.000000 0.006617 +v -0.003149 -0.000000 0.006247 +v -0.003162 -0.000000 0.006293 +v -0.003167 -0.000000 0.006309 +v -0.003203 -0.000000 0.006429 +v -0.003239 -0.000000 0.006555 +v -0.003277 -0.000000 0.006682 +v -0.008512 -0.000000 0.007059 +v -0.003315 -0.000000 0.006810 +v 0.008794 -0.000000 0.007675 +v -0.003354 -0.000000 0.006938 +v 0.003665 -0.000000 0.008213 +v -0.003391 -0.000000 0.007063 +v -0.008652 -0.000000 0.007434 +v -0.003428 -0.000000 0.007185 +v -0.003463 -0.000000 0.007301 +v -0.003496 -0.000000 0.007409 +v -0.003527 -0.000000 0.007510 +v -0.008764 -0.000000 0.007739 +v -0.003554 -0.000000 0.007600 +v -0.003578 -0.000000 0.007678 +v 0.008917 -0.000000 0.008025 +v -0.003727 -0.000000 0.008213 +v -0.008848 -0.000000 0.007965 +v -0.008899 -0.000000 0.008110 +v 0.008900 -0.000000 0.008043 +v 0.008844 -0.000000 0.008062 +v 0.008749 -0.000000 0.008079 +v 0.008616 -0.000000 0.008096 +v 0.008446 -0.000000 0.008110 +v -0.008917 -0.000000 0.008163 +v 0.008240 -0.000000 0.008126 +v 0.008000 -0.000000 0.008139 +v 0.007725 -0.000000 0.008151 +v 0.007417 -0.000000 0.008163 +v 0.007077 -0.000000 0.008172 +v -0.008893 -0.000000 0.008168 +v -0.008824 -0.000000 0.008174 +v 0.006706 -0.000000 0.008181 +v -0.008712 -0.000000 0.008181 +v 0.006304 -0.000000 0.008188 +v -0.008561 -0.000000 0.008187 +v -0.008374 -0.000000 0.008191 +v -0.008155 -0.000000 
0.008197 +v -0.007906 -0.000000 0.008202 +v -0.007630 -0.000000 0.008205 +v -0.007332 -0.000000 0.008207 +v -0.007013 -0.000000 0.008211 +v -0.006677 -0.000000 0.008212 +v -0.006329 -0.000000 0.008213 +v -0.006277 -0.000000 0.008213 +v -0.006136 -0.000000 0.008213 +v -0.005922 -0.000000 0.008213 +v -0.005654 -0.000000 0.008213 +v -0.005350 -0.000000 0.008213 +v -0.005028 -0.000000 0.008213 +v -0.004706 -0.000000 0.008213 +v -0.004402 -0.000000 0.008213 +v -0.004134 -0.000000 0.008213 +v -0.003920 -0.000000 0.008213 +v -0.003779 -0.000000 0.008213 +vn -0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.033 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 1//1 19//1 18//1 +f 1//1 20//1 19//1 +f 1//1 21//1 20//1 +f 1//1 22//1 21//1 +f 1//1 23//1 22//1 +f 24//1 23//1 1//1 +f 24//1 25//1 23//1 +f 24//1 26//1 25//1 +f 24//1 27//1 26//1 +f 24//1 28//1 27//1 +f 29//1 28//1 24//1 +f 29//1 30//1 28//1 +f 31//1 30//1 29//1 +f 32//1 30//1 31//1 +f 33//1 30//1 32//1 +f 33//1 34//1 30//1 +f 35//1 34//1 33//1 +f 36//1 34//1 35//1 +f 37//1 34//1 36//1 +f 38//1 34//1 37//1 +f 38//1 39//1 34//1 +f 40//1 39//1 38//1 +f 41//1 39//1 40//1 +f 42//1 39//1 41//1 +f 43//1 39//1 42//1 +f 44//1 39//1 43//1 +f 44//1 45//1 39//1 +f 45//1 46//1 39//1 +f 44//1 47//1 45//1 +f 48//1 46//1 45//1 +f 44//1 49//1 47//1 +f 50//1 46//1 48//1 +f 44//1 51//1 49//1 +f 52//1 46//1 50//1 +f 53//1 46//1 52//1 +f 44//1 54//1 51//1 +f 55//1 54//1 44//1 +f 56//1 46//1 53//1 +f 55//1 57//1 54//1 +f 58//1 46//1 56//1 +f 55//1 59//1 57//1 +f 60//1 46//1 58//1 +f 61//1 46//1 60//1 +f 55//1 62//1 59//1 +f 63//1 62//1 55//1 +f 64//1 46//1 61//1 +f 63//1 65//1 62//1 +f 66//1 46//1 64//1 +f 63//1 67//1 65//1 +f 68//1 67//1 63//1 +f 69//1 46//1 66//1 +f 69//1 70//1 46//1 +f 71//1 70//1 69//1 +f 68//1 72//1 67//1 +f 73//1 70//1 71//1 +f 74//1 72//1 68//1 +f 75//1 70//1 73//1 +f 74//1 76//1 72//1 +f 77//1 70//1 75//1 +f 74//1 78//1 76//1 +f 79//1 78//1 74//1 +f 79//1 80//1 78//1 +f 79//1 81//1 80//1 +f 77//1 82//1 70//1 +f 83//1 81//1 79//1 +f 83//1 84//1 81//1 +f 83//1 85//1 84//1 +f 83//1 86//1 85//1 +f 87//1 86//1 83//1 +f 87//1 88//1 86//1 +f 87//1 89//1 88//1 +f 87//1 90//1 89//1 +f 87//1 91//1 90//1 +f 87//1 92//1 91//1 +f 93//1 92//1 87//1 +f 93//1 94//1 92//1 +f 93//1 95//1 94//1 +f 93//1 77//1 95//1 +f 93//1 82//1 77//1 +f 93//1 96//1 82//1 +f 97//1 96//1 93//1 +f 98//1 96//1 97//1 +f 99//1 96//1 98//1 +f 99//1 100//1 96//1 +f 101//1 100//1 99//1 +f 102//1 103//1 101//1 +f 103//1 100//1 101//1 +f 103//1 104//1 100//1 +f 104//1 105//1 100//1 +f 105//1 106//1 100//1 +f 102//1 107//1 103//1 +f 102//1 108//1 107//1 +f 102//1 109//1 108//1 +f 110//1 106//1 105//1 +f 111//1 109//1 102//1 +f 111//1 112//1 109//1 +f 111//1 113//1 112//1 +f 111//1 114//1 113//1 +f 111//1 115//1 114//1 +f 111//1 116//1 115//1 +f 111//1 117//1 116//1 +f 118//1 117//1 111//1 +f 118//1 119//1 117//1 +f 118//1 120//1 119//1 +f 118//1 121//1 120//1 +f 118//1 122//1 121//1 +f 118//1 123//1 122//1 +f 118//1 124//1 123//1 +f 125//1 124//1 118//1 +f 125//1 126//1 124//1 +f 110//1 127//1 106//1 +f 125//1 128//1 126//1 +f 129//1 127//1 110//1 +f 125//1 130//1 128//1 +f 131//1 130//1 125//1 +f 131//1 132//1 130//1 +f 131//1 133//1 132//1 +f 131//1 134//1 133//1 +f 131//1 135//1 134//1 +f 136//1 135//1 131//1 
+f 136//1 137//1 135//1 +f 136//1 138//1 137//1 +f 129//1 139//1 127//1 +f 136//1 140//1 138//1 +f 141//1 140//1 136//1 +f 142//1 140//1 141//1 +f 129//1 143//1 139//1 +f 129//1 144//1 143//1 +f 129//1 145//1 144//1 +f 129//1 146//1 145//1 +f 129//1 147//1 146//1 +f 148//1 140//1 142//1 +f 129//1 149//1 147//1 +f 129//1 150//1 149//1 +f 129//1 151//1 150//1 +f 129//1 152//1 151//1 +f 129//1 153//1 152//1 +f 154//1 140//1 148//1 +f 155//1 140//1 154//1 +f 129//1 156//1 153//1 +f 157//1 140//1 155//1 +f 129//1 158//1 156//1 +f 159//1 140//1 157//1 +f 160//1 140//1 159//1 +f 161//1 140//1 160//1 +f 162//1 140//1 161//1 +f 163//1 140//1 162//1 +f 164//1 140//1 163//1 +f 165//1 140//1 164//1 +f 166//1 140//1 165//1 +f 167//1 140//1 166//1 +f 168//2 140//2 167//2 +f 169//2 140//2 168//2 +f 170//2 140//2 169//2 +f 171//2 140//2 170//2 +f 172//2 140//2 171//2 +f 173//2 140//2 172//2 +f 174//2 140//2 173//2 +f 175//2 140//2 174//2 +f 176//2 140//2 175//2 +f 177//2 140//2 176//2 +f 178//2 140//2 177//2 diff --git a/alphanumeric/B.mtl b/alphanumeric/B.mtl new file mode 100644 index 0000000..a69b05b --- /dev/null +++ b/alphanumeric/B.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.036 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/B.obj b/alphanumeric/B.obj new file mode 100644 index 0000000..2cff977 --- /dev/null +++ b/alphanumeric/B.obj @@ -0,0 +1,509 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib B.mtl +o B +v -0.007549 -0.000000 0.000000 +v -0.007444 0.000000 -0.008225 +v -0.007549 0.000000 -0.008225 +v -0.007153 0.000000 -0.008225 +v -0.006713 0.000000 -0.008225 +v -0.006162 0.000000 -0.008225 +v -0.005536 0.000000 -0.008225 +v -0.004874 0.000000 -0.008225 +v -0.004211 0.000000 -0.008225 +v -0.003585 0.000000 -0.008225 +v -0.003034 0.000000 -0.008225 +v -0.002594 0.000000 -0.008225 +v -0.002303 0.000000 -0.008225 +v -0.002198 0.000000 -0.008225 +v -0.001019 0.000000 -0.008224 +v -0.000023 0.000000 -0.008223 +v 0.000810 0.000000 -0.008219 +v -0.002447 0.000000 -0.004890 +v 0.001498 0.000000 -0.008212 +v 0.002057 0.000000 -0.008202 +v 0.002508 0.000000 -0.008188 +v 0.002868 0.000000 -0.008168 +v 0.003155 0.000000 -0.008142 +v 0.003387 0.000000 -0.008109 +v 0.003583 0.000000 -0.008069 +v 0.003760 0.000000 -0.008021 +v 0.003938 0.000000 -0.007964 +v 0.004214 0.000000 -0.007864 +v 0.004477 0.000000 -0.007753 +v 0.004726 0.000000 -0.007629 +v 0.004962 0.000000 -0.007493 +v 0.005186 0.000000 -0.007343 +v 0.005397 0.000000 -0.007181 +v 0.005596 0.000000 -0.007006 +v 0.005784 0.000000 -0.006817 +v 0.005961 0.000000 -0.006614 +v 0.006127 0.000000 -0.006398 +v 0.006282 0.000000 -0.006168 +v 0.006427 0.000000 -0.005923 +v 0.006482 0.000000 -0.005820 +v 0.006529 0.000000 -0.005720 +v 0.006569 0.000000 -0.005621 +v 0.006603 0.000000 -0.005518 +v 0.006630 0.000000 -0.005409 +v 0.006652 0.000000 -0.005290 +v 0.006670 0.000000 -0.005158 +v 0.006682 0.000000 -0.005009 +v 0.006691 0.000000 -0.004840 +v -0.002447 0.000000 -0.003385 +v -0.000692 0.000000 -0.004840 +v 0.001063 0.000000 -0.004803 +v 0.006697 0.000000 -0.004648 +v 0.001070 0.000000 -0.004795 +v 0.001090 0.000000 -0.004773 +v 0.001119 0.000000 -0.004739 +v 0.001156 0.000000 -0.004697 +v 0.001199 0.000000 -0.004649 +v 0.001243 0.000000 -0.004598 +v 0.006700 0.000000 -0.004429 +v 0.001288 0.000000 -0.004547 +v 0.001330 0.000000 -0.004499 +v 
0.001367 0.000000 -0.004457 +v 0.001397 0.000000 -0.004423 +v 0.006701 0.000000 -0.004181 +v 0.001417 0.000000 -0.004401 +v 0.001424 0.000000 -0.004392 +v 0.001491 0.000000 -0.004314 +v 0.001550 0.000000 -0.004240 +v 0.001601 0.000000 -0.004169 +v 0.006700 0.000000 -0.003935 +v 0.001645 0.000000 -0.004098 +v 0.001681 0.000000 -0.004028 +v 0.001712 0.000000 -0.003955 +v 0.001736 0.000000 -0.003880 +v 0.006696 0.000000 -0.003717 +v 0.001755 0.000000 -0.003799 +v 0.001769 0.000000 -0.003712 +v 0.006689 0.000000 -0.003526 +v 0.001778 0.000000 -0.003617 +v 0.001783 0.000000 -0.003512 +v 0.006679 0.000000 -0.003356 +v 0.001785 0.000000 -0.003397 +v 0.001772 0.000000 -0.003114 +v -0.002447 0.000000 -0.001879 +v 0.006666 0.000000 -0.003206 +v 0.006648 0.000000 -0.003072 +v 0.001733 0.000000 -0.002866 +v 0.006625 0.000000 -0.002951 +v 0.006597 0.000000 -0.002841 +v 0.001663 0.000000 -0.002651 +v 0.006564 0.000000 -0.002737 +v 0.006525 0.000000 -0.002638 +v 0.001559 0.000000 -0.002467 +v 0.006479 0.000000 -0.002540 +v 0.006427 0.000000 -0.002439 +v 0.001417 0.000000 -0.002313 +v 0.006340 0.000000 -0.002285 +v 0.001232 0.000000 -0.002185 +v 0.006243 0.000000 -0.002133 +v 0.001003 0.000000 -0.002083 +v 0.006135 0.000000 -0.001983 +v 0.000725 0.000000 -0.002004 +v 0.000393 0.000000 -0.001946 +v 0.006017 0.000000 -0.001838 +v 0.000006 0.000000 -0.001908 +v -0.000442 0.000000 -0.001886 +v -0.000953 0.000000 -0.001879 +v -0.002417 0.000000 -0.001879 +v -0.002336 0.000000 -0.001879 +v -0.002213 0.000000 -0.001879 +v -0.002060 0.000000 -0.001879 +v -0.001885 0.000000 -0.001879 +v -0.001700 0.000000 -0.001879 +v -0.001515 0.000000 -0.001879 +v -0.001340 0.000000 -0.001879 +v -0.001187 0.000000 -0.001879 +v -0.001064 0.000000 -0.001879 +v -0.000983 0.000000 -0.001879 +v 0.005890 0.000000 -0.001695 +v 0.005753 0.000000 -0.001557 +v 0.005608 0.000000 -0.001423 +v 0.005455 0.000000 -0.001294 +v 0.005295 0.000000 -0.001170 +v 0.005127 0.000000 -0.001052 +v 0.004952 0.000000 -0.000940 +v 0.004772 0.000000 -0.000834 +v 0.004697 0.000000 -0.000791 +v 0.004629 0.000000 -0.000749 +v 0.004568 0.000000 -0.000710 +v 0.004514 0.000000 -0.000673 +v 0.004469 0.000000 -0.000638 +v 0.004431 0.000000 -0.000607 +v 0.004402 0.000000 -0.000579 +v 0.004382 0.000000 -0.000555 +v 0.004371 0.000000 -0.000536 +v 0.004370 0.000000 -0.000522 +v 0.004379 0.000000 -0.000513 +v 0.004398 0.000000 -0.000510 +v 0.004513 0.000000 -0.000496 +v 0.004660 0.000000 -0.000454 +v 0.004833 0.000000 -0.000388 +v 0.005025 0.000000 -0.000303 +v 0.005231 0.000000 -0.000201 +v 0.005445 0.000000 -0.000086 +v 0.005660 -0.000000 0.000039 +v -0.007549 -0.000000 0.008225 +v -0.002447 -0.000000 0.001356 +v 0.005871 -0.000000 0.000170 +v 0.006070 -0.000000 0.000303 +v 0.006252 -0.000000 0.000436 +v 0.006410 -0.000000 0.000564 +v 0.006539 -0.000000 0.000684 +v 0.006732 -0.000000 0.000906 +v 0.006905 -0.000000 0.001148 +v -0.000704 -0.000000 0.001356 +v -0.000396 -0.000000 0.001357 +v 0.007059 -0.000000 0.001407 +v -0.002447 -0.000000 0.002974 +v -0.000115 -0.000000 0.001361 +v 0.000139 -0.000000 0.001366 +v 0.000368 -0.000000 0.001375 +v 0.000574 -0.000000 0.001386 +v 0.000760 -0.000000 0.001400 +v 0.000925 -0.000000 0.001417 +v 0.007193 -0.000000 0.001682 +v 0.001073 -0.000000 0.001437 +v 0.001205 -0.000000 0.001461 +v 0.001323 -0.000000 0.001489 +v 0.001428 -0.000000 0.001520 +v 0.001523 -0.000000 0.001555 +v 0.001677 -0.000000 0.001620 +v 0.001814 -0.000000 0.001692 +v 0.007307 -0.000000 0.001972 +v 0.001934 -0.000000 0.001771 +v 0.002040 -0.000000 0.001859 +v 
0.002131 -0.000000 0.001956 +v 0.002208 -0.000000 0.002062 +v 0.007399 -0.000000 0.002275 +v 0.002271 -0.000000 0.002180 +v 0.002322 -0.000000 0.002309 +v 0.007470 -0.000000 0.002589 +v 0.002360 -0.000000 0.002451 +v 0.002386 -0.000000 0.002607 +v 0.007519 -0.000000 0.002913 +v 0.002402 -0.000000 0.002776 +v 0.002407 -0.000000 0.002961 +v 0.007545 -0.000000 0.003244 +v 0.002405 -0.000000 0.003077 +v -0.002447 -0.000000 0.004591 +v 0.002400 -0.000000 0.003183 +v 0.002391 -0.000000 0.003280 +v 0.007549 -0.000000 0.003581 +v 0.002378 -0.000000 0.003369 +v 0.002361 -0.000000 0.003451 +v 0.002339 -0.000000 0.003529 +v 0.002311 -0.000000 0.003603 +v 0.007529 -0.000000 0.003923 +v 0.002278 -0.000000 0.003674 +v 0.002239 -0.000000 0.003743 +v 0.002194 -0.000000 0.003813 +v 0.002142 -0.000000 0.003884 +v 0.002083 -0.000000 0.003957 +v 0.007485 -0.000000 0.004268 +v 0.001972 -0.000000 0.004079 +v 0.001852 -0.000000 0.004185 +v 0.001720 -0.000000 0.004276 +v 0.007442 -0.000000 0.004509 +v 0.001570 -0.000000 0.004353 +v 0.001399 -0.000000 0.004418 +v 0.001201 -0.000000 0.004470 +v 0.000973 -0.000000 0.004511 +v 0.007387 -0.000000 0.004743 +v 0.000709 -0.000000 0.004543 +v 0.000405 -0.000000 0.004566 +v 0.000057 -0.000000 0.004581 +v -0.000341 -0.000000 0.004589 +v -0.000791 -0.000000 0.004591 +v -0.002414 -0.000000 0.004591 +v -0.002324 -0.000000 0.004591 +v -0.002188 -0.000000 0.004591 +v -0.002018 -0.000000 0.004591 +v -0.001824 -0.000000 0.004591 +v -0.001619 -0.000000 0.004591 +v -0.001414 -0.000000 0.004591 +v -0.001221 -0.000000 0.004591 +v -0.001050 -0.000000 0.004591 +v -0.000914 -0.000000 0.004591 +v -0.000824 -0.000000 0.004591 +v 0.007320 -0.000000 0.004970 +v 0.007242 -0.000000 0.005189 +v 0.007151 -0.000000 0.005402 +v 0.007048 -0.000000 0.005608 +v 0.006932 -0.000000 0.005809 +v 0.006803 -0.000000 0.006005 +v 0.006660 -0.000000 0.006196 +v 0.006505 -0.000000 0.006382 +v 0.006336 -0.000000 0.006565 +v 0.006153 -0.000000 0.006744 +v 0.005835 -0.000000 0.007029 +v 0.005520 -0.000000 0.007271 +v 0.005184 -0.000000 0.007475 +v 0.004807 -0.000000 0.007643 +v 0.004365 -0.000000 0.007779 +v 0.003837 -0.000000 0.007887 +v 0.003199 -0.000000 0.007971 +v 0.002430 -0.000000 0.008035 +v 0.001506 -0.000000 0.008082 +v 0.000407 -0.000000 0.008117 +v -0.000891 -0.000000 0.008142 +v -0.002409 -0.000000 0.008162 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.036 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 18//1 19//1 17//1 +f 18//1 20//1 19//1 +f 18//1 21//1 20//1 +f 18//1 22//1 21//1 +f 18//1 23//1 22//1 +f 18//1 24//1 23//1 +f 18//1 25//1 24//1 +f 18//1 26//1 25//1 +f 18//1 27//1 26//1 +f 18//1 28//1 27//1 +f 18//1 29//1 28//1 +f 18//1 30//1 29//1 +f 18//1 31//1 30//1 +f 18//1 32//1 31//1 +f 18//1 33//1 32//1 +f 18//1 34//1 33//1 +f 18//1 35//1 34//1 +f 18//1 36//1 35//1 +f 18//1 37//1 36//1 +f 18//1 38//1 37//1 +f 18//1 39//1 38//1 +f 18//1 40//1 39//1 +f 18//1 41//1 40//1 +f 18//1 42//1 41//1 +f 18//1 43//1 42//1 +f 18//1 44//1 43//1 +f 18//1 45//1 44//1 +f 18//1 46//1 45//1 +f 18//1 47//1 46//1 +f 18//1 48//1 47//1 +f 1//1 49//1 18//1 +f 50//1 48//1 18//1 +f 51//1 48//1 50//1 +f 51//1 52//1 48//1 +f 53//1 52//1 51//1 +f 54//1 52//1 53//1 +f 55//1 52//1 54//1 +f 56//1 52//1 55//1 +f 57//1 52//1 56//1 +f 58//1 52//1 57//1 +f 58//1 59//1 52//1 
+f 60//1 59//1 58//1 +f 61//1 59//1 60//1 +f 62//1 59//1 61//1 +f 63//1 59//1 62//1 +f 63//1 64//1 59//1 +f 65//1 64//1 63//1 +f 66//1 64//1 65//1 +f 67//1 64//1 66//1 +f 68//1 64//1 67//1 +f 69//1 64//1 68//1 +f 69//1 70//1 64//1 +f 71//1 70//1 69//1 +f 72//1 70//1 71//1 +f 73//1 70//1 72//1 +f 74//1 70//1 73//1 +f 74//1 75//1 70//1 +f 76//1 75//1 74//1 +f 77//1 75//1 76//1 +f 77//1 78//1 75//1 +f 79//1 78//1 77//1 +f 80//1 78//1 79//1 +f 80//1 81//1 78//1 +f 82//1 81//1 80//1 +f 83//1 81//1 82//1 +f 1//1 84//1 49//1 +f 83//1 85//1 81//1 +f 83//1 86//1 85//1 +f 87//1 86//1 83//1 +f 87//1 88//1 86//1 +f 87//1 89//1 88//1 +f 90//1 89//1 87//1 +f 90//1 91//1 89//1 +f 90//1 92//1 91//1 +f 93//1 92//1 90//1 +f 93//1 94//1 92//1 +f 93//1 95//1 94//1 +f 96//1 95//1 93//1 +f 96//1 97//1 95//1 +f 98//1 97//1 96//1 +f 98//1 99//1 97//1 +f 100//1 99//1 98//1 +f 100//1 101//1 99//1 +f 102//1 101//1 100//1 +f 103//1 101//1 102//1 +f 103//1 104//1 101//1 +f 105//1 104//1 103//1 +f 106//1 104//1 105//1 +f 107//1 104//1 106//1 +f 1//1 108//1 84//1 +f 1//1 109//1 108//1 +f 1//1 110//1 109//1 +f 1//1 111//1 110//1 +f 1//1 112//1 111//1 +f 1//1 113//1 112//1 +f 1//1 114//1 113//1 +f 1//1 115//1 114//1 +f 1//1 116//1 115//1 +f 1//1 117//1 116//1 +f 1//1 118//1 117//1 +f 1//1 107//1 118//1 +f 1//1 104//1 107//1 +f 1//1 119//1 104//1 +f 1//1 120//1 119//1 +f 1//1 121//1 120//1 +f 1//1 122//1 121//1 +f 1//1 123//1 122//1 +f 1//1 124//1 123//1 +f 1//1 125//1 124//1 +f 1//1 126//1 125//1 +f 1//1 127//1 126//1 +f 1//1 128//1 127//1 +f 1//1 129//1 128//1 +f 1//1 130//1 129//1 +f 1//1 131//1 130//1 +f 1//1 132//1 131//1 +f 1//1 133//1 132//1 +f 1//1 134//1 133//1 +f 1//1 135//1 134//1 +f 1//1 136//1 135//1 +f 1//1 137//1 136//1 +f 1//1 138//1 137//1 +f 1//1 139//1 138//1 +f 1//1 140//1 139//1 +f 1//1 141//1 140//1 +f 1//1 142//1 141//1 +f 1//1 143//1 142//1 +f 1//1 144//1 143//1 +f 1//1 145//1 144//1 +f 146//1 147//1 1//1 +f 147//1 145//1 1//1 +f 147//1 148//1 145//1 +f 147//1 149//1 148//1 +f 147//1 150//1 149//1 +f 147//1 151//1 150//1 +f 147//1 152//1 151//1 +f 147//1 153//1 152//1 +f 147//1 154//1 153//1 +f 147//1 155//1 154//1 +f 155//1 156//1 154//1 +f 156//1 157//1 154//1 +f 146//1 158//1 147//1 +f 159//1 157//1 156//1 +f 160//1 157//1 159//1 +f 161//1 157//1 160//1 +f 162//1 157//1 161//1 +f 163//1 157//1 162//1 +f 164//1 157//1 163//1 +f 164//1 165//1 157//1 +f 166//1 165//1 164//1 +f 167//1 165//1 166//1 +f 168//1 165//1 167//1 +f 169//1 165//1 168//1 +f 170//1 165//1 169//1 +f 171//1 165//1 170//1 +f 172//1 165//1 171//1 +f 172//1 173//1 165//1 +f 174//1 173//1 172//1 +f 175//1 173//1 174//1 +f 176//1 173//1 175//1 +f 177//1 173//1 176//1 +f 177//1 178//1 173//1 +f 179//1 178//1 177//1 +f 180//1 178//1 179//1 +f 180//1 181//1 178//1 +f 182//1 181//1 180//1 +f 183//1 181//1 182//1 +f 183//1 184//1 181//1 +f 185//1 184//1 183//1 +f 186//1 184//1 185//1 +f 186//1 187//1 184//1 +f 188//1 187//1 186//1 +f 146//1 189//1 158//1 +f 190//1 187//1 188//1 +f 191//1 187//1 190//1 +f 191//1 192//1 187//1 +f 193//1 192//1 191//1 +f 194//1 192//1 193//1 +f 195//1 192//1 194//1 +f 196//1 192//1 195//1 +f 196//1 197//1 192//1 +f 198//1 197//1 196//1 +f 199//1 197//1 198//1 +f 200//1 197//1 199//1 +f 201//1 197//1 200//1 +f 202//1 197//1 201//1 +f 202//1 203//1 197//1 +f 204//1 203//1 202//1 +f 205//1 203//1 204//1 +f 206//1 203//1 205//1 +f 206//1 207//1 203//1 +f 208//1 207//1 206//1 +f 209//1 207//1 208//1 +f 210//1 207//1 209//1 +f 211//1 207//1 210//1 +f 211//1 212//1 207//1 +f 213//1 212//1 211//1 +f 214//1 212//1 
213//1 +f 215//1 212//1 214//1 +f 216//1 212//1 215//1 +f 217//1 212//1 216//1 +f 146//1 218//1 189//1 +f 146//1 219//1 218//1 +f 146//1 220//1 219//1 +f 146//1 221//1 220//1 +f 146//1 222//1 221//1 +f 146//1 223//1 222//1 +f 146//1 224//1 223//1 +f 146//1 225//1 224//1 +f 146//1 226//1 225//1 +f 146//1 227//1 226//1 +f 146//1 228//1 227//1 +f 146//1 217//1 228//1 +f 146//1 212//1 217//1 +f 146//1 229//1 212//1 +f 146//1 230//1 229//1 +f 146//1 231//1 230//1 +f 146//1 232//1 231//1 +f 146//1 233//1 232//1 +f 146//1 234//1 233//1 +f 146//1 235//1 234//1 +f 146//1 236//1 235//1 +f 146//1 237//1 236//1 +f 146//1 238//1 237//1 +f 146//1 239//1 238//1 +f 146//1 240//1 239//1 +f 146//1 241//1 240//1 +f 146//1 242//1 241//1 +f 146//1 243//1 242//1 +f 146//1 244//1 243//1 +f 146//1 245//1 244//1 +f 146//1 246//1 245//1 +f 146//1 247//1 246//1 +f 146//1 248//1 247//1 +f 146//1 249//1 248//1 +f 146//1 250//1 249//1 diff --git a/alphanumeric/C.mtl b/alphanumeric/C.mtl new file mode 100644 index 0000000..10ee63a --- /dev/null +++ b/alphanumeric/C.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.037 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/C.obj b/alphanumeric/C.obj new file mode 100644 index 0000000..f410554 --- /dev/null +++ b/alphanumeric/C.obj @@ -0,0 +1,461 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib C.mtl +o C +v 0.000013 0.000000 -0.008446 +v 0.000680 0.000000 -0.008442 +v 0.000345 0.000000 -0.008449 +v -0.000315 0.000000 -0.008437 +v 0.001013 0.000000 -0.008430 +v -0.000638 0.000000 -0.008418 +v 0.001341 0.000000 -0.008408 +v -0.000950 0.000000 -0.008391 +v 0.001664 0.000000 -0.008380 +v -0.001625 0.000000 -0.008305 +v 0.001979 0.000000 -0.008346 +v 0.002283 0.000000 -0.008306 +v 0.002571 0.000000 -0.008259 +v -0.002273 0.000000 -0.008174 +v 0.002846 0.000000 -0.008205 +v 0.003249 0.000000 -0.008104 +v -0.002894 0.000000 -0.008000 +v 0.003634 0.000000 -0.007985 +v -0.003483 0.000000 -0.007784 +v 0.004002 0.000000 -0.007850 +v 0.004351 0.000000 -0.007697 +v -0.004043 0.000000 -0.007527 +v 0.004685 0.000000 -0.007527 +v 0.005005 0.000000 -0.007337 +v -0.004570 0.000000 -0.007229 +v 0.005309 0.000000 -0.007128 +v -0.005065 0.000000 -0.006893 +v 0.005598 0.000000 -0.006899 +v 0.005877 0.000000 -0.006651 +v -0.005524 0.000000 -0.006519 +v 0.006142 0.000000 -0.006382 +v -0.005948 0.000000 -0.006109 +v 0.006396 0.000000 -0.006091 +v -0.006334 0.000000 -0.005660 +v 0.006640 0.000000 -0.005778 +v 0.006770 0.000000 -0.005590 +v -0.006680 0.000000 -0.005176 +v 0.006909 0.000000 -0.005371 +v 0.007050 0.000000 -0.005131 +v -0.006986 0.000000 -0.004658 +v 0.007190 0.000000 -0.004876 +v 0.000511 0.000000 -0.004641 +v 0.007325 0.000000 -0.004618 +v -0.007241 0.000000 -0.004112 +v 0.000014 0.000000 -0.004608 +v 0.001006 0.000000 -0.004589 +v 0.007453 0.000000 -0.004362 +v -0.000471 0.000000 -0.004498 +v 0.001492 0.000000 -0.004453 +v -0.000938 0.000000 -0.004314 +v 0.001955 0.000000 -0.004229 +v 0.007568 0.000000 -0.004118 +v -0.001372 0.000000 -0.004056 +v 0.002385 0.000000 -0.003913 +v 0.007664 0.000000 -0.003893 +v -0.007459 0.000000 -0.003506 +v -0.001764 0.000000 -0.003728 +v 0.002463 0.000000 -0.003843 +v 0.007740 0.000000 -0.003698 +v 0.002542 0.000000 -0.003767 +v 0.002619 0.000000 -0.003687 +v -0.002103 0.000000 -0.003336 +v 0.007790 0.000000 -0.003539 +v 0.002695 0.000000 -0.003602 +v 
0.002769 0.000000 -0.003514 +v 0.007812 0.000000 -0.003425 +v 0.002838 0.000000 -0.003425 +v -0.007637 0.000000 -0.002850 +v 0.002906 0.000000 -0.003336 +v 0.007798 0.000000 -0.003364 +v 0.007707 0.000000 -0.003330 +v -0.002379 0.000000 -0.002882 +v 0.002966 0.000000 -0.003247 +v 0.007480 0.000000 -0.003269 +v 0.007143 0.000000 -0.003187 +v 0.003022 0.000000 -0.003161 +v 0.006725 0.000000 -0.003089 +v 0.003070 0.000000 -0.003079 +v 0.006251 0.000000 -0.002983 +v 0.003112 0.000000 -0.003001 +v 0.003144 0.000000 -0.002929 +v 0.005745 0.000000 -0.002870 +v 0.003173 0.000000 -0.002860 +v -0.002581 0.000000 -0.002369 +v 0.005235 0.000000 -0.002759 +v 0.003204 0.000000 -0.002793 +v -0.007777 0.000000 -0.002157 +v 0.003237 0.000000 -0.002730 +v 0.004747 0.000000 -0.002656 +v 0.003272 0.000000 -0.002673 +v 0.003307 0.000000 -0.002620 +v 0.004309 0.000000 -0.002566 +v 0.003342 0.000000 -0.002573 +v 0.003378 0.000000 -0.002531 +v 0.003944 0.000000 -0.002496 +v 0.003414 0.000000 -0.002497 +v 0.003448 0.000000 -0.002469 +v 0.003679 0.000000 -0.002448 +v 0.003481 0.000000 -0.002448 +v 0.003512 0.000000 -0.002436 +v 0.003543 0.000000 -0.002432 +v -0.002643 0.000000 -0.002130 +v -0.007879 0.000000 -0.001434 +v -0.002697 0.000000 -0.001858 +v -0.002743 0.000000 -0.001558 +v -0.002782 0.000000 -0.001237 +v -0.007938 0.000000 -0.000695 +v -0.002811 0.000000 -0.000898 +v -0.002834 0.000000 -0.000549 +v -0.007959 -0.000000 0.000049 +v -0.002846 0.000000 -0.000194 +v -0.002850 -0.000000 0.000161 +v -0.007938 -0.000000 0.000790 +v -0.002844 -0.000000 0.000511 +v -0.002828 -0.000000 0.000851 +v -0.007876 -0.000000 0.001515 +v -0.002803 -0.000000 0.001174 +v -0.002768 -0.000000 0.001476 +v -0.002672 -0.000000 0.002021 +v -0.007772 -0.000000 0.002213 +v 0.003636 -0.000000 0.001557 +v 0.003691 -0.000000 0.001556 +v 0.003655 -0.000000 0.001551 +v 0.003762 -0.000000 0.001573 +v 0.003615 -0.000000 0.001577 +v 0.003866 -0.000000 0.001601 +v 0.003591 -0.000000 0.001609 +v 0.004000 -0.000000 0.001637 +v 0.003565 -0.000000 0.001652 +v 0.004161 -0.000000 0.001683 +v 0.003536 -0.000000 0.001706 +v 0.004344 -0.000000 0.001736 +v 0.003505 -0.000000 0.001768 +v 0.004552 -0.000000 0.001796 +v 0.003474 -0.000000 0.001839 +v 0.004777 -0.000000 0.001862 +v 0.003442 -0.000000 0.001917 +v 0.005019 -0.000000 0.001934 +v 0.003410 -0.000000 0.002001 +v 0.005276 -0.000000 0.002010 +v 0.003377 -0.000000 0.002091 +v 0.005544 -0.000000 0.002089 +v -0.002546 -0.000000 0.002514 +v 0.005819 -0.000000 0.002173 +v 0.003346 -0.000000 0.002187 +v 0.006098 -0.000000 0.002258 +v 0.003319 -0.000000 0.002285 +v -0.007625 -0.000000 0.002876 +v 0.006367 -0.000000 0.002340 +v 0.003199 -0.000000 0.002638 +v 0.006621 -0.000000 0.002417 +v 0.006859 -0.000000 0.002488 +v 0.007081 -0.000000 0.002555 +v -0.002389 -0.000000 0.002953 +v 0.007281 -0.000000 0.002613 +v 0.007459 -0.000000 0.002667 +v 0.003057 -0.000000 0.002963 +v 0.007612 -0.000000 0.002711 +v 0.007738 -0.000000 0.002747 +v 0.007833 -0.000000 0.002773 +v 0.007895 -0.000000 0.002789 +v 0.007923 -0.000000 0.002794 +v 0.007950 -0.000000 0.002816 +v 0.007959 -0.000000 0.002877 +v -0.007435 -0.000000 0.003492 +v 0.007951 -0.000000 0.002974 +v -0.002201 -0.000000 0.003340 +v 0.002897 -0.000000 0.003259 +v 0.007927 -0.000000 0.003102 +v 0.007889 -0.000000 0.003258 +v 0.007838 -0.000000 0.003436 +v 0.002715 -0.000000 0.003527 +v -0.001980 -0.000000 0.003676 +v 0.007773 -0.000000 0.003635 +v -0.007190 -0.000000 0.004095 +v 0.002511 -0.000000 0.003767 +v 0.007697 -0.000000 0.003847 +v -0.001725 -0.000000 
0.003961 +v 0.002289 -0.000000 0.003977 +v 0.007614 -0.000000 0.004070 +v -0.001437 -0.000000 0.004196 +v 0.002045 -0.000000 0.004161 +v 0.007519 -0.000000 0.004300 +v -0.006908 -0.000000 0.004665 +v 0.001782 -0.000000 0.004314 +v -0.001115 -0.000000 0.004380 +v 0.007420 -0.000000 0.004531 +v 0.001497 -0.000000 0.004438 +v -0.000759 -0.000000 0.004517 +v 0.001195 -0.000000 0.004533 +v -0.000366 -0.000000 0.004604 +v 0.007314 -0.000000 0.004761 +v 0.000873 -0.000000 0.004599 +v 0.000530 -0.000000 0.004636 +v 0.000064 -0.000000 0.004643 +v -0.006594 -0.000000 0.005197 +v 0.007081 -0.000000 0.005199 +v -0.006243 -0.000000 0.005691 +v 0.006824 -0.000000 0.005615 +v 0.006539 -0.000000 0.006005 +v -0.005861 -0.000000 0.006148 +v 0.006231 -0.000000 0.006371 +v -0.005445 -0.000000 0.006567 +v 0.005898 -0.000000 0.006708 +v -0.004998 -0.000000 0.006946 +v 0.005542 -0.000000 0.007021 +v -0.004519 -0.000000 0.007285 +v 0.005165 -0.000000 0.007304 +v -0.004010 -0.000000 0.007583 +v 0.004765 -0.000000 0.007560 +v 0.004344 -0.000000 0.007786 +v -0.003471 -0.000000 0.007842 +v 0.003905 -0.000000 0.007981 +v -0.002904 -0.000000 0.008059 +v 0.003447 -0.000000 0.008148 +v -0.002308 -0.000000 0.008232 +v 0.002970 -0.000000 0.008282 +v -0.001992 -0.000000 0.008292 +v 0.002674 -0.000000 0.008338 +v -0.001607 -0.000000 0.008344 +v 0.002304 -0.000000 0.008383 +v -0.001162 -0.000000 0.008384 +v 0.001873 -0.000000 0.008415 +v -0.000676 -0.000000 0.008416 +v 0.001397 -0.000000 0.008438 +v -0.000162 -0.000000 0.008438 +v 0.000363 -0.000000 0.008449 +v 0.000889 -0.000000 0.008449 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.037 +s 1 +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 8//1 9//1 7//1 +f 10//1 9//1 8//1 +f 10//1 11//1 9//1 +f 10//1 12//1 11//1 +f 10//1 13//1 12//1 +f 14//1 13//1 10//1 +f 14//1 15//1 13//1 +f 14//1 16//1 15//1 +f 17//1 16//1 14//1 +f 17//1 18//1 16//1 +f 19//1 18//1 17//1 +f 19//1 20//1 18//1 +f 19//1 21//1 20//1 +f 22//1 21//1 19//1 +f 22//1 23//1 21//1 +f 22//1 24//1 23//1 +f 25//1 24//1 22//1 +f 25//1 26//1 24//1 +f 27//1 26//1 25//1 +f 27//1 28//1 26//1 +f 27//1 29//1 28//1 +f 30//1 29//1 27//1 +f 30//1 31//1 29//1 +f 32//1 31//1 30//1 +f 32//1 33//1 31//1 +f 34//1 33//1 32//1 +f 34//1 35//1 33//1 +f 34//1 36//1 35//1 +f 37//1 36//1 34//1 +f 37//1 38//1 36//1 +f 37//1 39//1 38//1 +f 40//1 39//1 37//1 +f 40//1 41//1 39//1 +f 40//1 42//1 41//1 +f 42//1 43//1 41//1 +f 44//1 45//1 40//1 +f 45//1 42//1 40//1 +f 46//1 43//1 42//1 +f 46//1 47//1 43//1 +f 44//1 48//1 45//1 +f 49//1 47//1 46//1 +f 44//1 50//1 48//1 +f 51//1 47//1 49//1 +f 51//1 52//1 47//1 +f 44//1 53//1 50//1 +f 54//1 52//1 51//1 +f 54//1 55//1 52//1 +f 56//1 53//1 44//1 +f 56//1 57//1 53//1 +f 58//1 55//1 54//1 +f 58//1 59//1 55//1 +f 60//1 59//1 58//1 +f 61//1 59//1 60//1 +f 56//1 62//1 57//1 +f 61//1 63//1 59//1 +f 64//1 63//1 61//1 +f 65//1 63//1 64//1 +f 65//1 66//1 63//1 +f 67//1 66//1 65//1 +f 68//1 62//1 56//1 +f 69//1 66//1 67//1 +f 69//1 70//1 66//1 +f 69//1 71//1 70//1 +f 68//1 72//1 62//1 +f 73//1 71//1 69//1 +f 73//1 74//1 71//1 +f 73//1 75//1 74//1 +f 76//1 75//1 73//1 +f 76//1 77//1 75//1 +f 78//1 77//1 76//1 +f 78//1 79//1 77//1 +f 80//1 79//1 78//1 +f 81//1 79//1 80//1 +f 81//1 82//1 79//1 +f 83//1 82//1 81//1 +f 68//1 84//1 72//1 +f 83//1 85//1 82//1 +f 86//1 85//1 83//1 +f 87//1 84//1 68//1 +f 88//1 85//1 86//1 +f 88//1 89//1 85//1 +f 90//1 89//1 88//1 +f 91//1 89//1 90//1 +f 91//1 92//1 89//1 +f 93//1 92//1 91//1 +f 94//1 92//1 93//1 +f 94//1 95//1 
92//1 +f 96//1 95//1 94//1 +f 97//1 95//1 96//1 +f 97//1 98//1 95//1 +f 99//1 98//1 97//1 +f 100//1 98//1 99//1 +f 100//1 101//1 98//1 +f 87//1 102//1 84//1 +f 103//1 102//1 87//1 +f 103//1 104//1 102//1 +f 103//1 105//1 104//1 +f 103//1 106//1 105//1 +f 107//1 106//1 103//1 +f 107//1 108//1 106//1 +f 107//1 109//1 108//1 +f 110//1 109//1 107//1 +f 110//1 111//1 109//1 +f 110//1 112//1 111//1 +f 113//1 112//1 110//1 +f 113//1 114//1 112//1 +f 113//1 115//1 114//1 +f 116//1 115//1 113//1 +f 116//1 117//1 115//1 +f 116//1 118//1 117//1 +f 116//1 119//1 118//1 +f 120//1 119//1 116//1 +f 121//1 122//1 123//1 +f 121//1 124//1 122//1 +f 125//1 124//1 121//1 +f 125//1 126//1 124//1 +f 127//1 126//1 125//1 +f 127//1 128//1 126//1 +f 129//1 128//1 127//1 +f 129//1 130//1 128//1 +f 131//1 130//1 129//1 +f 131//1 132//1 130//1 +f 133//1 132//1 131//1 +f 133//1 134//1 132//1 +f 135//1 134//1 133//1 +f 135//1 136//1 134//1 +f 137//1 136//1 135//1 +f 137//1 138//1 136//1 +f 139//1 138//1 137//1 +f 139//1 140//1 138//1 +f 141//1 140//1 139//1 +f 141//1 142//1 140//1 +f 120//1 143//1 119//1 +f 141//1 144//1 142//1 +f 145//1 144//1 141//1 +f 145//1 146//1 144//1 +f 147//1 146//1 145//1 +f 148//1 143//1 120//1 +f 147//1 149//1 146//1 +f 150//1 149//1 147//1 +f 150//1 151//1 149//1 +f 150//1 152//1 151//1 +f 150//1 153//1 152//1 +f 148//1 154//1 143//1 +f 150//1 155//1 153//1 +f 150//1 156//1 155//1 +f 157//1 156//1 150//1 +f 157//1 158//1 156//1 +f 157//1 159//1 158//1 +f 157//1 160//1 159//1 +f 157//1 161//1 160//1 +f 157//1 162//1 161//1 +f 157//1 163//1 162//1 +f 157//1 164//1 163//1 +f 165//1 154//1 148//1 +f 157//1 166//1 164//1 +f 165//1 167//1 154//1 +f 168//1 166//1 157//1 +f 168//1 169//1 166//1 +f 168//1 170//1 169//1 +f 168//1 171//1 170//1 +f 172//1 171//1 168//1 +f 165//1 173//1 167//1 +f 172//1 174//1 171//1 +f 175//1 173//1 165//1 +f 176//1 174//1 172//1 +f 176//1 177//1 174//1 +f 175//1 178//1 173//1 +f 179//1 177//1 176//1 +f 179//1 180//1 177//1 +f 175//1 181//1 178//1 +f 182//1 180//1 179//1 +f 182//1 183//1 180//1 +f 184//1 181//1 175//1 +f 185//1 183//1 182//1 +f 184//1 186//1 181//1 +f 185//1 187//1 183//1 +f 188//1 187//1 185//1 +f 184//1 189//1 186//1 +f 190//1 187//1 188//1 +f 184//1 191//1 189//1 +f 190//1 192//1 187//1 +f 193//1 192//1 190//1 +f 194//1 192//1 193//1 +f 184//1 195//1 191//1 +f 195//1 192//1 194//1 +f 184//1 192//1 195//1 +f 196//1 192//1 184//1 +f 196//1 197//1 192//1 +f 198//1 197//1 196//1 +f 198//1 199//1 197//1 +f 198//1 200//1 199//1 +f 201//1 200//1 198//1 +f 201//1 202//1 200//1 +f 203//1 202//1 201//1 +f 203//1 204//1 202//1 +f 205//1 204//1 203//1 +f 205//1 206//1 204//1 +f 207//1 206//1 205//1 +f 207//1 208//1 206//1 +f 209//1 208//1 207//1 +f 209//1 210//1 208//1 +f 209//1 211//1 210//1 +f 212//1 211//1 209//1 +f 212//1 213//1 211//1 +f 214//1 213//1 212//1 +f 214//1 215//1 213//1 +f 216//1 215//1 214//1 +f 216//1 217//1 215//1 +f 218//1 217//1 216//1 +f 218//1 219//1 217//1 +f 220//1 219//1 218//1 +f 220//1 221//1 219//1 +f 222//1 221//1 220//1 +f 222//1 223//1 221//1 +f 224//1 223//1 222//1 +f 224//1 225//1 223//1 +f 226//1 225//1 224//1 +f 227//1 225//1 226//1 +f 227//1 228//1 225//1 diff --git a/alphanumeric/D.mtl b/alphanumeric/D.mtl new file mode 100644 index 0000000..82260bc --- /dev/null +++ b/alphanumeric/D.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.040 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 
+d 1.000000 +illum 2 diff --git a/alphanumeric/D.obj b/alphanumeric/D.obj new file mode 100644 index 0000000..8b9e287 --- /dev/null +++ b/alphanumeric/D.obj @@ -0,0 +1,307 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib D.mtl +o D +v -0.007525 0.000000 -0.000006 +v -0.007462 0.000000 -0.008181 +v -0.007550 0.000000 -0.008181 +v -0.007217 0.000000 -0.008181 +v -0.006847 0.000000 -0.008181 +v -0.006382 0.000000 -0.008181 +v -0.005855 0.000000 -0.008181 +v -0.005297 0.000000 -0.008181 +v -0.004739 0.000000 -0.008181 +v -0.004213 0.000000 -0.008181 +v -0.003749 0.000000 -0.008181 +v -0.003378 0.000000 -0.008181 +v -0.003134 0.000000 -0.008181 +v -0.003045 0.000000 -0.008181 +v -0.001898 0.000000 -0.008179 +v -0.000909 0.000000 -0.008172 +v -0.000063 0.000000 -0.008159 +v 0.000658 0.000000 -0.008138 +v 0.001270 0.000000 -0.008107 +v -0.002448 0.000000 -0.004473 +v 0.001790 0.000000 -0.008064 +v 0.002234 0.000000 -0.008008 +v 0.002619 0.000000 -0.007938 +v 0.002961 0.000000 -0.007850 +v 0.003278 0.000000 -0.007745 +v 0.003585 0.000000 -0.007619 +v 0.003900 0.000000 -0.007472 +v 0.004267 0.000000 -0.007273 +v 0.004626 0.000000 -0.007033 +v 0.004974 0.000000 -0.006757 +v 0.005308 0.000000 -0.006446 +v 0.005626 0.000000 -0.006104 +v 0.005927 0.000000 -0.005736 +v 0.006207 0.000000 -0.005343 +v 0.006462 0.000000 -0.004931 +v 0.006691 0.000000 -0.004501 +v -0.001104 0.000000 -0.004423 +v -0.000848 0.000000 -0.004413 +v 0.006892 0.000000 -0.004057 +v -0.002448 0.000000 -0.000056 +v -0.000619 0.000000 -0.004402 +v -0.000414 0.000000 -0.004388 +v -0.000230 0.000000 -0.004372 +v -0.000064 0.000000 -0.004353 +v 0.000086 0.000000 -0.004330 +v 0.000225 0.000000 -0.004303 +v 0.000354 0.000000 -0.004271 +v 0.000477 0.000000 -0.004234 +v 0.000596 0.000000 -0.004192 +v 0.000716 0.000000 -0.004143 +v 0.000838 0.000000 -0.004087 +v 0.001024 0.000000 -0.003984 +v 0.007062 0.000000 -0.003603 +v 0.001199 0.000000 -0.003866 +v 0.001361 0.000000 -0.003734 +v 0.001511 0.000000 -0.003587 +v 0.007198 0.000000 -0.003142 +v 0.001649 0.000000 -0.003425 +v 0.001776 0.000000 -0.003247 +v 0.001891 0.000000 -0.003054 +v 0.007311 0.000000 -0.002643 +v 0.001994 0.000000 -0.002846 +v 0.002086 0.000000 -0.002621 +v 0.007403 0.000000 -0.002104 +v 0.002166 0.000000 -0.002380 +v 0.002236 0.000000 -0.002122 +v 0.002294 0.000000 -0.001847 +v 0.007471 0.000000 -0.001534 +v 0.002337 0.000000 -0.001533 +v 0.007520 0.000000 -0.000942 +v 0.002368 0.000000 -0.001158 +v 0.002387 0.000000 -0.000738 +v 0.007546 0.000000 -0.000336 +v 0.002395 0.000000 -0.000286 +v 0.007550 -0.000000 0.000274 +v 0.002392 -0.000000 0.000184 +v -0.002448 -0.000000 0.004361 +v -0.007488 -0.000000 0.008181 +v 0.002379 -0.000000 0.000657 +v 0.007532 -0.000000 0.000880 +v 0.002357 -0.000000 0.001119 +v 0.007494 -0.000000 0.001473 +v 0.002325 -0.000000 0.001555 +v 0.007435 -0.000000 0.002045 +v 0.002285 -0.000000 0.001953 +v 0.002237 -0.000000 0.002297 +v 0.007357 -0.000000 0.002586 +v 0.002182 -0.000000 0.002574 +v 0.002120 -0.000000 0.002769 +v 0.007256 -0.000000 0.003087 +v 0.002049 -0.000000 0.002919 +v 0.001974 -0.000000 0.003063 +v 0.001892 -0.000000 0.003201 +v 0.007136 -0.000000 0.003540 +v 0.001808 -0.000000 0.003332 +v 0.001719 -0.000000 0.003456 +v 0.001626 -0.000000 0.003573 +v 0.006997 -0.000000 0.003932 +v 0.001531 -0.000000 0.003682 +v 0.001432 -0.000000 0.003782 +v 0.001330 -0.000000 0.003873 +v 0.001226 -0.000000 0.003955 +v 0.006822 -0.000000 0.004326 +v 0.001120 -0.000000 0.004026 +v 0.001012 -0.000000 0.004088 +v 0.000872 
-0.000000 0.004144 +v 0.000676 -0.000000 0.004198 +v 0.000436 -0.000000 0.004251 +v 0.000161 -0.000000 0.004300 +v -0.000139 -0.000000 0.004344 +v 0.006613 -0.000000 0.004719 +v -0.000452 -0.000000 0.004383 +v -0.002440 -0.000000 0.004363 +v -0.002417 -0.000000 0.004367 +v -0.002384 -0.000000 0.004373 +v -0.002341 -0.000000 0.004381 +v -0.002293 -0.000000 0.004390 +v -0.000768 -0.000000 0.004415 +v -0.002243 -0.000000 0.004399 +v -0.002192 -0.000000 0.004408 +v -0.002144 -0.000000 0.004417 +v -0.001078 -0.000000 0.004440 +v -0.002101 -0.000000 0.004424 +v -0.002068 -0.000000 0.004431 +v -0.002045 -0.000000 0.004435 +v -0.002037 -0.000000 0.004436 +v -0.001860 -0.000000 0.004454 +v -0.001370 -0.000000 0.004455 +v -0.001634 -0.000000 0.004460 +v 0.006376 -0.000000 0.005106 +v 0.006111 -0.000000 0.005482 +v 0.005826 -0.000000 0.005842 +v 0.005521 -0.000000 0.006183 +v 0.005203 -0.000000 0.006500 +v 0.004874 -0.000000 0.006788 +v 0.004538 -0.000000 0.007043 +v 0.004200 -0.000000 0.007260 +v 0.003862 -0.000000 0.007435 +v 0.003514 -0.000000 0.007588 +v 0.003175 -0.000000 0.007719 +v 0.002830 -0.000000 0.007830 +v 0.002464 -0.000000 0.007922 +v 0.002059 -0.000000 0.007996 +v 0.001602 -0.000000 0.008055 +v 0.001075 -0.000000 0.008101 +v 0.000465 -0.000000 0.008134 +v -0.000246 -0.000000 0.008158 +v -0.001072 -0.000000 0.008172 +v -0.002029 -0.000000 0.008179 +v -0.003132 -0.000000 0.008181 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.040 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 1//1 19//1 18//1 +f 1//1 20//1 19//1 +f 20//1 21//1 19//1 +f 20//1 22//1 21//1 +f 20//1 23//1 22//1 +f 20//1 24//1 23//1 +f 20//1 25//1 24//1 +f 20//1 26//1 25//1 +f 20//1 27//1 26//1 +f 20//1 28//1 27//1 +f 20//1 29//1 28//1 +f 20//1 30//1 29//1 +f 20//1 31//1 30//1 +f 20//1 32//1 31//1 +f 20//1 33//1 32//1 +f 20//1 34//1 33//1 +f 20//1 35//1 34//1 +f 20//1 36//1 35//1 +f 20//1 37//1 36//1 +f 37//1 38//1 36//1 +f 38//1 39//1 36//1 +f 1//1 40//1 20//1 +f 41//1 39//1 38//1 +f 42//1 39//1 41//1 +f 43//1 39//1 42//1 +f 44//1 39//1 43//1 +f 45//1 39//1 44//1 +f 46//1 39//1 45//1 +f 47//1 39//1 46//1 +f 48//1 39//1 47//1 +f 49//1 39//1 48//1 +f 50//1 39//1 49//1 +f 51//1 39//1 50//1 +f 52//1 39//1 51//1 +f 52//1 53//1 39//1 +f 54//1 53//1 52//1 +f 55//1 53//1 54//1 +f 56//1 53//1 55//1 +f 56//1 57//1 53//1 +f 58//1 57//1 56//1 +f 59//1 57//1 58//1 +f 60//1 57//1 59//1 +f 60//1 61//1 57//1 +f 62//1 61//1 60//1 +f 63//1 61//1 62//1 +f 63//1 64//1 61//1 +f 65//1 64//1 63//1 +f 66//1 64//1 65//1 +f 67//1 64//1 66//1 +f 67//1 68//1 64//1 +f 69//1 68//1 67//1 +f 69//1 70//1 68//1 +f 71//1 70//1 69//1 +f 72//1 70//1 71//1 +f 72//1 73//1 70//1 +f 74//1 73//1 72//1 +f 74//1 75//1 73//1 +f 76//1 75//1 74//1 +f 1//1 77//1 40//1 +f 78//1 77//1 1//1 +f 79//1 75//1 76//1 +f 79//1 80//1 75//1 +f 81//1 80//1 79//1 +f 81//1 82//1 80//1 +f 83//1 82//1 81//1 +f 83//1 84//1 82//1 +f 85//1 84//1 83//1 +f 86//1 84//1 85//1 +f 86//1 87//1 84//1 +f 88//1 87//1 86//1 +f 89//1 87//1 88//1 +f 89//1 90//1 87//1 +f 91//1 90//1 89//1 +f 92//1 90//1 91//1 +f 93//1 90//1 92//1 +f 93//1 94//1 90//1 +f 95//1 94//1 93//1 +f 96//1 94//1 95//1 +f 97//1 94//1 96//1 +f 97//1 98//1 94//1 +f 99//1 98//1 97//1 +f 100//1 98//1 99//1 +f 101//1 98//1 100//1 +f 102//1 98//1 101//1 +f 102//1 103//1 98//1 +f 
104//1 103//1 102//1 +f 105//1 103//1 104//1 +f 106//1 103//1 105//1 +f 107//1 103//1 106//1 +f 108//1 103//1 107//1 +f 109//1 103//1 108//1 +f 110//1 103//1 109//1 +f 110//1 111//1 103//1 +f 112//1 111//1 110//1 +f 78//1 113//1 77//1 +f 78//1 114//1 113//1 +f 78//1 115//1 114//1 +f 78//1 116//1 115//1 +f 78//1 117//1 116//1 +f 118//1 111//1 112//1 +f 78//1 119//1 117//1 +f 78//1 120//1 119//1 +f 78//1 121//1 120//1 +f 122//1 111//1 118//1 +f 78//1 123//1 121//1 +f 78//1 124//1 123//1 +f 78//1 125//1 124//1 +f 78//1 126//1 125//1 +f 78//1 127//1 126//1 +f 128//1 111//1 122//1 +f 78//1 129//1 127//1 +f 129//1 111//1 128//1 +f 78//1 111//1 129//1 +f 78//1 130//1 111//1 +f 78//1 131//1 130//1 +f 78//1 132//1 131//1 +f 78//1 133//1 132//1 +f 78//1 134//1 133//1 +f 78//1 135//1 134//1 +f 78//1 136//1 135//1 +f 78//1 137//1 136//1 +f 78//1 138//1 137//1 +f 78//1 139//1 138//1 +f 78//1 140//1 139//1 +f 78//1 141//1 140//1 +f 78//1 142//1 141//1 +f 78//1 143//1 142//1 +f 78//1 144//1 143//1 +f 78//1 145//1 144//1 +f 78//1 146//1 145//1 +f 78//1 147//1 146//1 +f 78//1 148//1 147//1 +f 78//1 149//1 148//1 +f 78//1 150//1 149//1 diff --git a/alphanumeric/E.mtl b/alphanumeric/E.mtl new file mode 100644 index 0000000..f9f8262 --- /dev/null +++ b/alphanumeric/E.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.041 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/E.obj b/alphanumeric/E.obj new file mode 100644 index 0000000..5bf949b --- /dev/null +++ b/alphanumeric/E.obj @@ -0,0 +1,120 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib E.mtl +o E +v -0.006907 0.000000 -0.000000 +v -0.000125 0.000000 -0.008213 +v -0.006907 0.000000 -0.008213 +v -0.001805 0.000000 -0.004728 +v 0.006658 0.000000 -0.008213 +v 0.006658 0.000000 -0.006470 +v 0.006658 0.000000 -0.004728 +v -0.001805 0.000000 -0.003421 +v 0.002427 0.000000 -0.004728 +v -0.001805 0.000000 -0.002115 +v 0.002116 0.000000 -0.002115 +v 0.006036 0.000000 -0.002115 +v 0.006036 0.000000 -0.000435 +v 0.006036 -0.000000 0.001245 +v -0.006907 -0.000000 0.008213 +v -0.001805 -0.000000 0.001245 +v -0.001805 -0.000000 0.001275 +v 0.002116 -0.000000 0.001245 +v -0.001805 -0.000000 0.001359 +v -0.001805 -0.000000 0.001486 +v -0.001805 -0.000000 0.001645 +v -0.001805 -0.000000 0.001825 +v -0.001805 -0.000000 0.002016 +v -0.001805 -0.000000 0.002207 +v -0.001805 -0.000000 0.002388 +v -0.001805 -0.000000 0.002547 +v -0.001805 -0.000000 0.002672 +v -0.001805 -0.000000 0.002757 +v -0.001805 -0.000000 0.002788 +v -0.001804 -0.000000 0.003090 +v -0.001801 -0.000000 0.003352 +v -0.001796 -0.000000 0.003578 +v -0.001789 -0.000000 0.003768 +v -0.001779 -0.000000 0.003927 +v -0.001766 -0.000000 0.004058 +v -0.001749 -0.000000 0.004163 +v -0.001729 -0.000000 0.004246 +v -0.001705 -0.000000 0.004308 +v -0.001677 -0.000000 0.004353 +v -0.001643 -0.000000 0.004384 +v -0.001606 -0.000000 0.004405 +v -0.001540 -0.000000 0.004415 +v -0.001403 -0.000000 0.004424 +v -0.001200 -0.000000 0.004433 +v -0.000935 -0.000000 0.004441 +v -0.000615 -0.000000 0.004448 +v -0.000244 -0.000000 0.004457 +v 0.000172 -0.000000 0.004463 +v 0.000628 -0.000000 0.004469 +v 0.001119 -0.000000 0.004473 +v 0.001640 -0.000000 0.004477 +v 0.002186 -0.000000 0.004479 +v 0.002750 -0.000000 0.004479 +v 0.006907 -0.000000 0.004479 +v 0.006907 -0.000000 0.006346 +v 0.006907 -0.000000 0.008213 +v 0.000000 
-0.000000 0.008213 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.041 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 4//1 5//1 2//1 +f 4//1 6//1 5//1 +f 4//1 7//1 6//1 +f 1//1 8//1 4//1 +f 9//2 7//2 4//2 +f 1//1 10//1 8//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 15//1 16//1 1//1 +f 16//1 14//1 1//1 +f 15//1 17//1 16//1 +f 18//2 14//2 16//2 +f 15//1 19//1 17//1 +f 15//1 20//1 19//1 +f 15//1 21//1 20//1 +f 15//1 22//1 21//1 +f 15//1 23//1 22//1 +f 15//1 24//1 23//1 +f 15//1 25//1 24//1 +f 15//1 26//1 25//1 +f 15//1 27//1 26//1 +f 15//1 28//1 27//1 +f 15//1 29//1 28//1 +f 15//1 30//1 29//1 +f 15//1 31//1 30//1 +f 15//1 32//1 31//1 +f 15//1 33//1 32//1 +f 15//1 34//1 33//1 +f 15//1 35//1 34//1 +f 15//1 36//1 35//1 +f 15//1 37//1 36//1 +f 15//1 38//1 37//1 +f 15//1 39//1 38//1 +f 15//1 40//1 39//1 +f 15//1 41//1 40//1 +f 15//1 42//1 41//1 +f 15//1 43//1 42//1 +f 15//1 44//1 43//1 +f 15//1 45//1 44//1 +f 15//1 46//1 45//1 +f 15//1 47//1 46//1 +f 15//1 48//1 47//1 +f 15//1 49//1 48//1 +f 15//1 50//1 49//1 +f 15//1 51//1 50//1 +f 15//1 52//1 51//1 +f 15//1 53//1 52//1 +f 15//1 54//1 53//1 +f 15//1 55//1 54//1 +f 15//1 56//1 55//1 +f 57//2 56//2 15//2 diff --git a/alphanumeric/F.mtl b/alphanumeric/F.mtl new file mode 100644 index 0000000..8d64c24 --- /dev/null +++ b/alphanumeric/F.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.043 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/F.obj b/alphanumeric/F.obj new file mode 100644 index 0000000..8be05d5 --- /dev/null +++ b/alphanumeric/F.obj @@ -0,0 +1,46 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib F.mtl +o F +v -0.006323 0.000000 -0.000006 +v -0.000001 0.000000 -0.008181 +v -0.006347 0.000000 -0.008181 +v -0.001121 0.000000 -0.004697 +v 0.006347 0.000000 -0.008181 +v 0.006347 0.000000 -0.006439 +v 0.006347 0.000000 -0.004697 +v -0.001121 0.000000 -0.003266 +v 0.002613 0.000000 -0.004697 +v -0.001121 0.000000 -0.001835 +v 0.002054 0.000000 -0.001835 +v 0.005227 0.000000 -0.001835 +v 0.005227 0.000000 -0.000155 +v 0.005227 -0.000000 0.001525 +v -0.006284 -0.000000 0.008181 +v -0.001121 -0.000000 0.001525 +v -0.001144 -0.000000 0.004847 +v 0.002054 -0.000000 0.001525 +v -0.001183 -0.000000 0.008181 +v -0.003734 -0.000000 0.008181 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.043 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 4//1 5//1 2//1 +f 4//1 6//1 5//1 +f 4//1 7//1 6//1 +f 1//1 8//1 4//1 +f 9//2 7//2 4//2 +f 1//1 10//1 8//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 15//1 16//1 1//1 +f 16//1 14//1 1//1 +f 15//1 17//1 16//1 +f 18//2 14//2 16//2 +f 15//1 19//1 17//1 +f 20//2 19//2 15//2 diff --git a/alphanumeric/G.mtl b/alphanumeric/G.mtl new file mode 100644 index 0000000..254c229 --- /dev/null +++ b/alphanumeric/G.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.045 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/G.obj b/alphanumeric/G.obj new file mode 100644 index 0000000..0d3c01a --- /dev/null +++ b/alphanumeric/G.obj @@ -0,0 +1,387 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib G.mtl +o G +v -0.000394 0.000000 
-0.008440 +v 0.000420 0.000000 -0.008446 +v 0.000002 0.000000 -0.008447 +v 0.000852 0.000000 -0.008438 +v -0.000760 0.000000 -0.008424 +v 0.001290 0.000000 -0.008422 +v -0.001087 0.000000 -0.008401 +v 0.001724 0.000000 -0.008400 +v -0.001968 0.000000 -0.008279 +v 0.002147 0.000000 -0.008372 +v 0.002549 0.000000 -0.008338 +v 0.002924 0.000000 -0.008298 +v 0.003262 0.000000 -0.008252 +v -0.002800 0.000000 -0.008090 +v 0.003555 0.000000 -0.008202 +v 0.003947 0.000000 -0.008115 +v 0.004321 0.000000 -0.008011 +v -0.003580 0.000000 -0.007834 +v 0.004677 0.000000 -0.007888 +v 0.005015 0.000000 -0.007747 +v -0.004308 0.000000 -0.007513 +v 0.005337 0.000000 -0.007588 +v 0.005642 0.000000 -0.007410 +v -0.004983 0.000000 -0.007127 +v 0.005932 0.000000 -0.007213 +v 0.006205 0.000000 -0.006997 +v -0.005602 0.000000 -0.006677 +v 0.006464 0.000000 -0.006762 +v 0.006707 0.000000 -0.006507 +v -0.006165 0.000000 -0.006166 +v 0.006936 0.000000 -0.006232 +v 0.007152 0.000000 -0.005937 +v -0.006669 0.000000 -0.005593 +v 0.007253 0.000000 -0.005783 +v 0.007361 0.000000 -0.005598 +v 0.007472 0.000000 -0.005392 +v -0.007115 0.000000 -0.004961 +v 0.007584 0.000000 -0.005171 +v 0.007693 0.000000 -0.004944 +v -0.007500 0.000000 -0.004269 +v 0.000025 0.000000 -0.004781 +v 0.000394 0.000000 -0.004792 +v 0.007796 0.000000 -0.004719 +v 0.000681 0.000000 -0.004784 +v 0.000954 0.000000 -0.004759 +v -0.000322 0.000000 -0.004745 +v 0.001211 0.000000 -0.004717 +v -0.000647 0.000000 -0.004686 +v 0.007888 0.000000 -0.004504 +v 0.001452 0.000000 -0.004659 +v -0.000951 0.000000 -0.004602 +v 0.001679 0.000000 -0.004584 +v -0.001235 0.000000 -0.004493 +v 0.001890 0.000000 -0.004492 +v 0.007967 0.000000 -0.004306 +v -0.001500 0.000000 -0.004360 +v 0.002086 0.000000 -0.004383 +v 0.002268 0.000000 -0.004258 +v -0.001744 0.000000 -0.004201 +v 0.008030 0.000000 -0.004133 +v -0.007822 0.000000 -0.003520 +v 0.002434 0.000000 -0.004115 +v -0.001970 0.000000 -0.004016 +v 0.008073 0.000000 -0.003994 +v 0.002586 0.000000 -0.003956 +v -0.002177 0.000000 -0.003805 +v 0.008092 0.000000 -0.003896 +v 0.002723 0.000000 -0.003779 +v 0.008085 0.000000 -0.003847 +v 0.007976 0.000000 -0.003815 +v 0.007704 0.000000 -0.003756 +v -0.002367 0.000000 -0.003568 +v 0.002845 0.000000 -0.003585 +v 0.007300 0.000000 -0.003675 +v 0.006798 0.000000 -0.003578 +v 0.002891 0.000000 -0.003505 +v 0.006231 0.000000 -0.003472 +v -0.002538 0.000000 -0.003304 +v -0.008082 0.000000 -0.002714 +v 0.002936 0.000000 -0.003428 +v 0.005632 0.000000 -0.003361 +v 0.002979 0.000000 -0.003353 +v 0.005033 0.000000 -0.003253 +v 0.003019 0.000000 -0.003283 +v -0.002693 0.000000 -0.003013 +v 0.003057 0.000000 -0.003217 +v 0.004468 0.000000 -0.003153 +v 0.003091 0.000000 -0.003156 +v 0.003122 0.000000 -0.003102 +v 0.003970 0.000000 -0.003066 +v 0.003149 0.000000 -0.003054 +v 0.003572 0.000000 -0.003000 +v 0.003171 0.000000 -0.003015 +v 0.003188 0.000000 -0.002984 +v -0.002908 0.000000 -0.002498 +v 0.003306 0.000000 -0.002959 +v 0.003200 0.000000 -0.002962 +v 0.003206 0.000000 -0.002951 +v -0.008170 0.000000 -0.002335 +v -0.003075 0.000000 -0.001932 +v -0.008242 0.000000 -0.001914 +v -0.003194 0.000000 -0.001327 +v -0.008299 0.000000 -0.001459 +v -0.008339 0.000000 -0.000979 +v -0.003267 0.000000 -0.000697 +v 0.000531 -0.000000 0.000558 +v 0.004426 0.000000 -0.001159 +v 0.000531 0.000000 -0.001184 +v 0.008309 0.000000 -0.001122 +v 0.008346 -0.000000 0.002362 +v -0.008363 0.000000 -0.000482 +v -0.003294 0.000000 -0.000054 +v -0.008371 -0.000000 0.000022 +v -0.003276 -0.000000 0.000589 +v 
-0.008363 -0.000000 0.000524 +v -0.008339 -0.000000 0.001017 +v 0.000531 -0.000000 0.002300 +v 0.002211 -0.000000 0.002300 +v 0.003903 -0.000000 0.002300 +v -0.003215 -0.000000 0.001220 +v -0.008299 -0.000000 0.001490 +v -0.003110 -0.000000 0.001826 +v -0.008242 -0.000000 0.001936 +v -0.002964 -0.000000 0.002394 +v -0.008170 -0.000000 0.002346 +v 0.003866 -0.000000 0.003072 +v -0.008082 -0.000000 0.002711 +v 0.008371 -0.000000 0.005846 +v -0.002777 -0.000000 0.002911 +v -0.007910 -0.000000 0.003254 +v -0.002549 -0.000000 0.003365 +v 0.003829 -0.000000 0.003856 +v -0.007708 -0.000000 0.003780 +v -0.002282 -0.000000 0.003744 +v -0.001986 -0.000000 0.004042 +v -0.007477 -0.000000 0.004286 +v 0.003020 -0.000000 0.004254 +v -0.001649 -0.000000 0.004295 +v 0.002513 -0.000000 0.004475 +v -0.007218 -0.000000 0.004771 +v -0.001275 -0.000000 0.004502 +v 0.002003 -0.000000 0.004646 +v -0.000869 -0.000000 0.004664 +v 0.001495 -0.000000 0.004769 +v -0.000435 -0.000000 0.004779 +v 0.000992 -0.000000 0.004843 +v -0.006935 -0.000000 0.005231 +v 0.000023 -0.000000 0.004848 +v 0.000500 -0.000000 0.004869 +v -0.006629 -0.000000 0.005664 +v -0.006301 -0.000000 0.006068 +v 0.008366 -0.000000 0.005850 +v 0.008351 -0.000000 0.005859 +v 0.008328 -0.000000 0.005874 +v 0.008300 -0.000000 0.005892 +v 0.008268 -0.000000 0.005912 +v 0.008234 -0.000000 0.005933 +v 0.008200 -0.000000 0.005955 +v 0.008168 -0.000000 0.005975 +v 0.008140 -0.000000 0.005993 +v 0.008118 -0.000000 0.006008 +v 0.008103 -0.000000 0.006017 +v 0.008097 -0.000000 0.006021 +v 0.007345 -0.000000 0.006489 +v -0.005954 -0.000000 0.006441 +v -0.005590 -0.000000 0.006779 +v 0.006652 -0.000000 0.006897 +v -0.005209 -0.000000 0.007080 +v 0.006008 -0.000000 0.007247 +v -0.004816 -0.000000 0.007343 +v 0.005399 -0.000000 0.007543 +v -0.004410 -0.000000 0.007563 +v 0.004813 -0.000000 0.007789 +v -0.004066 -0.000000 0.007724 +v -0.003732 -0.000000 0.007866 +v 0.004236 -0.000000 0.007990 +v -0.003402 -0.000000 0.007988 +v -0.003072 -0.000000 0.008094 +v 0.003657 -0.000000 0.008148 +v -0.002735 -0.000000 0.008183 +v 0.003063 -0.000000 0.008269 +v -0.002386 -0.000000 0.008257 +v -0.002020 -0.000000 0.008317 +v 0.002441 -0.000000 0.008355 +v -0.001629 -0.000000 0.008364 +v 0.001778 -0.000000 0.008411 +v -0.001209 -0.000000 0.008400 +v -0.000755 -0.000000 0.008425 +v 0.001063 -0.000000 0.008440 +v -0.000260 -0.000000 0.008440 +v 0.000282 -0.000000 0.008447 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.045 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 5//1 6//1 4//1 +f 7//1 6//1 5//1 +f 7//1 8//1 6//1 +f 9//1 8//1 7//1 +f 9//1 10//1 8//1 +f 9//1 11//1 10//1 +f 9//1 12//1 11//1 +f 9//1 13//1 12//1 +f 14//1 13//1 9//1 +f 14//1 15//1 13//1 +f 14//1 16//1 15//1 +f 14//1 17//1 16//1 +f 18//1 17//1 14//1 +f 18//1 19//1 17//1 +f 18//1 20//1 19//1 +f 21//1 20//1 18//1 +f 21//1 22//1 20//1 +f 21//1 23//1 22//1 +f 24//1 23//1 21//1 +f 24//1 25//1 23//1 +f 24//1 26//1 25//1 +f 27//1 26//1 24//1 +f 27//1 28//1 26//1 +f 27//1 29//1 28//1 +f 30//1 29//1 27//1 +f 30//1 31//1 29//1 +f 30//1 32//1 31//1 +f 33//1 32//1 30//1 +f 33//1 34//1 32//1 +f 33//1 35//1 34//1 +f 33//1 36//1 35//1 +f 37//1 36//1 33//1 +f 37//1 38//1 36//1 +f 37//1 39//1 38//1 +f 40//1 41//1 37//1 +f 41//1 42//1 37//1 +f 42//1 39//1 37//1 +f 42//1 43//1 39//1 +f 44//1 43//1 42//1 +f 45//1 43//1 44//1 +f 40//1 46//1 41//1 +f 47//1 43//1 45//1 +f 40//1 48//1 46//1 +f 47//1 49//1 43//1 +f 50//1 49//1 47//1 +f 40//1 51//1 48//1 +f 52//1 49//1 50//1 +f 40//1 53//1 51//1 +f 54//1 49//1 52//1 +f 54//1 55//1 
49//1 +f 40//1 56//1 53//1 +f 57//1 55//1 54//1 +f 58//1 55//1 57//1 +f 40//1 59//1 56//1 +f 58//1 60//1 55//1 +f 61//1 59//1 40//1 +f 62//1 60//1 58//1 +f 61//1 63//1 59//1 +f 62//1 64//1 60//1 +f 65//1 64//1 62//1 +f 61//1 66//1 63//1 +f 65//1 67//1 64//1 +f 68//1 67//1 65//1 +f 68//1 69//1 67//1 +f 68//1 70//1 69//1 +f 68//1 71//1 70//1 +f 61//1 72//1 66//1 +f 73//1 71//1 68//1 +f 73//1 74//1 71//1 +f 73//1 75//1 74//1 +f 76//1 75//1 73//1 +f 76//1 77//1 75//1 +f 61//1 78//1 72//1 +f 79//1 78//1 61//1 +f 80//1 77//1 76//1 +f 80//1 81//1 77//1 +f 82//1 81//1 80//1 +f 82//1 83//1 81//1 +f 84//1 83//1 82//1 +f 79//1 85//1 78//1 +f 86//1 83//1 84//1 +f 86//1 87//1 83//1 +f 88//1 87//1 86//1 +f 89//1 87//1 88//1 +f 89//1 90//1 87//1 +f 91//1 90//1 89//1 +f 91//1 92//1 90//1 +f 93//1 92//1 91//1 +f 94//1 92//1 93//1 +f 79//1 95//1 85//1 +f 94//1 96//1 92//1 +f 97//1 96//1 94//1 +f 98//1 96//1 97//1 +f 99//1 95//1 79//1 +f 99//1 100//1 95//1 +f 101//1 100//1 99//1 +f 101//1 102//1 100//1 +f 103//1 102//1 101//1 +f 104//1 102//1 103//1 +f 104//1 105//1 102//1 +f 106//1 107//1 108//1 +f 106//1 109//1 107//1 +f 106//1 110//1 109//1 +f 111//1 105//1 104//1 +f 111//1 112//1 105//1 +f 113//1 112//1 111//1 +f 113//1 114//1 112//1 +f 115//1 114//1 113//1 +f 116//1 114//1 115//1 +f 117//1 118//1 106//1 +f 118//1 119//1 106//1 +f 119//1 110//1 106//1 +f 116//1 120//1 114//1 +f 121//1 120//1 116//1 +f 121//1 122//1 120//1 +f 123//1 122//1 121//1 +f 123//1 124//1 122//1 +f 125//1 124//1 123//1 +f 126//1 110//1 119//1 +f 127//1 124//1 125//1 +f 126//1 128//1 110//1 +f 127//1 129//1 124//1 +f 130//1 129//1 127//1 +f 130//1 131//1 129//1 +f 132//1 128//1 126//1 +f 133//1 131//1 130//1 +f 133//1 134//1 131//1 +f 133//1 135//1 134//1 +f 136//1 135//1 133//1 +f 137//1 128//1 132//1 +f 136//1 138//1 135//1 +f 139//1 128//1 137//1 +f 140//1 138//1 136//1 +f 140//1 141//1 138//1 +f 142//1 128//1 139//1 +f 140//1 143//1 141//1 +f 144//1 128//1 142//1 +f 140//1 145//1 143//1 +f 146//1 128//1 144//1 +f 147//1 145//1 140//1 +f 147//1 148//1 145//1 +f 149//1 128//1 146//1 +f 147//1 149//1 148//1 +f 147//1 128//1 149//1 +f 150//1 128//1 147//1 +f 151//1 128//1 150//1 +f 151//1 152//1 128//1 +f 151//1 153//1 152//1 +f 151//1 154//1 153//1 +f 151//1 155//1 154//1 +f 151//1 156//1 155//1 +f 151//1 157//1 156//1 +f 151//1 158//1 157//1 +f 151//1 159//1 158//1 +f 151//1 160//1 159//1 +f 151//1 161//1 160//1 +f 151//1 162//1 161//1 +f 151//1 163//1 162//1 +f 151//1 164//1 163//1 +f 165//1 164//1 151//1 +f 166//1 164//1 165//1 +f 166//1 167//1 164//1 +f 168//1 167//1 166//1 +f 168//1 169//1 167//1 +f 170//1 169//1 168//1 +f 170//1 171//1 169//1 +f 172//1 171//1 170//1 +f 172//1 173//1 171//1 +f 174//1 173//1 172//1 +f 175//1 173//1 174//1 +f 175//1 176//1 173//1 +f 177//1 176//1 175//1 +f 178//1 176//1 177//1 +f 178//1 179//1 176//1 +f 180//1 179//1 178//1 +f 180//1 181//1 179//1 +f 182//1 181//1 180//1 +f 183//1 181//1 182//1 +f 183//1 184//1 181//1 +f 185//1 184//1 183//1 +f 185//1 186//1 184//1 +f 187//1 186//1 185//1 +f 188//1 186//1 187//1 +f 188//1 189//1 186//1 +f 190//1 189//1 188//1 +f 191//1 189//1 190//1 diff --git a/alphanumeric/H.mtl b/alphanumeric/H.mtl new file mode 100644 index 0000000..2bd4a9f --- /dev/null +++ b/alphanumeric/H.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.047 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git 
a/alphanumeric/H.obj b/alphanumeric/H.obj new file mode 100644 index 0000000..b43424f --- /dev/null +++ b/alphanumeric/H.obj @@ -0,0 +1,54 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib H.mtl +o H +v -0.007903 0.000000 -0.000000 +v -0.005352 0.000000 -0.008213 +v -0.007903 0.000000 -0.008213 +v -0.002800 0.000000 -0.008213 +v -0.002800 0.000000 -0.005350 +v 0.002701 0.000000 -0.005325 +v 0.005289 0.000000 -0.008151 +v 0.002738 0.000000 -0.008151 +v 0.007841 0.000000 -0.008151 +v 0.007878 -0.000000 0.000025 +v -0.002800 0.000000 -0.002488 +v 0.002676 0.000000 -0.002488 +v -0.000062 0.000000 -0.002488 +v -0.007903 -0.000000 0.008213 +v -0.002800 -0.000000 0.001617 +v -0.000062 -0.000000 0.001617 +v 0.002676 -0.000000 0.001617 +v 0.007903 -0.000000 0.008213 +v -0.002800 -0.000000 0.004914 +v 0.002676 -0.000000 0.004914 +v -0.002800 -0.000000 0.008213 +v 0.002676 -0.000000 0.008213 +v -0.005352 -0.000000 0.008213 +v 0.005289 -0.000000 0.008213 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.047 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 6//1 7//1 8//1 +f 6//1 9//1 7//1 +f 6//1 10//1 9//1 +f 1//1 11//1 5//1 +f 12//1 10//1 6//1 +f 1//1 13//1 11//1 +f 1//1 12//1 13//1 +f 1//1 10//1 12//1 +f 14//1 15//1 1//1 +f 15//1 10//1 1//1 +f 15//1 16//1 10//1 +f 16//1 17//1 10//1 +f 17//1 18//1 10//1 +f 14//1 19//1 15//1 +f 20//1 18//1 17//1 +f 14//1 21//1 19//1 +f 22//1 18//1 20//1 +f 23//2 21//2 14//2 +f 24//2 18//2 22//2 diff --git a/alphanumeric/I.mtl b/alphanumeric/I.mtl new file mode 100644 index 0000000..7e06fb8 --- /dev/null +++ b/alphanumeric/I.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.050 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/I.obj b/alphanumeric/I.obj new file mode 100644 index 0000000..3e706e4 --- /dev/null +++ b/alphanumeric/I.obj @@ -0,0 +1,134 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib I.mtl +o I +v -0.000247 0.000000 -0.008274 +v 0.002551 0.000000 -0.008275 +v 0.000087 0.000000 -0.008275 +v 0.002551 0.000000 -0.000000 +v -0.000570 0.000000 -0.008271 +v -0.000878 0.000000 -0.008267 +v -0.001168 0.000000 -0.008263 +v -0.001439 0.000000 -0.008256 +v -0.001683 0.000000 -0.008249 +v -0.001900 0.000000 -0.008241 +v -0.002087 0.000000 -0.008232 +v -0.002240 0.000000 -0.008222 +v -0.002356 0.000000 -0.008212 +v -0.002433 0.000000 -0.008199 +v -0.002465 0.000000 -0.008187 +v -0.002476 0.000000 -0.008104 +v -0.002487 0.000000 -0.007877 +v -0.002499 0.000000 -0.007519 +v -0.002509 0.000000 -0.007037 +v -0.002518 0.000000 -0.006444 +v -0.002527 0.000000 -0.005749 +v -0.002534 0.000000 -0.004960 +v -0.002540 0.000000 -0.004091 +v -0.002544 0.000000 -0.003148 +v -0.002548 0.000000 -0.002143 +v -0.002550 0.000000 -0.001087 +v -0.002551 -0.000000 0.000013 +v 0.002551 -0.000000 0.008275 +v -0.002551 -0.000000 0.000172 +v -0.002551 -0.000000 0.000614 +v -0.002551 -0.000000 0.001280 +v -0.002551 -0.000000 0.002116 +v -0.002551 -0.000000 0.003063 +v -0.002551 -0.000000 0.004069 +v -0.002551 -0.000000 0.005073 +v -0.002551 -0.000000 0.006022 +v -0.002551 -0.000000 0.006857 +v -0.002551 -0.000000 0.007525 +v -0.002551 -0.000000 0.007965 +v -0.002551 -0.000000 0.008126 +v -0.002265 -0.000000 0.008199 +v -0.002207 -0.000000 0.008210 +v -0.002111 -0.000000 0.008219 +v -0.001981 -0.000000 0.008228 +v -0.001816 -0.000000 0.008236 +v 
-0.001623 -0.000000 0.008244 +v -0.001404 -0.000000 0.008251 +v -0.001163 -0.000000 0.008257 +v -0.000900 -0.000000 0.008264 +v -0.000621 -0.000000 0.008267 +v -0.000328 -0.000000 0.008272 +v -0.000025 -0.000000 0.008274 +v 0.000286 -0.000000 0.008275 +v 0.000331 -0.000000 0.008275 +v 0.000454 -0.000000 0.008275 +v 0.000640 -0.000000 0.008275 +v 0.000874 -0.000000 0.008275 +v 0.001138 -0.000000 0.008275 +v 0.001418 -0.000000 0.008275 +v 0.001700 -0.000000 0.008275 +v 0.001964 -0.000000 0.008275 +v 0.002198 -0.000000 0.008275 +v 0.002384 -0.000000 0.008275 +v 0.002507 -0.000000 0.008275 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.050 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 4//1 1//1 +f 6//1 4//1 5//1 +f 7//1 4//1 6//1 +f 8//1 4//1 7//1 +f 9//1 4//1 8//1 +f 10//1 4//1 9//1 +f 11//1 4//1 10//1 +f 12//1 4//1 11//1 +f 13//1 4//1 12//1 +f 14//1 4//1 13//1 +f 15//1 4//1 14//1 +f 16//1 4//1 15//1 +f 17//1 4//1 16//1 +f 18//1 4//1 17//1 +f 19//1 4//1 18//1 +f 20//1 4//1 19//1 +f 21//1 4//1 20//1 +f 22//1 4//1 21//1 +f 23//1 4//1 22//1 +f 24//1 4//1 23//1 +f 25//1 4//1 24//1 +f 26//1 4//1 25//1 +f 27//1 4//1 26//1 +f 27//1 28//1 4//1 +f 29//1 28//1 27//1 +f 30//1 28//1 29//1 +f 31//1 28//1 30//1 +f 32//1 28//1 31//1 +f 33//1 28//1 32//1 +f 34//1 28//1 33//1 +f 35//1 28//1 34//1 +f 36//1 28//1 35//1 +f 37//1 28//1 36//1 +f 38//1 28//1 37//1 +f 39//1 28//1 38//1 +f 40//1 28//1 39//1 +f 41//1 28//1 40//1 +f 42//1 28//1 41//1 +f 43//1 28//1 42//1 +f 44//1 28//1 43//1 +f 45//1 28//1 44//1 +f 46//1 28//1 45//1 +f 47//1 28//1 46//1 +f 48//1 28//1 47//1 +f 49//1 28//1 48//1 +f 50//1 28//1 49//1 +f 51//1 28//1 50//1 +f 52//1 28//1 51//1 +f 53//1 28//1 52//1 +f 54//2 28//2 53//2 +f 55//2 28//2 54//2 +f 56//2 28//2 55//2 +f 57//2 28//2 56//2 +f 58//2 28//2 57//2 +f 59//2 28//2 58//2 +f 60//2 28//2 59//2 +f 61//2 28//2 60//2 +f 62//2 28//2 61//2 +f 63//2 28//2 62//2 +f 64//2 28//2 63//2 diff --git a/alphanumeric/J.mtl b/alphanumeric/J.mtl new file mode 100644 index 0000000..ace39d1 --- /dev/null +++ b/alphanumeric/J.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.051 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/J.obj b/alphanumeric/J.obj new file mode 100644 index 0000000..087e577 --- /dev/null +++ b/alphanumeric/J.obj @@ -0,0 +1,297 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib J.mtl +o J +v 0.001549 0.000000 -0.008227 +v 0.004101 0.000000 -0.008336 +v 0.001550 0.000000 -0.008336 +v 0.006653 0.000000 -0.008336 +v 0.006651 0.000000 -0.008225 +v 0.001545 0.000000 -0.007920 +v 0.006648 0.000000 -0.007916 +v 0.001540 0.000000 -0.007457 +v 0.006643 0.000000 -0.007449 +v 0.001534 0.000000 -0.006879 +v 0.006637 0.000000 -0.006863 +v 0.001527 0.000000 -0.006221 +v 0.006629 0.000000 -0.006198 +v 0.001519 0.000000 -0.005524 +v 0.006622 0.000000 -0.005493 +v 0.001511 0.000000 -0.004829 +v 0.006614 0.000000 -0.004790 +v 0.001504 0.000000 -0.004171 +v 0.006607 0.000000 -0.004125 +v 0.001498 0.000000 -0.003592 +v 0.006600 0.000000 -0.003539 +v 0.001492 0.000000 -0.003129 +v 0.006595 0.000000 -0.003071 +v 0.001489 0.000000 -0.002823 +v 0.006592 0.000000 -0.002763 +v 0.001488 0.000000 -0.002713 +v 0.006590 0.000000 -0.002651 +v 0.001481 0.000000 -0.001781 +v 0.006575 0.000000 -0.001221 +v 0.001472 0.000000 -0.000932 +v 0.006557 0.000000 -0.000003 +v 0.001462 0.000000 -0.000163 +v 
0.001450 -0.000000 0.000527 +v 0.006536 -0.000000 0.001026 +v 0.001436 -0.000000 0.001139 +v 0.006509 -0.000000 0.001888 +v 0.001421 -0.000000 0.001675 +v 0.001404 -0.000000 0.002136 +v 0.006474 -0.000000 0.002605 +v -0.002444 -0.000000 0.001948 +v -0.002011 -0.000000 0.001959 +v -0.002196 -0.000000 0.001942 +v -0.002766 -0.000000 0.001974 +v -0.001880 -0.000000 0.002002 +v -0.003170 -0.000000 0.002021 +v -0.001794 -0.000000 0.002071 +v -0.003665 -0.000000 0.002085 +v -0.001744 -0.000000 0.002168 +v -0.004262 -0.000000 0.002165 +v 0.001385 -0.000000 0.002525 +v -0.004578 -0.000000 0.002208 +v -0.001719 -0.000000 0.002296 +v -0.004881 -0.000000 0.002250 +v -0.005171 -0.000000 0.002291 +v -0.005442 -0.000000 0.002330 +v -0.001711 -0.000000 0.002456 +v -0.005694 -0.000000 0.002367 +v -0.005922 -0.000000 0.002401 +v -0.006124 -0.000000 0.002432 +v -0.006297 -0.000000 0.002460 +v -0.001711 -0.000000 0.002650 +v -0.006437 -0.000000 0.002483 +v -0.006543 -0.000000 0.002502 +v -0.006612 -0.000000 0.002516 +v -0.006639 -0.000000 0.002525 +v 0.001364 -0.000000 0.002843 +v -0.006653 -0.000000 0.002576 +v -0.006652 -0.000000 0.002678 +v 0.006429 -0.000000 0.003199 +v -0.001709 -0.000000 0.002718 +v -0.006637 -0.000000 0.002825 +v -0.001705 -0.000000 0.002790 +v -0.001697 -0.000000 0.002865 +v -0.006611 -0.000000 0.003007 +v 0.001341 -0.000000 0.003090 +v -0.001688 -0.000000 0.002943 +v -0.001675 -0.000000 0.003021 +v -0.006576 -0.000000 0.003216 +v -0.001661 -0.000000 0.003101 +v 0.001316 -0.000000 0.003270 +v -0.001644 -0.000000 0.003180 +v -0.001626 -0.000000 0.003258 +v 0.006371 -0.000000 0.003693 +v -0.006532 -0.000000 0.003446 +v -0.001606 -0.000000 0.003332 +v 0.001289 -0.000000 0.003384 +v -0.001584 -0.000000 0.003404 +v 0.001150 -0.000000 0.003705 +v -0.001561 -0.000000 0.003471 +v -0.006481 -0.000000 0.003686 +v -0.001536 -0.000000 0.003533 +v -0.001372 -0.000000 0.003837 +v -0.006426 -0.000000 0.003927 +v 0.006298 -0.000000 0.004109 +v 0.000963 -0.000000 0.003971 +v -0.001164 -0.000000 0.004081 +v -0.006368 -0.000000 0.004165 +v 0.000737 -0.000000 0.004182 +v -0.000922 -0.000000 0.004269 +v 0.006209 -0.000000 0.004469 +v -0.006308 -0.000000 0.004389 +v 0.000482 -0.000000 0.004339 +v -0.000655 -0.000000 0.004397 +v 0.000206 -0.000000 0.004437 +v -0.006249 -0.000000 0.004592 +v -0.000372 -0.000000 0.004467 +v -0.000082 -0.000000 0.004482 +v 0.006101 -0.000000 0.004796 +v -0.006191 -0.000000 0.004765 +v -0.005940 -0.000000 0.005354 +v 0.005971 -0.000000 0.005111 +v 0.005819 -0.000000 0.005437 +v -0.005644 -0.000000 0.005885 +v 0.005702 -0.000000 0.005646 +v 0.005559 -0.000000 0.005861 +v 0.005393 -0.000000 0.006078 +v -0.005304 -0.000000 0.006362 +v 0.005207 -0.000000 0.006297 +v 0.005003 -0.000000 0.006511 +v -0.004917 -0.000000 0.006786 +v 0.004784 -0.000000 0.006723 +v 0.004554 -0.000000 0.006924 +v -0.004482 -0.000000 0.007156 +v 0.004315 -0.000000 0.007116 +v 0.004070 -0.000000 0.007296 +v -0.003998 -0.000000 0.007475 +v 0.003823 -0.000000 0.007459 +v 0.003575 -0.000000 0.007604 +v -0.003461 -0.000000 0.007741 +v 0.003330 -0.000000 0.007728 +v 0.003092 -0.000000 0.007827 +v -0.002872 -0.000000 0.007958 +v 0.002852 -0.000000 0.007919 +v 0.002608 -0.000000 0.008001 +v -0.002229 -0.000000 0.008125 +v 0.002359 -0.000000 0.008073 +v 0.002104 -0.000000 0.008136 +v -0.001529 -0.000000 0.008243 +v 0.001841 -0.000000 0.008190 +v 0.001570 -0.000000 0.008235 +v 0.001289 -0.000000 0.008272 +v -0.000772 -0.000000 0.008313 +v 0.000997 -0.000000 0.008300 +v 0.000693 -0.000000 0.008320 +v 0.000044 
-0.000000 0.008336 +v 0.000376 -0.000000 0.008333 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.051 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 6//1 5//1 1//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 8//1 9//1 7//1 +f 10//1 9//1 8//1 +f 10//1 11//1 9//1 +f 12//1 11//1 10//1 +f 12//1 13//1 11//1 +f 14//1 13//1 12//1 +f 14//1 15//1 13//1 +f 16//1 15//1 14//1 +f 16//1 17//1 15//1 +f 18//1 17//1 16//1 +f 18//1 19//1 17//1 +f 20//1 19//1 18//1 +f 20//1 21//1 19//1 +f 22//1 21//1 20//1 +f 22//1 23//1 21//1 +f 24//1 23//1 22//1 +f 24//1 25//1 23//1 +f 26//1 25//1 24//1 +f 26//1 27//1 25//1 +f 28//1 27//1 26//1 +f 28//1 29//1 27//1 +f 30//1 29//1 28//1 +f 30//1 31//1 29//1 +f 32//1 31//1 30//1 +f 33//1 31//1 32//1 +f 33//1 34//1 31//1 +f 35//1 34//1 33//1 +f 35//1 36//1 34//1 +f 37//1 36//1 35//1 +f 38//1 36//1 37//1 +f 38//1 39//1 36//1 +f 40//1 41//1 42//1 +f 43//1 41//1 40//1 +f 43//1 44//1 41//1 +f 45//1 44//1 43//1 +f 45//1 46//1 44//1 +f 47//1 46//1 45//1 +f 47//1 48//1 46//1 +f 49//1 48//1 47//1 +f 50//1 39//1 38//1 +f 51//1 48//1 49//1 +f 51//1 52//1 48//1 +f 53//1 52//1 51//1 +f 54//1 52//1 53//1 +f 55//1 52//1 54//1 +f 55//1 56//1 52//1 +f 57//1 56//1 55//1 +f 58//1 56//1 57//1 +f 59//1 56//1 58//1 +f 60//1 56//1 59//1 +f 60//1 61//1 56//1 +f 62//1 61//1 60//1 +f 63//1 61//1 62//1 +f 64//1 61//1 63//1 +f 65//1 61//1 64//1 +f 66//1 39//1 50//1 +f 67//1 61//1 65//1 +f 68//1 61//1 67//1 +f 66//1 69//1 39//1 +f 68//1 70//1 61//1 +f 71//1 70//1 68//1 +f 71//1 72//1 70//1 +f 71//1 73//1 72//1 +f 74//1 73//1 71//1 +f 75//1 69//1 66//1 +f 74//1 76//1 73//1 +f 74//1 77//1 76//1 +f 78//1 77//1 74//1 +f 78//1 79//1 77//1 +f 80//1 69//1 75//1 +f 78//1 81//1 79//1 +f 78//1 82//1 81//1 +f 80//1 83//1 69//1 +f 84//1 82//1 78//1 +f 84//1 85//1 82//1 +f 86//1 83//1 80//1 +f 84//1 87//1 85//1 +f 88//1 83//1 86//1 +f 84//1 89//1 87//1 +f 90//1 89//1 84//1 +f 90//1 91//1 89//1 +f 90//1 92//1 91//1 +f 93//1 92//1 90//1 +f 88//1 94//1 83//1 +f 95//1 94//1 88//1 +f 93//1 96//1 92//1 +f 97//1 96//1 93//1 +f 98//1 94//1 95//1 +f 97//1 99//1 96//1 +f 98//1 100//1 94//1 +f 101//1 99//1 97//1 +f 102//1 100//1 98//1 +f 101//1 103//1 99//1 +f 104//1 100//1 102//1 +f 105//1 103//1 101//1 +f 105//1 106//1 103//1 +f 107//1 100//1 104//1 +f 105//1 107//1 106//1 +f 107//1 108//1 100//1 +f 105//1 108//1 107//1 +f 109//1 108//1 105//1 +f 110//1 108//1 109//1 +f 110//1 111//1 108//1 +f 110//1 112//1 111//1 +f 113//1 112//1 110//1 +f 113//1 114//1 112//1 +f 113//1 115//1 114//1 +f 113//1 116//1 115//1 +f 117//1 116//1 113//1 +f 117//1 118//1 116//1 +f 117//1 119//1 118//1 +f 120//1 119//1 117//1 +f 120//1 121//1 119//1 +f 120//1 122//1 121//1 +f 123//1 122//1 120//1 +f 123//1 124//1 122//1 +f 123//1 125//1 124//1 +f 126//1 125//1 123//1 +f 126//1 127//1 125//1 +f 126//1 128//1 127//1 +f 129//1 128//1 126//1 +f 129//1 130//1 128//1 +f 129//1 131//1 130//1 +f 132//1 131//1 129//1 +f 132//1 133//1 131//1 +f 132//1 134//1 133//1 +f 135//1 134//1 132//1 +f 135//1 136//1 134//1 +f 135//1 137//1 136//1 +f 138//1 137//1 135//1 +f 138//1 139//1 137//1 +f 138//1 140//1 139//1 +f 138//1 141//1 140//1 +f 142//1 141//1 138//1 +f 142//1 143//1 141//1 +f 142//1 144//1 143//1 +f 145//1 144//1 142//1 +f 145//1 146//1 144//1 diff --git a/alphanumeric/K.mtl b/alphanumeric/K.mtl new file mode 100644 index 0000000..dd67a75 --- /dev/null +++ b/alphanumeric/K.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.054 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 
0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/K.obj b/alphanumeric/K.obj new file mode 100644 index 0000000..2e6d55c --- /dev/null +++ b/alphanumeric/K.obj @@ -0,0 +1,249 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib K.mtl +o K +v -0.008687 0.000000 -0.000038 +v -0.006160 0.000000 -0.008275 +v -0.008712 0.000000 -0.008275 +v -0.003609 0.000000 -0.008275 +v -0.003584 0.000000 -0.005201 +v 0.005465 0.000000 -0.008252 +v 0.006307 0.000000 -0.008252 +v 0.005897 0.000000 -0.008253 +v 0.006690 0.000000 -0.008249 +v 0.005016 0.000000 -0.008249 +v 0.007044 0.000000 -0.008247 +v 0.004950 0.000000 -0.008249 +v 0.004770 0.000000 -0.008247 +v 0.004496 0.000000 -0.008244 +v 0.007363 0.000000 -0.008241 +v 0.004154 0.000000 -0.008240 +v 0.007645 0.000000 -0.008236 +v 0.003766 0.000000 -0.008236 +v 0.007884 0.000000 -0.008229 +v 0.003354 0.000000 -0.008230 +v 0.002943 0.000000 -0.008226 +v 0.008077 0.000000 -0.008220 +v 0.002554 0.000000 -0.008222 +v 0.002212 0.000000 -0.008218 +v 0.008220 0.000000 -0.008210 +v 0.001939 0.000000 -0.008214 +v 0.001758 0.000000 -0.008213 +v 0.001693 0.000000 -0.008213 +v -0.000933 0.000000 -0.005164 +v 0.008308 0.000000 -0.008199 +v 0.008339 0.000000 -0.008187 +v 0.008312 0.000000 -0.008144 +v 0.008233 0.000000 -0.008050 +v 0.008107 0.000000 -0.007906 +v 0.007937 0.000000 -0.007717 +v 0.007726 0.000000 -0.007488 +v 0.007478 0.000000 -0.007221 +v 0.007197 0.000000 -0.006921 +v 0.006886 0.000000 -0.006592 +v 0.006548 0.000000 -0.006237 +v 0.006188 0.000000 -0.005859 +v 0.005809 0.000000 -0.005463 +v 0.005414 0.000000 -0.005052 +v -0.003547 0.000000 -0.002116 +v 0.004782 0.000000 -0.004393 +v 0.004248 0.000000 -0.003835 +v 0.003806 0.000000 -0.003368 +v 0.003448 0.000000 -0.002983 +v 0.003165 0.000000 -0.002672 +v 0.002950 0.000000 -0.002424 +v 0.002795 0.000000 -0.002230 +v 0.002693 0.000000 -0.002081 +v 0.002635 0.000000 -0.001969 +v 0.002614 0.000000 -0.001883 +v 0.002622 0.000000 -0.001815 +v 0.002651 0.000000 -0.001755 +v 0.002885 0.000000 -0.001382 +v 0.003290 0.000000 -0.000726 +v 0.003829 -0.000000 0.000153 +v -0.008649 -0.000000 0.008213 +v -0.001079 -0.000000 0.001638 +v -0.001017 -0.000000 0.001587 +v -0.000976 -0.000000 0.001559 +v -0.000958 -0.000000 0.001555 +v 0.004466 -0.000000 0.001194 +v 0.005164 -0.000000 0.002339 +v -0.000934 -0.000000 0.001595 +v -0.000880 -0.000000 0.001692 +v -0.001160 -0.000000 0.001711 +v -0.000799 -0.000000 0.001842 +v -0.001259 -0.000000 0.001804 +v -0.001374 -0.000000 0.001914 +v -0.000694 -0.000000 0.002043 +v -0.001502 -0.000000 0.002041 +v -0.001644 -0.000000 0.002183 +v -0.000565 -0.000000 0.002288 +v -0.001796 -0.000000 0.002337 +v -0.000415 -0.000000 0.002575 +v -0.001958 -0.000000 0.002503 +v 0.005887 -0.000000 0.003527 +v -0.002127 -0.000000 0.002678 +v -0.000247 -0.000000 0.002900 +v -0.002302 -0.000000 0.002862 +v -0.003597 -0.000000 0.004230 +v -0.000061 -0.000000 0.003257 +v 0.000139 -0.000000 0.003644 +v 0.006598 -0.000000 0.004699 +v 0.000352 -0.000000 0.004056 +v 0.000576 -0.000000 0.004490 +v -0.003609 -0.000000 0.006258 +v 0.000809 -0.000000 0.004941 +v 0.007262 -0.000000 0.005795 +v 0.000843 -0.000000 0.005005 +v 0.000935 -0.000000 0.005187 +v 0.001076 -0.000000 0.005462 +v 0.001251 -0.000000 0.005805 +v 0.007842 -0.000000 0.006753 +v 0.001450 -0.000000 0.006194 +v 0.001662 -0.000000 0.006607 +v -0.003609 -0.000000 0.008275 +v 0.001873 -0.000000 0.007021 +v 0.008301 -0.000000 0.007515 +v 0.002072 -0.000000 0.007410 +v 0.002248 
-0.000000 0.007753 +v 0.008603 -0.000000 0.008023 +v 0.002388 -0.000000 0.008027 +v 0.008712 -0.000000 0.008213 +v 0.002481 -0.000000 0.008209 +v 0.002514 -0.000000 0.008275 +v -0.006123 -0.000000 0.008249 +v 0.008684 -0.000000 0.008221 +v 0.008601 -0.000000 0.008230 +v 0.008468 -0.000000 0.008237 +v 0.008287 -0.000000 0.008245 +v 0.008064 -0.000000 0.008252 +v 0.007802 -0.000000 0.008257 +v 0.007504 -0.000000 0.008263 +v 0.007174 -0.000000 0.008267 +v 0.006817 -0.000000 0.008271 +v 0.006435 -0.000000 0.008272 +v 0.006032 -0.000000 0.008275 +v 0.005613 -0.000000 0.008275 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.054 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 6//1 7//1 8//1 +f 6//1 9//1 7//1 +f 10//1 9//1 6//1 +f 10//1 11//1 9//1 +f 12//1 11//1 10//1 +f 13//1 11//1 12//1 +f 14//1 11//1 13//1 +f 14//1 15//1 11//1 +f 16//1 15//1 14//1 +f 16//1 17//1 15//1 +f 18//1 17//1 16//1 +f 18//1 19//1 17//1 +f 20//1 19//1 18//1 +f 21//1 19//1 20//1 +f 21//1 22//1 19//1 +f 23//1 22//1 21//1 +f 24//1 22//1 23//1 +f 24//1 25//1 22//1 +f 26//1 25//1 24//1 +f 27//1 25//1 26//1 +f 28//1 25//1 27//1 +f 29//1 25//1 28//1 +f 29//1 30//1 25//1 +f 29//1 31//1 30//1 +f 29//1 32//1 31//1 +f 29//1 33//1 32//1 +f 29//1 34//1 33//1 +f 29//1 35//1 34//1 +f 29//1 36//1 35//1 +f 29//1 37//1 36//1 +f 29//1 38//1 37//1 +f 29//1 39//1 38//1 +f 29//1 40//1 39//1 +f 29//1 41//1 40//1 +f 29//1 42//1 41//1 +f 29//1 43//1 42//1 +f 1//1 44//1 5//1 +f 44//1 43//1 29//1 +f 44//1 45//1 43//1 +f 44//1 46//1 45//1 +f 44//1 47//1 46//1 +f 44//1 48//1 47//1 +f 44//1 49//1 48//1 +f 44//1 50//1 49//1 +f 44//1 51//1 50//1 +f 44//1 52//1 51//1 +f 1//1 52//1 44//1 +f 1//1 53//1 52//1 +f 1//1 54//1 53//1 +f 1//1 55//1 54//1 +f 1//1 56//1 55//1 +f 1//1 57//1 56//1 +f 1//1 58//1 57//1 +f 1//1 59//1 58//1 +f 60//1 61//1 1//1 +f 61//1 62//1 1//1 +f 62//1 63//1 1//1 +f 63//1 59//1 1//1 +f 63//1 64//1 59//1 +f 64//1 65//1 59//1 +f 64//1 66//1 65//1 +f 67//1 66//1 64//1 +f 68//1 66//1 67//1 +f 60//1 69//1 61//1 +f 70//1 66//1 68//1 +f 60//1 71//1 69//1 +f 60//1 72//1 71//1 +f 73//1 66//1 70//1 +f 60//1 74//1 72//1 +f 60//1 75//1 74//1 +f 76//1 66//1 73//1 +f 60//1 77//1 75//1 +f 78//1 66//1 76//1 +f 60//1 79//1 77//1 +f 78//1 80//1 66//1 +f 60//1 81//1 79//1 +f 82//1 80//1 78//1 +f 60//1 83//1 81//1 +f 60//1 84//1 83//1 +f 85//1 80//1 82//1 +f 86//1 80//1 85//1 +f 86//1 87//1 80//1 +f 88//1 87//1 86//1 +f 89//1 87//1 88//1 +f 60//1 90//1 84//1 +f 91//1 87//1 89//1 +f 91//1 92//1 87//1 +f 93//1 92//1 91//1 +f 94//1 92//1 93//1 +f 95//1 92//1 94//1 +f 96//1 92//1 95//1 +f 96//1 97//1 92//1 +f 98//1 97//1 96//1 +f 99//1 97//1 98//1 +f 60//1 100//1 90//1 +f 101//1 97//1 99//1 +f 101//1 102//1 97//1 +f 103//1 102//1 101//1 +f 104//1 102//1 103//1 +f 104//1 105//1 102//1 +f 106//1 105//1 104//1 +f 106//1 107//1 105//1 +f 108//1 107//1 106//1 +f 109//1 107//1 108//1 +f 110//1 100//1 60//1 +f 109//1 111//1 107//1 +f 109//1 112//1 111//1 +f 109//1 113//1 112//1 +f 109//1 114//1 113//1 +f 109//1 115//1 114//1 +f 109//1 116//1 115//1 +f 109//1 117//1 116//1 +f 109//1 118//1 117//1 +f 109//1 119//1 118//1 +f 109//1 120//1 119//1 +f 109//1 121//1 120//1 +f 109//1 122//1 121//1 diff --git a/alphanumeric/L.mtl b/alphanumeric/L.mtl new file mode 100644 index 0000000..0e018dd --- /dev/null +++ b/alphanumeric/L.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.056 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 
+d 1.000000 +illum 2 diff --git a/alphanumeric/L.obj b/alphanumeric/L.obj new file mode 100644 index 0000000..b23395f --- /dev/null +++ b/alphanumeric/L.obj @@ -0,0 +1,96 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib L.mtl +o L +v -0.006534 0.000000 -0.000000 +v -0.003983 0.000000 -0.008213 +v -0.006534 0.000000 -0.008213 +v -0.001431 0.000000 -0.008213 +v -0.001431 0.000000 -0.008092 +v -0.001431 0.000000 -0.007757 +v -0.001431 0.000000 -0.007253 +v -0.001431 0.000000 -0.006618 +v -0.001431 0.000000 -0.005901 +v -0.001431 0.000000 -0.005139 +v -0.001431 0.000000 -0.004378 +v -0.001431 0.000000 -0.003660 +v -0.001431 0.000000 -0.003026 +v -0.001431 0.000000 -0.002521 +v -0.001431 0.000000 -0.002187 +v -0.001431 0.000000 -0.002066 +v -0.001431 0.000000 -0.000791 +v -0.001429 -0.000000 0.000297 +v -0.006534 -0.000000 0.008213 +v -0.001425 -0.000000 0.001213 +v -0.001418 -0.000000 0.001973 +v -0.001409 -0.000000 0.002590 +v -0.001397 -0.000000 0.003080 +v -0.001381 -0.000000 0.003457 +v -0.001361 -0.000000 0.003735 +v -0.001337 -0.000000 0.003930 +v -0.001307 -0.000000 0.004054 +v -0.001273 -0.000000 0.004126 +v -0.001232 -0.000000 0.004155 +v -0.001170 -0.000000 0.004166 +v -0.001043 -0.000000 0.004174 +v -0.000855 -0.000000 0.004184 +v -0.000612 -0.000000 0.004192 +v -0.000318 -0.000000 0.004200 +v 0.000022 -0.000000 0.004208 +v 0.000402 -0.000000 0.004213 +v 0.000819 -0.000000 0.004220 +v 0.001267 -0.000000 0.004224 +v 0.001741 -0.000000 0.004228 +v 0.002237 -0.000000 0.004229 +v 0.002750 -0.000000 0.004231 +v 0.006534 -0.000000 0.004231 +v 0.006534 -0.000000 0.006222 +v 0.006534 -0.000000 0.008213 +v -0.000000 -0.000000 0.008213 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.056 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 19//1 18//1 1//1 +f 19//1 20//1 18//1 +f 19//1 21//1 20//1 +f 19//1 22//1 21//1 +f 19//1 23//1 22//1 +f 19//1 24//1 23//1 +f 19//1 25//1 24//1 +f 19//1 26//1 25//1 +f 19//1 27//1 26//1 +f 19//1 28//1 27//1 +f 19//1 29//1 28//1 +f 19//1 30//1 29//1 +f 19//1 31//1 30//1 +f 19//1 32//1 31//1 +f 19//1 33//1 32//1 +f 19//1 34//1 33//1 +f 19//1 35//1 34//1 +f 19//1 36//1 35//1 +f 19//1 37//1 36//1 +f 19//1 38//1 37//1 +f 19//1 39//1 38//1 +f 19//1 40//1 39//1 +f 19//1 41//1 40//1 +f 19//1 42//1 41//1 +f 19//1 43//1 42//1 +f 19//1 44//1 43//1 +f 45//2 44//2 19//2 diff --git a/alphanumeric/M.mtl b/alphanumeric/M.mtl new file mode 100644 index 0000000..f19ffd1 --- /dev/null +++ b/alphanumeric/M.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.057 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/M.obj b/alphanumeric/M.obj new file mode 100644 index 0000000..8d454e1 --- /dev/null +++ b/alphanumeric/M.obj @@ -0,0 +1,329 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib M.mtl +o M +v -0.009241 0.000000 -0.000055 +v -0.005943 0.000000 -0.008267 +v -0.009241 0.000000 -0.008267 +v -0.002645 0.000000 -0.008267 +v -0.002643 0.000000 -0.008261 +v 0.002408 0.000000 -0.007620 +v 0.005918 0.000000 -0.008242 +v 0.002607 0.000000 -0.008267 +v -0.002636 0.000000 -0.008246 +v -0.002627 
0.000000 -0.008222 +v 0.009241 0.000000 -0.008205 +v -0.002616 0.000000 -0.008193 +v 0.009241 -0.000000 0.000007 +v -0.002603 0.000000 -0.008159 +v -0.002589 0.000000 -0.008124 +v -0.002575 0.000000 -0.008088 +v -0.002562 0.000000 -0.008055 +v -0.002550 0.000000 -0.008026 +v -0.002541 0.000000 -0.008002 +v -0.002535 0.000000 -0.007986 +v -0.002533 0.000000 -0.007981 +v -0.002507 0.000000 -0.007904 +v -0.002462 0.000000 -0.007750 +v -0.002400 0.000000 -0.007526 +v 0.002374 0.000000 -0.007492 +v -0.002321 0.000000 -0.007238 +v 0.002321 0.000000 -0.007292 +v 0.002252 0.000000 -0.007027 +v -0.002227 0.000000 -0.006890 +v 0.002167 0.000000 -0.006702 +v -0.002121 0.000000 -0.006489 +v 0.002070 0.000000 -0.006323 +v 0.005009 0.000000 -0.003924 +v -0.004947 0.000000 -0.003701 +v -0.002002 0.000000 -0.006041 +v 0.001960 0.000000 -0.005897 +v -0.001874 0.000000 -0.005551 +v 0.001840 0.000000 -0.005429 +v -0.001736 0.000000 -0.005025 +v 0.001711 0.000000 -0.004926 +v -0.001592 0.000000 -0.004469 +v 0.001575 0.000000 -0.004393 +v -0.001442 0.000000 -0.003889 +v 0.001433 0.000000 -0.003838 +v 0.003453 -0.000000 0.002148 +v 0.005047 -0.000000 0.002110 +v -0.001288 0.000000 -0.003290 +v 0.001287 0.000000 -0.003265 +v -0.005009 -0.000000 0.002260 +v -0.003454 -0.000000 0.002123 +v -0.001134 0.000000 -0.002691 +v 0.001139 0.000000 -0.002680 +v -0.000984 0.000000 -0.002111 +v 0.000957 0.000000 -0.001978 +v -0.000840 0.000000 -0.001557 +v 0.000792 0.000000 -0.001345 +v -0.000703 0.000000 -0.001033 +v 0.000642 0.000000 -0.000781 +v -0.000575 0.000000 -0.000545 +v 0.000508 0.000000 -0.000285 +v -0.000457 0.000000 -0.000100 +v 0.000389 -0.000000 0.000144 +v -0.000352 -0.000000 0.000297 +v -0.009241 -0.000000 0.008158 +v 0.009241 -0.000000 0.008220 +v 0.000285 -0.000000 0.000508 +v -0.000259 -0.000000 0.000641 +v 0.000194 -0.000000 0.000807 +v -0.000182 -0.000000 0.000925 +v 0.000118 -0.000000 0.001043 +v -0.000121 -0.000000 0.001144 +v 0.000055 -0.000000 0.001215 +v -0.000079 -0.000000 0.001292 +v 0.000006 -0.000000 0.001325 +v -0.000056 -0.000000 0.001364 +v -0.000031 -0.000000 0.001374 +v 0.005071 -0.000000 0.008145 +v -0.003253 -0.000000 0.002911 +v 0.001898 -0.000000 0.008220 +v -0.005072 -0.000000 0.008220 +v -0.003057 -0.000000 0.003670 +v -0.002868 -0.000000 0.004395 +v -0.002687 -0.000000 0.005077 +v -0.002517 -0.000000 0.005709 +v -0.002360 -0.000000 0.006283 +v -0.002218 -0.000000 0.006793 +v -0.002092 -0.000000 0.007232 +v -0.001985 -0.000000 0.007591 +v -0.001899 -0.000000 0.007863 +v -0.001837 -0.000000 0.008042 +v -0.001799 -0.000000 0.008120 +v -0.001758 -0.000000 0.008150 +v 0.005482 -0.000000 0.008220 +v -0.001703 -0.000000 0.008175 +v -0.008955 -0.000000 0.008220 +v -0.001632 -0.000000 0.008198 +v -0.001542 -0.000000 0.008217 +v -0.001431 -0.000000 0.008232 +v -0.008901 -0.000000 0.008229 +v -0.005107 -0.000000 0.008220 +v 0.001863 -0.000000 0.008220 +v 0.005550 -0.000000 0.008229 +v 0.009208 -0.000000 0.008220 +v -0.005205 -0.000000 0.008222 +v 0.001767 -0.000000 0.008222 +v 0.009116 -0.000000 0.008222 +v -0.005354 -0.000000 0.008226 +v 0.001622 -0.000000 0.008226 +v 0.008978 -0.000000 0.008226 +v -0.005539 -0.000000 0.008229 +v 0.001440 -0.000000 0.008229 +v 0.008805 -0.000000 0.008229 +v -0.008818 -0.000000 0.008236 +v 0.005646 -0.000000 0.008237 +v -0.005750 -0.000000 0.008234 +v 0.001233 -0.000000 0.008234 +v 0.008609 -0.000000 0.008234 +v -0.001296 -0.000000 0.008245 +v -0.005974 -0.000000 0.008238 +v 0.001014 -0.000000 0.008238 +v 0.008401 -0.000000 0.008238 +v -0.008707 -0.000000 
0.008243 +v 0.005767 -0.000000 0.008245 +v -0.006197 -0.000000 0.008243 +v 0.000795 -0.000000 0.008243 +v 0.008192 -0.000000 0.008243 +v -0.006408 -0.000000 0.008247 +v 0.000589 -0.000000 0.008247 +v 0.007996 -0.000000 0.008247 +v -0.008571 -0.000000 0.008249 +v -0.001136 -0.000000 0.008254 +v 0.005910 -0.000000 0.008252 +v -0.006594 -0.000000 0.008251 +v 0.000407 -0.000000 0.008251 +v 0.007823 -0.000000 0.008251 +v -0.008412 -0.000000 0.008253 +v -0.006743 -0.000000 0.008254 +v 0.000261 -0.000000 0.008254 +v 0.007685 -0.000000 0.008254 +v 0.006074 -0.000000 0.008257 +v -0.008233 -0.000000 0.008257 +v -0.000948 -0.000000 0.008260 +v -0.006841 -0.000000 0.008256 +v 0.000165 -0.000000 0.008256 +v 0.007594 -0.000000 0.008256 +v -0.006876 -0.000000 0.008257 +v 0.000131 -0.000000 0.008257 +v 0.007560 -0.000000 0.008257 +v -0.008036 -0.000000 0.008260 +v -0.007123 -0.000000 0.008260 +v -0.000192 -0.000000 0.008262 +v 0.007330 -0.000000 0.008262 +v 0.006255 -0.000000 0.008262 +v -0.007824 -0.000000 0.008261 +v -0.007366 -0.000000 0.008261 +v -0.000729 -0.000000 0.008264 +v -0.007600 -0.000000 0.008262 +v 0.006451 -0.000000 0.008265 +v -0.000478 -0.000000 0.008264 +v 0.007101 -0.000000 0.008266 +v 0.006659 -0.000000 0.008267 +v 0.006877 -0.000000 0.008267 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.057 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 6//1 7//1 8//1 +f 1//1 9//1 5//1 +f 1//1 10//1 9//1 +f 6//1 11//1 7//1 +f 1//1 12//1 10//1 +f 6//1 13//1 11//1 +f 1//1 14//1 12//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 1//1 19//1 18//1 +f 1//1 20//1 19//1 +f 1//1 21//1 20//1 +f 1//1 22//1 21//1 +f 1//1 23//1 22//1 +f 1//1 24//1 23//1 +f 25//1 13//1 6//1 +f 1//1 26//1 24//1 +f 27//1 13//1 25//1 +f 28//1 13//1 27//1 +f 1//1 29//1 26//1 +f 30//1 13//1 28//1 +f 1//1 31//1 29//1 +f 32//1 33//1 30//1 +f 33//1 13//1 30//1 +f 1//1 34//1 31//1 +f 34//1 35//1 31//1 +f 36//1 33//1 32//1 +f 34//1 37//1 35//1 +f 38//1 33//1 36//1 +f 34//1 39//1 37//1 +f 40//1 33//1 38//1 +f 34//1 41//1 39//1 +f 42//1 33//1 40//1 +f 34//1 43//1 41//1 +f 44//1 33//1 42//1 +f 44//1 45//1 33//1 +f 46//1 13//1 33//1 +f 34//1 47//1 43//1 +f 48//1 45//1 44//1 +f 1//1 49//1 34//1 +f 50//1 47//1 34//1 +f 50//1 51//1 47//1 +f 52//1 45//1 48//1 +f 50//1 53//1 51//1 +f 54//1 45//1 52//1 +f 50//1 55//1 53//1 +f 56//1 45//1 54//1 +f 50//1 57//1 55//1 +f 58//1 45//1 56//1 +f 50//1 59//1 57//1 +f 60//1 45//1 58//1 +f 50//1 61//1 59//1 +f 62//1 45//1 60//1 +f 50//1 63//1 61//1 +f 64//1 49//1 1//1 +f 46//1 65//1 13//1 +f 66//1 45//1 62//1 +f 50//1 67//1 63//1 +f 68//1 45//1 66//1 +f 50//1 69//1 67//1 +f 70//1 45//1 68//1 +f 50//1 71//1 69//1 +f 72//1 45//1 70//1 +f 50//1 73//1 71//1 +f 74//1 45//1 72//1 +f 50//1 75//1 73//1 +f 76//1 45//1 74//1 +f 50//1 76//1 75//1 +f 50//1 45//1 76//1 +f 77//1 65//1 46//1 +f 78//1 45//1 50//1 +f 78//1 79//1 45//1 +f 64//1 80//1 49//1 +f 81//1 79//1 78//1 +f 82//1 79//1 81//1 +f 83//1 79//1 82//1 +f 84//1 79//1 83//1 +f 85//1 79//1 84//1 +f 86//1 79//1 85//1 +f 87//1 79//1 86//1 +f 88//1 79//1 87//1 +f 89//1 79//1 88//1 +f 90//1 79//1 89//1 +f 91//1 79//1 90//1 +f 92//1 79//1 91//1 +f 93//1 65//1 77//1 +f 94//1 79//1 92//1 +f 95//1 80//1 64//1 +f 96//1 79//1 94//1 +f 97//1 79//1 96//1 +f 98//1 79//1 97//1 +f 99//1 80//1 95//1 +f 99//1 100//1 80//1 +f 98//1 101//1 79//1 +f 102//1 65//1 93//1 +f 102//1 103//1 65//1 +f 99//1 104//1 100//1 +f 98//1 105//1 101//1 +f 102//1 106//1 103//1 +f 99//1 107//1 104//1 +f 98//1 108//1 105//1 +f 102//1 109//1 106//1 +f 99//1 
110//1 107//1 +f 98//1 111//1 108//1 +f 102//1 112//1 109//1 +f 113//1 110//1 99//1 +f 114//1 112//1 102//1 +f 113//1 115//1 110//1 +f 98//1 116//1 111//1 +f 114//1 117//1 112//1 +f 118//1 116//1 98//1 +f 113//1 119//1 115//1 +f 118//1 120//1 116//1 +f 114//1 121//1 117//1 +f 122//1 119//1 113//1 +f 123//1 121//1 114//1 +f 122//1 124//1 119//1 +f 118//1 125//1 120//1 +f 123//1 126//1 121//1 +f 122//1 127//1 124//1 +f 118//1 128//1 125//1 +f 123//1 129//1 126//1 +f 130//1 127//1 122//1 +f 131//1 128//1 118//1 +f 132//1 129//1 123//1 +f 130//1 133//1 127//1 +f 131//1 134//1 128//1 +f 132//1 135//1 129//1 +f 136//1 133//1 130//1 +f 136//1 137//1 133//1 +f 131//1 138//1 134//1 +f 132//1 139//1 135//1 +f 140//1 139//1 132//1 +f 141//1 137//1 136//1 +f 142//1 138//1 131//1 +f 141//1 143//1 137//1 +f 142//1 144//1 138//1 +f 140//1 145//1 139//1 +f 141//1 146//1 143//1 +f 142//1 147//1 144//1 +f 140//1 148//1 145//1 +f 149//1 146//1 141//1 +f 149//1 150//1 146//1 +f 142//1 151//1 147//1 +f 140//1 152//1 148//1 +f 153//1 152//1 140//1 +f 154//1 150//1 149//1 +f 154//1 155//1 150//1 +f 156//1 151//1 142//1 +f 157//1 155//1 154//1 +f 158//1 152//1 153//1 +f 156//1 159//1 151//1 +f 158//1 160//1 152//1 +f 161//1 160//1 158//1 +f 161//1 162//1 160//1 diff --git a/alphanumeric/N.mtl b/alphanumeric/N.mtl new file mode 100644 index 0000000..61405ea --- /dev/null +++ b/alphanumeric/N.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.059 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/N.obj b/alphanumeric/N.obj new file mode 100644 index 0000000..49a7467 --- /dev/null +++ b/alphanumeric/N.obj @@ -0,0 +1,268 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib N.mtl +o N +v -0.006298 0.000000 -0.008256 +v -0.005708 0.000000 -0.008255 +v -0.006010 0.000000 -0.008256 +v -0.006568 0.000000 -0.008254 +v -0.005395 0.000000 -0.008253 +v -0.006819 0.000000 -0.008251 +v -0.003081 0.000000 -0.008216 +v -0.007047 0.000000 -0.008247 +v -0.007251 0.000000 -0.008242 +v -0.007426 0.000000 -0.008236 +v -0.007569 0.000000 -0.008229 +v -0.007679 0.000000 -0.008221 +v -0.007752 0.000000 -0.008212 +v -0.000032 0.000000 -0.003736 +v 0.003080 0.000000 -0.003736 +v 0.005507 0.000000 -0.008216 +v 0.003142 0.000000 -0.008216 +v 0.007872 0.000000 -0.008216 +v 0.007872 0.000000 -0.000004 +v -0.007785 0.000000 -0.008203 +v -0.007797 0.000000 -0.008117 +v -0.007809 0.000000 -0.007888 +v -0.007819 0.000000 -0.007527 +v -0.007830 0.000000 -0.007045 +v -0.007839 0.000000 -0.006450 +v -0.007847 0.000000 -0.005754 +v -0.007855 0.000000 -0.004965 +v -0.007861 0.000000 -0.004094 +v -0.007865 0.000000 -0.003152 +v 0.003017 -0.000000 0.000731 +v -0.007869 0.000000 -0.002147 +v -0.007872 0.000000 -0.001090 +v -0.002843 0.000000 -0.000432 +v -0.007872 -0.000000 0.000009 +v -0.002865 0.000000 -0.000383 +v -0.002819 0.000000 -0.000427 +v -0.002775 0.000000 -0.000364 +v -0.002886 0.000000 -0.000276 +v -0.002681 0.000000 -0.000229 +v -0.002904 0.000000 -0.000110 +v -0.002542 0.000000 -0.000028 +v -0.002922 -0.000000 0.000121 +v -0.002361 -0.000000 0.000235 +v 0.007872 -0.000000 0.008209 +v -0.007872 -0.000000 0.000169 +v -0.002938 -0.000000 0.000420 +v -0.007872 -0.000000 0.000611 +v -0.002141 -0.000000 0.000554 +v -0.002953 -0.000000 0.000788 +v -0.001888 -0.000000 0.000923 +v -0.007872 -0.000000 0.001278 +v -0.002967 -0.000000 0.001231 +v -0.001603 
-0.000000 0.001339 +v -0.002980 -0.000000 0.001750 +v -0.007872 -0.000000 0.002115 +v -0.001291 -0.000000 0.001795 +v -0.002993 -0.000000 0.002350 +v -0.000956 -0.000000 0.002286 +v -0.007872 -0.000000 0.003065 +v -0.000600 -0.000000 0.002806 +v -0.003006 -0.000000 0.003034 +v -0.000229 -0.000000 0.003351 +v -0.003019 -0.000000 0.003804 +v -0.007872 -0.000000 0.004071 +v 0.000155 -0.000000 0.003916 +v -0.003081 -0.000000 0.008209 +v 0.000542 -0.000000 0.004480 +v -0.007872 -0.000000 0.005078 +v 0.000915 -0.000000 0.005023 +v 0.001271 -0.000000 0.005540 +v -0.007872 -0.000000 0.006028 +v 0.001607 -0.000000 0.006026 +v 0.001918 -0.000000 0.006475 +v -0.007872 -0.000000 0.006865 +v 0.002202 -0.000000 0.006884 +v -0.007872 -0.000000 0.007532 +v 0.002455 -0.000000 0.007245 +v 0.002674 -0.000000 0.007555 +v -0.007872 -0.000000 0.007974 +v 0.002854 -0.000000 0.007807 +v 0.002992 -0.000000 0.007998 +v -0.007872 -0.000000 0.008134 +v 0.003085 -0.000000 0.008121 +v 0.003129 -0.000000 0.008171 +v -0.007462 -0.000000 0.008209 +v 0.003169 -0.000000 0.008186 +v 0.003248 -0.000000 0.008199 +v 0.003364 -0.000000 0.008211 +v -0.007391 -0.000000 0.008218 +v -0.003120 -0.000000 0.008209 +v 0.007826 -0.000000 0.008209 +v 0.007701 -0.000000 0.008211 +v -0.003228 -0.000000 0.008211 +v 0.007511 -0.000000 0.008213 +v 0.003513 -0.000000 0.008221 +v -0.003392 -0.000000 0.008215 +v 0.007275 -0.000000 0.008215 +v -0.003597 -0.000000 0.008218 +v 0.007005 -0.000000 0.008218 +v -0.007287 -0.000000 0.008226 +v 0.006721 -0.000000 0.008221 +v -0.003830 -0.000000 0.008223 +v 0.003693 -0.000000 0.008229 +v 0.006435 -0.000000 0.008224 +v -0.004076 -0.000000 0.008227 +v 0.006167 -0.000000 0.008227 +v -0.007153 -0.000000 0.008234 +v 0.005929 -0.000000 0.008230 +v -0.004322 -0.000000 0.008232 +v 0.003901 -0.000000 0.008235 +v 0.005739 -0.000000 0.008232 +v 0.005614 -0.000000 0.008233 +v -0.004556 -0.000000 0.008236 +v 0.005569 -0.000000 0.008234 +v 0.005256 -0.000000 0.008239 +v -0.006991 -0.000000 0.008241 +v 0.004134 -0.000000 0.008240 +v -0.004761 -0.000000 0.008240 +v 0.004952 -0.000000 0.008242 +v 0.004389 -0.000000 0.008242 +v -0.004925 -0.000000 0.008243 +v -0.006806 -0.000000 0.008246 +v 0.004664 -0.000000 0.008243 +v -0.005033 -0.000000 0.008245 +v -0.005072 -0.000000 0.008246 +v -0.005345 -0.000000 0.008251 +v -0.006598 -0.000000 0.008251 +v -0.006372 -0.000000 0.008254 +v -0.005615 -0.000000 0.008255 +v -0.006131 -0.000000 0.008256 +v -0.005878 -0.000000 0.008256 +vn 0.0000 1.0000 0.0000 +vn 0.0000 1.0000 0.0003 +usemtl SVGMat.059 +s 1 +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 9//1 7//1 8//1 +f 10//1 7//1 9//1 +f 11//1 7//1 10//1 +f 12//1 7//1 11//1 +f 13//1 7//1 12//1 +f 13//1 14//1 7//1 +f 15//1 16//1 17//1 +f 15//1 18//1 16//1 +f 15//1 19//1 18//1 +f 20//1 14//1 13//1 +f 21//1 14//1 20//1 +f 22//1 14//1 21//1 +f 23//1 14//1 22//1 +f 24//1 14//1 23//1 +f 25//1 14//1 24//1 +f 26//1 14//1 25//1 +f 27//1 14//1 26//1 +f 28//1 14//1 27//1 +f 29//1 14//1 28//1 +f 29//1 30//1 14//1 +f 30//1 19//1 15//1 +f 31//1 30//1 29//1 +f 32//1 33//1 31//1 +f 33//1 30//1 31//1 +f 34//1 33//1 32//1 +f 34//1 35//1 33//1 +f 36//1 30//1 33//1 +f 37//1 30//1 36//1 +f 34//1 38//1 35//1 +f 39//1 30//1 37//1 +f 34//1 40//1 38//1 +f 41//1 30//1 39//1 +f 34//1 42//1 40//1 +f 43//1 30//1 41//1 +f 30//1 44//1 19//1 +f 45//1 42//1 34//1 +f 45//1 46//1 42//1 +f 47//1 46//1 45//1 +f 48//1 30//1 43//1 +f 47//1 49//1 46//1 +f 50//1 30//1 48//1 +f 51//1 49//1 47//1 +f 50//1 44//1 30//1 
+f 51//1 52//1 49//1 +f 53//1 44//1 50//1 +f 51//1 54//1 52//1 +f 55//1 54//1 51//1 +f 56//1 44//1 53//1 +f 55//1 57//1 54//1 +f 58//1 44//1 56//1 +f 59//1 57//1 55//1 +f 60//1 44//1 58//1 +f 59//1 61//1 57//1 +f 62//1 44//1 60//1 +f 59//1 63//1 61//1 +f 64//1 63//1 59//1 +f 65//1 44//1 62//1 +f 64//1 66//1 63//1 +f 67//1 44//1 65//1 +f 68//1 66//1 64//1 +f 69//1 44//1 67//1 +f 70//1 44//1 69//1 +f 71//1 66//1 68//1 +f 72//1 44//1 70//1 +f 73//1 44//1 72//1 +f 74//1 66//1 71//1 +f 75//1 44//1 73//1 +f 76//1 66//1 74//1 +f 77//1 44//1 75//1 +f 78//1 44//1 77//1 +f 79//1 66//1 76//1 +f 80//1 44//1 78//1 +f 81//1 44//1 80//1 +f 82//1 66//1 79//1 +f 83//1 44//1 81//1 +f 84//1 44//1 83//1 +f 85//1 66//1 82//1 +f 86//1 44//1 84//1 +f 87//1 44//1 86//1 +f 88//1 44//1 87//1 +f 89//1 66//1 85//1 +f 89//1 90//1 66//1 +f 88//1 91//2 44//1 +f 88//1 92//1 91//2 +f 89//1 93//1 90//1 +f 88//1 94//1 92//1 +f 95//1 94//1 88//1 +f 89//1 96//1 93//1 +f 95//1 97//1 94//1 +f 89//1 98//1 96//1 +f 95//1 99//1 97//1 +f 100//1 98//1 89//1 +f 95//1 101//1 99//1 +f 100//1 102//1 98//1 +f 103//1 101//1 95//1 +f 103//1 104//1 101//1 +f 100//1 105//1 102//1 +f 103//1 106//1 104//1 +f 107//1 105//1 100//1 +f 103//1 108//1 106//1 +f 107//1 109//1 105//1 +f 110//1 108//1 103//1 +f 110//1 111//1 108//1 +f 110//1 112//1 111//1 +f 107//1 113//1 109//1 +f 110//1 114//1 112//1 +f 110//1 115//1 114//1 +f 116//1 113//1 107//1 +f 117//1 115//1 110//1 +f 116//1 118//1 113//1 +f 117//1 119//1 115//1 +f 120//1 119//1 117//1 +f 116//1 121//1 118//1 +f 122//1 121//1 116//1 +f 120//1 123//1 119//1 +f 122//1 124//1 121//1 +f 122//1 125//1 124//1 +f 122//1 126//1 125//1 +f 127//1 126//1 122//1 +f 128//1 126//1 127//1 +f 128//1 129//1 126//1 +f 130//1 129//1 128//1 +f 130//1 131//1 129//1 diff --git a/alphanumeric/O.mtl b/alphanumeric/O.mtl new file mode 100644 index 0000000..39112a6 --- /dev/null +++ b/alphanumeric/O.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.061 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/O.obj b/alphanumeric/O.obj new file mode 100644 index 0000000..6bc1df9 --- /dev/null +++ b/alphanumeric/O.obj @@ -0,0 +1,367 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib O.mtl +o O +v -0.000349 0.000000 -0.008460 +v 0.000276 0.000000 -0.008458 +v -0.000037 0.000000 -0.008462 +v -0.000655 0.000000 -0.008452 +v 0.000588 0.000000 -0.008448 +v -0.000953 0.000000 -0.008438 +v 0.000896 0.000000 -0.008433 +v -0.001239 0.000000 -0.008418 +v 0.001198 0.000000 -0.008411 +v -0.001512 0.000000 -0.008392 +v 0.001489 0.000000 -0.008383 +v -0.001767 0.000000 -0.008361 +v 0.001767 0.000000 -0.008348 +v -0.002558 0.000000 -0.008210 +v 0.002431 0.000000 -0.008234 +v 0.003067 0.000000 -0.008080 +v -0.003310 0.000000 -0.007995 +v 0.003673 0.000000 -0.007885 +v -0.004022 0.000000 -0.007718 +v 0.004248 0.000000 -0.007651 +v -0.004689 0.000000 -0.007381 +v 0.004791 0.000000 -0.007378 +v -0.005311 0.000000 -0.006988 +v 0.005300 0.000000 -0.007067 +v 0.005775 0.000000 -0.006718 +v -0.005884 0.000000 -0.006539 +v 0.006214 0.000000 -0.006334 +v -0.006405 0.000000 -0.006038 +v 0.006617 0.000000 -0.005913 +v -0.006873 0.000000 -0.005487 +v 0.006981 0.000000 -0.005457 +v -0.007284 0.000000 -0.004887 +v 0.007307 0.000000 -0.004966 +v 0.007592 0.000000 -0.004441 +v -0.007636 0.000000 -0.004242 +v -0.000324 0.000000 -0.004584 +v -0.000108 0.000000 
-0.004584 +v 0.000118 0.000000 -0.004574 +v -0.000527 0.000000 -0.004573 +v 0.000348 0.000000 -0.004555 +v -0.000711 0.000000 -0.004550 +v 0.000577 0.000000 -0.004526 +v -0.000871 0.000000 -0.004516 +v 0.000800 0.000000 -0.004489 +v -0.001139 0.000000 -0.004433 +v 0.001012 0.000000 -0.004444 +v 0.001208 0.000000 -0.004392 +v 0.007736 0.000000 -0.004142 +v -0.001394 0.000000 -0.004329 +v 0.001382 0.000000 -0.004333 +v 0.001531 0.000000 -0.004267 +v -0.001636 0.000000 -0.004204 +v 0.001698 0.000000 -0.004172 +v -0.007926 0.000000 -0.003554 +v -0.001865 0.000000 -0.004058 +v 0.001863 0.000000 -0.004060 +v 0.007864 0.000000 -0.003851 +v 0.002025 0.000000 -0.003935 +v -0.002079 0.000000 -0.003892 +v 0.002182 0.000000 -0.003796 +v -0.002278 0.000000 -0.003707 +v 0.007977 0.000000 -0.003566 +v 0.002333 0.000000 -0.003646 +v -0.002461 0.000000 -0.003504 +v 0.002477 0.000000 -0.003485 +v 0.008077 0.000000 -0.003281 +v -0.008152 0.000000 -0.002824 +v -0.002628 0.000000 -0.003282 +v 0.002612 0.000000 -0.003315 +v 0.002738 0.000000 -0.003137 +v -0.002779 0.000000 -0.003043 +v 0.008162 0.000000 -0.002993 +v 0.002854 0.000000 -0.002953 +v -0.002912 0.000000 -0.002787 +v 0.008236 0.000000 -0.002699 +v 0.002957 0.000000 -0.002764 +v -0.008230 0.000000 -0.002487 +v -0.003027 0.000000 -0.002514 +v 0.003048 0.000000 -0.002571 +v 0.008299 0.000000 -0.002395 +v 0.003124 0.000000 -0.002376 +v -0.003124 0.000000 -0.002226 +v -0.008300 0.000000 -0.002137 +v 0.008352 0.000000 -0.002076 +v 0.003198 0.000000 -0.002111 +v -0.003198 0.000000 -0.001928 +v -0.008358 0.000000 -0.001778 +v 0.003259 0.000000 -0.001781 +v 0.008398 0.000000 -0.001740 +v -0.003259 0.000000 -0.001584 +v 0.003306 0.000000 -0.001397 +v -0.008408 0.000000 -0.001410 +v 0.008433 0.000000 -0.001381 +v -0.003306 0.000000 -0.001205 +v -0.008446 0.000000 -0.001037 +v 0.003340 0.000000 -0.000973 +v 0.008464 0.000000 -0.000997 +v -0.003339 0.000000 -0.000800 +v -0.008474 0.000000 -0.000660 +v 0.008488 0.000000 -0.000584 +v 0.003361 0.000000 -0.000521 +v -0.003359 0.000000 -0.000377 +v -0.008491 0.000000 -0.000283 +v 0.008497 -0.000000 0.000521 +v 0.003368 0.000000 -0.000054 +v -0.003365 -0.000000 0.000055 +v -0.008497 -0.000000 0.000093 +v 0.003362 -0.000000 0.000416 +v -0.003357 -0.000000 0.000487 +v -0.008493 -0.000000 0.000464 +v 0.003343 -0.000000 0.000875 +v -0.008478 -0.000000 0.000829 +v -0.003336 -0.000000 0.000909 +v 0.008421 -0.000000 0.001562 +v -0.008451 -0.000000 0.001186 +v 0.003311 -0.000000 0.001311 +v -0.003301 -0.000000 0.001313 +v -0.008412 -0.000000 0.001531 +v 0.003266 -0.000000 0.001711 +v -0.003252 -0.000000 0.001690 +v -0.008273 -0.000000 0.002343 +v 0.008259 -0.000000 0.002539 +v -0.003189 -0.000000 0.002032 +v 0.003207 -0.000000 0.002062 +v -0.003111 -0.000000 0.002328 +v 0.003136 -0.000000 0.002353 +v -0.002977 -0.000000 0.002712 +v -0.008075 -0.000000 0.003111 +v 0.003021 -0.000000 0.002687 +v 0.008015 -0.000000 0.003449 +v 0.002887 -0.000000 0.002995 +v -0.002805 -0.000000 0.003066 +v 0.002733 -0.000000 0.003276 +v -0.002598 -0.000000 0.003389 +v -0.007823 -0.000000 0.003833 +v 0.002559 -0.000000 0.003530 +v -0.002359 -0.000000 0.003680 +v 0.007687 -0.000000 0.004291 +v 0.002365 -0.000000 0.003758 +v -0.002091 -0.000000 0.003937 +v 0.002150 -0.000000 0.003961 +v -0.007516 -0.000000 0.004507 +v -0.001795 -0.000000 0.004158 +v 0.001914 -0.000000 0.004138 +v 0.001656 -0.000000 0.004290 +v -0.001476 -0.000000 0.004342 +v 0.001375 -0.000000 0.004417 +v 0.007277 -0.000000 0.005063 +v -0.001134 -0.000000 0.004488 +v 0.001073 
-0.000000 0.004520 +v -0.000773 -0.000000 0.004594 +v -0.007156 -0.000000 0.005134 +v 0.000747 -0.000000 0.004599 +v -0.000396 -0.000000 0.004657 +v 0.000398 -0.000000 0.004654 +v -0.000005 -0.000000 0.004678 +v 0.006788 -0.000000 0.005764 +v -0.006744 -0.000000 0.005711 +v -0.006281 -0.000000 0.006236 +v 0.006219 -0.000000 0.006390 +v -0.005767 -0.000000 0.006709 +v 0.005572 -0.000000 0.006942 +v -0.005205 -0.000000 0.007129 +v 0.004848 -0.000000 0.007416 +v -0.004595 -0.000000 0.007493 +v 0.004048 -0.000000 0.007811 +v -0.003938 -0.000000 0.007801 +v -0.003236 -0.000000 0.008051 +v 0.003173 -0.000000 0.008126 +v -0.002788 -0.000000 0.008172 +v 0.002742 -0.000000 0.008234 +v -0.002289 -0.000000 0.008271 +v 0.002258 -0.000000 0.008321 +v -0.001751 -0.000000 0.008350 +v 0.001731 -0.000000 0.008387 +v -0.001184 -0.000000 0.008408 +v 0.001172 -0.000000 0.008433 +v -0.000598 -0.000000 0.008445 +v 0.000590 -0.000000 0.008458 +v -0.000003 -0.000000 0.008462 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.061 +s 1 +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 8//1 9//1 7//1 +f 10//1 9//1 8//1 +f 10//1 11//1 9//1 +f 12//1 11//1 10//1 +f 12//1 13//1 11//1 +f 14//1 13//1 12//1 +f 14//1 15//1 13//1 +f 14//1 16//1 15//1 +f 17//1 16//1 14//1 +f 17//1 18//1 16//1 +f 19//1 18//1 17//1 +f 19//1 20//1 18//1 +f 21//1 20//1 19//1 +f 21//1 22//1 20//1 +f 23//1 22//1 21//1 +f 23//1 24//1 22//1 +f 23//1 25//1 24//1 +f 26//1 25//1 23//1 +f 26//1 27//1 25//1 +f 28//1 27//1 26//1 +f 28//1 29//1 27//1 +f 30//1 29//1 28//1 +f 30//1 31//1 29//1 +f 32//1 31//1 30//1 +f 32//1 33//1 31//1 +f 32//1 34//1 33//1 +f 35//1 36//1 32//1 +f 36//1 37//1 32//1 +f 37//1 34//1 32//1 +f 38//1 34//1 37//1 +f 35//1 39//1 36//1 +f 40//1 34//1 38//1 +f 35//1 41//1 39//1 +f 42//1 34//1 40//1 +f 35//1 43//1 41//1 +f 44//1 34//1 42//1 +f 35//1 45//1 43//1 +f 46//1 34//1 44//1 +f 47//1 34//1 46//1 +f 47//1 48//1 34//1 +f 35//1 49//1 45//1 +f 50//1 48//1 47//1 +f 51//1 48//1 50//1 +f 35//1 52//1 49//1 +f 53//1 48//1 51//1 +f 54//1 52//1 35//1 +f 54//1 55//1 52//1 +f 56//1 48//1 53//1 +f 56//1 57//1 48//1 +f 58//1 57//1 56//1 +f 54//1 59//1 55//1 +f 60//1 57//1 58//1 +f 54//1 61//1 59//1 +f 60//1 62//1 57//1 +f 63//1 62//1 60//1 +f 54//1 64//1 61//1 +f 65//1 62//1 63//1 +f 65//1 66//1 62//1 +f 67//1 64//1 54//1 +f 67//1 68//1 64//1 +f 69//1 66//1 65//1 +f 70//1 66//1 69//1 +f 67//1 71//1 68//1 +f 70//1 72//1 66//1 +f 73//1 72//1 70//1 +f 67//1 74//1 71//1 +f 73//1 75//1 72//1 +f 76//1 75//1 73//1 +f 77//1 74//1 67//1 +f 77//1 78//1 74//1 +f 79//1 75//1 76//1 +f 79//1 80//1 75//1 +f 81//1 80//1 79//1 +f 77//1 82//1 78//1 +f 83//1 82//1 77//1 +f 81//1 84//1 80//1 +f 85//1 84//1 81//1 +f 83//1 86//1 82//1 +f 87//1 86//1 83//1 +f 88//1 84//1 85//1 +f 88//1 89//1 84//1 +f 87//1 90//1 86//1 +f 91//1 89//1 88//1 +f 92//1 90//1 87//1 +f 91//1 93//1 89//1 +f 92//1 94//1 90//1 +f 95//1 94//1 92//1 +f 96//1 93//1 91//1 +f 96//1 97//1 93//1 +f 95//1 98//1 94//1 +f 99//1 98//1 95//1 +f 96//1 100//1 97//1 +f 101//1 100//1 96//1 +f 99//1 102//1 98//1 +f 103//1 102//1 99//1 +f 101//1 104//1 100//1 +f 105//1 104//1 101//1 +f 103//1 106//1 102//1 +f 107//1 106//1 103//1 +f 108//1 104//1 105//1 +f 107//1 109//1 106//1 +f 110//1 109//1 107//1 +f 111//1 104//1 108//1 +f 112//1 109//1 110//1 +f 112//1 113//1 109//1 +f 111//1 114//1 104//1 +f 115//1 113//1 112//1 +f 116//1 114//1 111//1 +f 115//1 117//1 113//1 +f 118//1 117//1 115//1 +f 119//1 114//1 116//1 +f 118//1 120//1 117//1 +f 121//1 120//1 118//1 +f 
119//1 122//1 114//1 +f 121//1 123//1 120//1 +f 124//1 122//1 119//1 +f 121//1 125//1 123//1 +f 126//1 122//1 124//1 +f 121//1 127//1 125//1 +f 128//1 127//1 121//1 +f 129//1 122//1 126//1 +f 129//1 130//1 122//1 +f 131//1 130//1 129//1 +f 128//1 132//1 127//1 +f 133//1 130//1 131//1 +f 128//1 134//1 132//1 +f 135//1 134//1 128//1 +f 136//1 130//1 133//1 +f 135//1 137//1 134//1 +f 136//1 138//1 130//1 +f 139//1 138//1 136//1 +f 135//1 140//1 137//1 +f 141//1 138//1 139//1 +f 142//1 140//1 135//1 +f 142//1 143//1 140//1 +f 144//1 138//1 141//1 +f 145//1 138//1 144//1 +f 142//1 146//1 143//1 +f 147//1 138//1 145//1 +f 147//1 148//1 138//1 +f 142//1 149//1 146//1 +f 150//1 148//1 147//1 +f 142//1 151//1 149//1 +f 152//1 151//1 142//1 +f 153//1 148//1 150//1 +f 152//1 154//1 151//1 +f 155//1 148//1 153//1 +f 156//1 148//1 155//1 +f 152//1 156//1 154//1 +f 152//1 148//1 156//1 +f 152//1 157//1 148//1 +f 158//1 157//1 152//1 +f 159//1 157//1 158//1 +f 159//1 160//1 157//1 +f 161//1 160//1 159//1 +f 161//1 162//1 160//1 +f 163//1 162//1 161//1 +f 163//1 164//1 162//1 +f 165//1 164//1 163//1 +f 165//1 166//1 164//1 +f 167//1 166//1 165//1 +f 168//1 166//1 167//1 +f 168//1 169//1 166//1 +f 170//1 169//1 168//1 +f 170//1 171//1 169//1 +f 172//1 171//1 170//1 +f 172//1 173//1 171//1 +f 174//1 173//1 172//1 +f 174//1 175//1 173//1 +f 176//1 175//1 174//1 +f 176//1 177//1 175//1 +f 178//1 177//1 176//1 +f 178//1 179//1 177//1 +f 180//1 179//1 178//1 diff --git a/alphanumeric/P.mtl b/alphanumeric/P.mtl new file mode 100644 index 0000000..3906280 --- /dev/null +++ b/alphanumeric/P.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.063 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/P.obj b/alphanumeric/P.obj new file mode 100644 index 0000000..43bcbd0 --- /dev/null +++ b/alphanumeric/P.obj @@ -0,0 +1,268 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib P.mtl +o P +v -0.007011 -0.000000 0.000000 +v -0.006917 0.000000 -0.008212 +v -0.007011 0.000000 -0.008212 +v -0.006656 0.000000 -0.008212 +v -0.006263 0.000000 -0.008212 +v -0.005769 0.000000 -0.008212 +v -0.005209 0.000000 -0.008212 +v -0.004615 0.000000 -0.008212 +v -0.004022 0.000000 -0.008212 +v -0.003462 0.000000 -0.008212 +v -0.002968 0.000000 -0.008212 +v -0.002575 0.000000 -0.008212 +v -0.002314 0.000000 -0.008212 +v -0.002220 0.000000 -0.008212 +v -0.001045 0.000000 -0.008211 +v -0.000040 0.000000 -0.008206 +v 0.000812 0.000000 -0.008196 +v 0.001530 0.000000 -0.008179 +v -0.001784 0.000000 -0.004853 +v 0.002130 0.000000 -0.008154 +v 0.002631 0.000000 -0.008119 +v 0.003051 0.000000 -0.008073 +v 0.003407 0.000000 -0.008013 +v 0.003717 0.000000 -0.007939 +v 0.003999 0.000000 -0.007849 +v 0.004271 0.000000 -0.007742 +v 0.004551 0.000000 -0.007615 +v 0.004878 0.000000 -0.007448 +v 0.005180 0.000000 -0.007259 +v 0.005460 0.000000 -0.007047 +v 0.005715 0.000000 -0.006811 +v 0.005947 0.000000 -0.006552 +v 0.006156 0.000000 -0.006270 +v 0.006342 0.000000 -0.005963 +v 0.006506 0.000000 -0.005632 +v 0.006647 0.000000 -0.005276 +v 0.006767 0.000000 -0.004895 +v -0.000751 0.000000 -0.004853 +v -0.000609 0.000000 -0.004852 +v -0.000464 0.000000 -0.004848 +v -0.000318 0.000000 -0.004842 +v 0.006864 0.000000 -0.004488 +v -0.001784 0.000000 -0.003036 +v -0.000173 0.000000 -0.004834 +v -0.000030 0.000000 -0.004824 +v 0.000109 0.000000 -0.004812 +v 0.000242 
0.000000 -0.004799 +v 0.000367 0.000000 -0.004785 +v 0.000483 0.000000 -0.004769 +v 0.000587 0.000000 -0.004752 +v 0.000678 0.000000 -0.004734 +v 0.000755 0.000000 -0.004716 +v 0.000903 0.000000 -0.004668 +v 0.001042 0.000000 -0.004610 +v 0.001173 0.000000 -0.004543 +v 0.001294 0.000000 -0.004467 +v 0.006940 0.000000 -0.004056 +v 0.001408 0.000000 -0.004381 +v 0.001512 0.000000 -0.004285 +v 0.001608 0.000000 -0.004180 +v 0.001696 0.000000 -0.004065 +v 0.001775 0.000000 -0.003941 +v 0.007001 0.000000 -0.003474 +v 0.001846 0.000000 -0.003807 +v 0.001908 0.000000 -0.003663 +v 0.001962 0.000000 -0.003509 +v 0.001990 0.000000 -0.003405 +v 0.007011 0.000000 -0.002906 +v 0.002013 0.000000 -0.003311 +v 0.002029 0.000000 -0.003225 +v 0.002040 0.000000 -0.003143 +v 0.002044 0.000000 -0.003065 +v 0.002043 0.000000 -0.002988 +v -0.001784 0.000000 -0.001219 +v 0.002035 0.000000 -0.002910 +v 0.002021 0.000000 -0.002830 +v 0.006972 0.000000 -0.002355 +v 0.002000 0.000000 -0.002744 +v 0.001973 0.000000 -0.002652 +v 0.001940 0.000000 -0.002551 +v 0.001900 0.000000 -0.002439 +v 0.001812 0.000000 -0.002234 +v 0.006883 0.000000 -0.001824 +v 0.001714 0.000000 -0.002053 +v 0.001603 0.000000 -0.001896 +v 0.001476 0.000000 -0.001760 +v 0.006748 0.000000 -0.001316 +v 0.001330 0.000000 -0.001644 +v 0.001161 0.000000 -0.001546 +v 0.000966 0.000000 -0.001466 +v 0.000743 0.000000 -0.001401 +v 0.000487 0.000000 -0.001351 +v 0.000197 0.000000 -0.001313 +v 0.006567 0.000000 -0.000834 +v -0.000132 0.000000 -0.001286 +v -0.000502 0.000000 -0.001269 +v -0.000527 0.000000 -0.001268 +v -0.000597 0.000000 -0.001266 +v -0.000703 0.000000 -0.001261 +v -0.000835 0.000000 -0.001256 +v -0.000984 0.000000 -0.001251 +v -0.001143 0.000000 -0.001244 +v -0.001302 0.000000 -0.001238 +v -0.001452 0.000000 -0.001232 +v -0.001584 0.000000 -0.001227 +v -0.001689 0.000000 -0.001223 +v -0.001759 0.000000 -0.001220 +v 0.006341 0.000000 -0.000381 +v 0.006072 -0.000000 0.000040 +v -0.007011 -0.000000 0.008212 +v -0.001784 -0.000000 0.002115 +v 0.005761 -0.000000 0.000426 +v 0.005409 -0.000000 0.000774 +v 0.005017 -0.000000 0.001081 +v 0.004588 -0.000000 0.001344 +v 0.004308 -0.000000 0.001487 +v 0.004032 -0.000000 0.001613 +v 0.003754 -0.000000 0.001722 +v 0.003467 -0.000000 0.001815 +v 0.003168 -0.000000 0.001893 +v 0.002850 -0.000000 0.001958 +v 0.002508 -0.000000 0.002010 +v 0.002137 -0.000000 0.002051 +v 0.001730 -0.000000 0.002080 +v 0.001283 -0.000000 0.002100 +v 0.000789 -0.000000 0.002112 +v 0.000244 -0.000000 0.002115 +v -0.001784 -0.000000 0.005164 +v -0.001784 -0.000000 0.008212 +v -0.004398 -0.000000 0.008212 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.063 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 1//1 19//1 18//1 +f 19//1 20//1 18//1 +f 19//1 21//1 20//1 +f 19//1 22//1 21//1 +f 19//1 23//1 22//1 +f 19//1 24//1 23//1 +f 19//1 25//1 24//1 +f 19//1 26//1 25//1 +f 19//1 27//1 26//1 +f 19//1 28//1 27//1 +f 19//1 29//1 28//1 +f 19//1 30//1 29//1 +f 19//1 31//1 30//1 +f 19//1 32//1 31//1 +f 19//1 33//1 32//1 +f 19//1 34//1 33//1 +f 19//1 35//1 34//1 +f 19//1 36//1 35//1 +f 19//1 37//1 36//1 +f 19//1 38//1 37//1 +f 38//1 39//1 37//1 +f 39//1 40//1 37//1 +f 40//1 41//1 37//1 +f 41//1 42//1 37//1 +f 1//1 43//1 19//1 +f 44//1 42//1 41//1 +f 45//1 42//1 44//1 
+f 46//1 42//1 45//1 +f 47//1 42//1 46//1 +f 48//1 42//1 47//1 +f 49//1 42//1 48//1 +f 50//1 42//1 49//1 +f 51//1 42//1 50//1 +f 52//1 42//1 51//1 +f 53//1 42//1 52//1 +f 54//1 42//1 53//1 +f 55//1 42//1 54//1 +f 56//1 42//1 55//1 +f 56//1 57//1 42//1 +f 58//1 57//1 56//1 +f 59//1 57//1 58//1 +f 60//1 57//1 59//1 +f 61//1 57//1 60//1 +f 62//1 57//1 61//1 +f 62//1 63//1 57//1 +f 64//1 63//1 62//1 +f 65//1 63//1 64//1 +f 66//1 63//1 65//1 +f 67//1 63//1 66//1 +f 67//1 68//1 63//1 +f 69//1 68//1 67//1 +f 70//1 68//1 69//1 +f 71//1 68//1 70//1 +f 72//1 68//1 71//1 +f 73//1 68//1 72//1 +f 1//1 74//1 43//1 +f 75//1 68//1 73//1 +f 76//1 68//1 75//1 +f 76//1 77//1 68//1 +f 78//1 77//1 76//1 +f 79//1 77//1 78//1 +f 80//1 77//1 79//1 +f 81//1 77//1 80//1 +f 82//1 77//1 81//1 +f 82//1 83//1 77//1 +f 84//1 83//1 82//1 +f 85//1 83//1 84//1 +f 86//1 83//1 85//1 +f 86//1 87//1 83//1 +f 88//1 87//1 86//1 +f 89//1 87//1 88//1 +f 90//1 87//1 89//1 +f 91//1 87//1 90//1 +f 92//1 87//1 91//1 +f 93//1 87//1 92//1 +f 93//1 94//1 87//1 +f 95//1 94//1 93//1 +f 96//1 94//1 95//1 +f 97//1 94//1 96//1 +f 98//1 94//1 97//1 +f 99//1 94//1 98//1 +f 100//1 94//1 99//1 +f 101//1 94//1 100//1 +f 102//1 94//1 101//1 +f 103//1 94//1 102//1 +f 104//1 94//1 103//1 +f 105//1 94//1 104//1 +f 106//1 94//1 105//1 +f 107//1 94//1 106//1 +f 74//1 94//1 107//1 +f 1//1 94//1 74//1 +f 1//1 108//1 94//1 +f 1//1 109//1 108//1 +f 110//1 111//1 1//1 +f 111//1 109//1 1//1 +f 111//1 112//1 109//1 +f 111//1 113//1 112//1 +f 111//1 114//1 113//1 +f 111//1 115//1 114//1 +f 111//1 116//1 115//1 +f 111//1 117//1 116//1 +f 111//1 118//1 117//1 +f 111//1 119//1 118//1 +f 111//1 120//1 119//1 +f 111//1 121//1 120//1 +f 111//1 122//1 121//1 +f 111//1 123//1 122//1 +f 111//1 124//1 123//1 +f 111//1 125//1 124//1 +f 111//1 126//1 125//1 +f 111//1 127//1 126//1 +f 110//1 128//1 111//1 +f 110//1 129//1 128//1 +f 130//2 129//2 110//2 diff --git a/alphanumeric/Q.mtl b/alphanumeric/Q.mtl new file mode 100644 index 0000000..ff422f5 --- /dev/null +++ b/alphanumeric/Q.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.065 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/Q.obj b/alphanumeric/Q.obj new file mode 100644 index 0000000..7bbb767 --- /dev/null +++ b/alphanumeric/Q.obj @@ -0,0 +1,681 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib Q.mtl +o Q +v -0.000711 0.000000 -0.009236 +v 0.000359 0.000000 -0.009215 +v -0.000175 0.000000 -0.009237 +v -0.001245 0.000000 -0.009212 +v 0.000890 0.000000 -0.009171 +v -0.001774 0.000000 -0.009165 +v 0.001413 0.000000 -0.009107 +v -0.002624 0.000000 -0.009027 +v 0.001927 0.000000 -0.009019 +v -0.003428 0.000000 -0.008822 +v 0.002427 0.000000 -0.008911 +v 0.002911 0.000000 -0.008783 +v -0.004184 0.000000 -0.008549 +v 0.003376 0.000000 -0.008635 +v 0.003819 0.000000 -0.008468 +v -0.004892 0.000000 -0.008213 +v 0.004237 0.000000 -0.008282 +v 0.004507 0.000000 -0.008137 +v -0.005547 0.000000 -0.007812 +v 0.004787 0.000000 -0.007962 +v 0.005071 0.000000 -0.007760 +v -0.006149 0.000000 -0.007351 +v 0.005357 0.000000 -0.007534 +v 0.005640 0.000000 -0.007289 +v -0.006695 0.000000 -0.006830 +v 0.005917 0.000000 -0.007029 +v 0.006183 0.000000 -0.006757 +v -0.007182 0.000000 -0.006251 +v 0.006436 0.000000 -0.006477 +v 0.006670 0.000000 -0.006194 +v -0.007609 0.000000 -0.005616 +v 0.006882 0.000000 -0.005910 +v 0.007068 0.000000 
-0.005630 +v 0.007224 0.000000 -0.005357 +v -0.007974 0.000000 -0.004927 +v -0.000667 0.000000 -0.005304 +v -0.000281 0.000000 -0.005320 +v -0.000122 0.000000 -0.005318 +v 0.000026 0.000000 -0.005313 +v 0.007391 0.000000 -0.005025 +v 0.000164 0.000000 -0.005305 +v 0.000294 0.000000 -0.005292 +v -0.001028 0.000000 -0.005258 +v 0.000418 0.000000 -0.005276 +v 0.000536 0.000000 -0.005254 +v -0.001366 0.000000 -0.005181 +v 0.000650 0.000000 -0.005229 +v 0.000762 0.000000 -0.005198 +v 0.000873 0.000000 -0.005162 +v -0.001680 0.000000 -0.005072 +v 0.000984 0.000000 -0.005121 +v 0.001097 0.000000 -0.005074 +v 0.001213 0.000000 -0.005021 +v -0.001972 0.000000 -0.004932 +v 0.007539 0.000000 -0.004699 +v 0.001301 0.000000 -0.004979 +v 0.001390 0.000000 -0.004933 +v 0.001479 0.000000 -0.004884 +v -0.002241 0.000000 -0.004760 +v -0.008274 0.000000 -0.004184 +v 0.001567 0.000000 -0.004834 +v 0.001653 0.000000 -0.004782 +v 0.001735 0.000000 -0.004729 +v -0.002488 0.000000 -0.004556 +v 0.001814 0.000000 -0.004676 +v 0.007669 0.000000 -0.004374 +v 0.001887 0.000000 -0.004624 +v 0.001955 0.000000 -0.004573 +v 0.002015 0.000000 -0.004525 +v -0.002715 0.000000 -0.004319 +v 0.002066 0.000000 -0.004479 +v 0.002109 0.000000 -0.004436 +v 0.002280 0.000000 -0.004223 +v 0.007781 0.000000 -0.004046 +v -0.002920 0.000000 -0.004049 +v 0.002437 0.000000 -0.003970 +v -0.008507 0.000000 -0.003391 +v -0.003105 0.000000 -0.003747 +v 0.007877 0.000000 -0.003710 +v 0.002581 0.000000 -0.003683 +v -0.003271 0.000000 -0.003412 +v 0.007957 0.000000 -0.003362 +v 0.002709 0.000000 -0.003363 +v -0.003417 0.000000 -0.003043 +v -0.008579 0.000000 -0.003052 +v 0.002822 0.000000 -0.003015 +v 0.008022 0.000000 -0.002997 +v -0.008637 0.000000 -0.002653 +v -0.003488 0.000000 -0.002786 +v 0.002918 0.000000 -0.002643 +v 0.008073 0.000000 -0.002612 +v -0.003547 0.000000 -0.002467 +v -0.008682 0.000000 -0.002206 +v 0.002996 0.000000 -0.002250 +v 0.008111 0.000000 -0.002202 +v -0.003592 0.000000 -0.002096 +v 0.003056 0.000000 -0.001839 +v -0.008715 0.000000 -0.001724 +v 0.008137 0.000000 -0.001763 +v -0.003624 0.000000 -0.001687 +v 0.003096 0.000000 -0.001414 +v 0.008152 0.000000 -0.001290 +v -0.008735 0.000000 -0.001220 +v -0.003644 0.000000 -0.001251 +v 0.003116 0.000000 -0.000980 +v 0.008157 0.000000 -0.000778 +v -0.003650 0.000000 -0.000800 +v -0.008742 0.000000 -0.000705 +v 0.003115 0.000000 -0.000538 +v -0.003644 0.000000 -0.000346 +v 0.008156 0.000000 -0.000421 +v -0.008737 0.000000 -0.000193 +v 0.003092 0.000000 -0.000094 +v 0.008153 0.000000 -0.000100 +v -0.003624 -0.000000 0.000097 +v -0.008718 -0.000000 0.000305 +v 0.008146 -0.000000 0.000190 +v 0.003072 -0.000000 0.000171 +v -0.003592 -0.000000 0.000520 +v 0.003043 -0.000000 0.000439 +v 0.008136 -0.000000 0.000452 +v -0.008688 -0.000000 0.000775 +v 0.003006 -0.000000 0.000707 +v 0.008123 -0.000000 0.000690 +v -0.003547 -0.000000 0.000908 +v 0.008106 -0.000000 0.000909 +v 0.002963 -0.000000 0.000969 +v -0.008644 -0.000000 0.001206 +v -0.003488 -0.000000 0.001251 +v 0.008084 -0.000000 0.001113 +v -0.000279 -0.000000 0.000935 +v 0.000074 -0.000000 0.000965 +v -0.000116 -0.000000 0.000928 +v -0.000421 -0.000000 0.000986 +v 0.000295 -0.000000 0.001046 +v 0.002915 -0.000000 0.001221 +v -0.000547 -0.000000 0.001079 +v 0.000553 -0.000000 0.001172 +v -0.000662 -0.000000 0.001214 +v 0.008058 -0.000000 0.001306 +v 0.000853 -0.000000 0.001343 +v -0.008588 -0.000000 0.001584 +v -0.000771 -0.000000 0.001392 +v 0.002863 -0.000000 0.001458 +v -0.003417 -0.000000 0.001536 +v 0.008026 -0.000000 
0.001491 +v 0.001200 -0.000000 0.001559 +v -0.000878 -0.000000 0.001611 +v 0.002808 -0.000000 0.001677 +v 0.007989 -0.000000 0.001674 +v -0.003287 -0.000000 0.001912 +v 0.001598 -0.000000 0.001822 +v -0.008520 -0.000000 0.001897 +v -0.000921 -0.000000 0.001710 +v 0.007945 -0.000000 0.001857 +v 0.002752 -0.000000 0.001871 +v -0.000964 -0.000000 0.001809 +v -0.001005 -0.000000 0.001905 +v 0.001713 -0.000000 0.001898 +v 0.007896 -0.000000 0.002046 +v 0.002695 -0.000000 0.002037 +v -0.008418 -0.000000 0.002257 +v 0.001824 -0.000000 0.001970 +v -0.001044 -0.000000 0.001998 +v -0.003134 -0.000000 0.002255 +v 0.001932 -0.000000 0.002037 +v -0.001080 -0.000000 0.002086 +v 0.002034 -0.000000 0.002098 +v 0.002638 -0.000000 0.002171 +v 0.007827 -0.000000 0.002287 +v -0.001115 -0.000000 0.002169 +v 0.002130 -0.000000 0.002153 +v 0.002219 -0.000000 0.002202 +v -0.001146 -0.000000 0.002246 +v 0.002584 -0.000000 0.002266 +v 0.002299 -0.000000 0.002243 +v 0.002370 -0.000000 0.002277 +v -0.001173 -0.000000 0.002315 +v -0.002957 -0.000000 0.002565 +v -0.008304 -0.000000 0.002603 +v 0.002532 -0.000000 0.002320 +v 0.002430 -0.000000 0.002302 +v 0.007750 -0.000000 0.002529 +v 0.002477 -0.000000 0.002318 +v -0.001197 -0.000000 0.002376 +v 0.002512 -0.000000 0.002324 +v -0.001216 -0.000000 0.002426 +v -0.001230 -0.000000 0.002466 +v -0.001239 -0.000000 0.002494 +v -0.001237 -0.000000 0.002517 +v -0.001220 -0.000000 0.002548 +v 0.007666 -0.000000 0.002770 +v -0.001188 -0.000000 0.002586 +v -0.002757 -0.000000 0.002843 +v -0.001142 -0.000000 0.002631 +v -0.008179 -0.000000 0.002937 +v -0.001084 -0.000000 0.002681 +v -0.001015 -0.000000 0.002735 +v -0.000935 -0.000000 0.002794 +v 0.007576 -0.000000 0.003010 +v -0.000846 -0.000000 0.002856 +v -0.002533 -0.000000 0.003088 +v -0.000749 -0.000000 0.002920 +v -0.000644 -0.000000 0.002985 +v -0.008042 -0.000000 0.003258 +v -0.000534 -0.000000 0.003051 +v 0.007480 -0.000000 0.003246 +v -0.000418 -0.000000 0.003116 +v -0.002286 -0.000000 0.003300 +v -0.000152 -0.000000 0.003265 +v 0.007379 -0.000000 0.003477 +v -0.007892 -0.000000 0.003569 +v 0.000067 -0.000000 0.003394 +v -0.002015 -0.000000 0.003480 +v 0.000239 -0.000000 0.003505 +v 0.007275 -0.000000 0.003703 +v -0.001721 -0.000000 0.003627 +v 0.000364 -0.000000 0.003599 +v -0.007729 -0.000000 0.003869 +v 0.000442 -0.000000 0.003677 +v -0.001403 -0.000000 0.003741 +v 0.000474 -0.000000 0.003740 +v 0.007167 -0.000000 0.003920 +v 0.000459 -0.000000 0.003791 +v -0.001061 -0.000000 0.003823 +v 0.000398 -0.000000 0.003829 +v -0.000695 -0.000000 0.003872 +v 0.000291 -0.000000 0.003857 +v 0.000138 -0.000000 0.003875 +v -0.007553 -0.000000 0.004160 +v -0.000306 -0.000000 0.003888 +v -0.000061 -0.000000 0.003885 +v 0.007056 -0.000000 0.004129 +v 0.006943 -0.000000 0.004326 +v -0.007363 -0.000000 0.004443 +v 0.006829 -0.000000 0.004512 +v -0.007158 -0.000000 0.004718 +v 0.006714 -0.000000 0.004684 +v 0.006465 -0.000000 0.005033 +v -0.006939 -0.000000 0.004986 +v -0.006704 -0.000000 0.005248 +v 0.006472 -0.000000 0.005038 +v 0.006491 -0.000000 0.005054 +v 0.006521 -0.000000 0.005077 +v 0.006558 -0.000000 0.005107 +v 0.006600 -0.000000 0.005140 +v 0.006645 -0.000000 0.005176 +v 0.006690 -0.000000 0.005211 +v 0.006732 -0.000000 0.005245 +v 0.006769 -0.000000 0.005274 +v -0.006454 -0.000000 0.005505 +v 0.006799 -0.000000 0.005298 +v 0.006818 -0.000000 0.005313 +v 0.006826 -0.000000 0.005319 +v 0.006882 -0.000000 0.005362 +v 0.006949 -0.000000 0.005411 +v 0.007027 -0.000000 0.005465 +v 0.007114 -0.000000 0.005522 +v -0.006172 
-0.000000 0.005774 +v 0.007208 -0.000000 0.005583 +v 0.007308 -0.000000 0.005645 +v 0.007413 -0.000000 0.005710 +v 0.007521 -0.000000 0.005775 +v -0.005892 -0.000000 0.006019 +v 0.007631 -0.000000 0.005840 +v 0.007741 -0.000000 0.005905 +v 0.007851 -0.000000 0.005968 +v 0.007958 -0.000000 0.006028 +v -0.005612 -0.000000 0.006244 +v 0.008065 -0.000000 0.006084 +v 0.008167 -0.000000 0.006140 +v 0.008264 -0.000000 0.006195 +v 0.008355 -0.000000 0.006249 +v -0.005328 -0.000000 0.006448 +v 0.008438 -0.000000 0.006300 +v 0.008513 -0.000000 0.006348 +v 0.008580 -0.000000 0.006393 +v 0.008636 -0.000000 0.006434 +v 0.008681 -0.000000 0.006469 +v -0.005039 -0.000000 0.006633 +v 0.008714 -0.000000 0.006499 +v 0.008735 -0.000000 0.006522 +v 0.008742 -0.000000 0.006538 +v 0.008717 -0.000000 0.006651 +v -0.004741 -0.000000 0.006801 +v 0.008648 -0.000000 0.006838 +v -0.004433 -0.000000 0.006953 +v 0.008543 -0.000000 0.007081 +v -0.004111 -0.000000 0.007089 +v 0.003782 -0.000000 0.007092 +v 0.008411 -0.000000 0.007365 +v -0.003774 -0.000000 0.007212 +v 0.003719 -0.000000 0.007094 +v 0.003845 -0.000000 0.007103 +v 0.003653 -0.000000 0.007105 +v 0.003911 -0.000000 0.007125 +v 0.003581 -0.000000 0.007127 +v 0.003983 -0.000000 0.007161 +v 0.003498 -0.000000 0.007158 +v 0.003403 -0.000000 0.007198 +v 0.004065 -0.000000 0.007210 +v 0.003045 -0.000000 0.007326 +v 0.004159 -0.000000 0.007273 +v -0.003419 -0.000000 0.007322 +v 0.004269 -0.000000 0.007352 +v -0.003044 -0.000000 0.007420 +v 0.002630 -0.000000 0.007438 +v 0.004399 -0.000000 0.007447 +v 0.008259 -0.000000 0.007676 +v -0.002645 -0.000000 0.007509 +v 0.002167 -0.000000 0.007532 +v 0.004649 -0.000000 0.007626 +v -0.002162 -0.000000 0.007594 +v 0.001666 -0.000000 0.007607 +v -0.001644 -0.000000 0.007658 +v 0.001135 -0.000000 0.007665 +v 0.004927 -0.000000 0.007817 +v -0.001102 -0.000000 0.007700 +v 0.000584 -0.000000 0.007703 +v 0.008095 -0.000000 0.007996 +v -0.000544 -0.000000 0.007721 +v 0.000021 -0.000000 0.007722 +v 0.005225 -0.000000 0.008015 +v 0.007928 -0.000000 0.008310 +v 0.005534 -0.000000 0.008214 +v 0.005844 -0.000000 0.008412 +v 0.007767 -0.000000 0.008601 +v 0.006147 -0.000000 0.008599 +v 0.006435 -0.000000 0.008772 +v 0.007619 -0.000000 0.008857 +v 0.006699 -0.000000 0.008926 +v 0.007492 -0.000000 0.009058 +v 0.006929 -0.000000 0.009055 +v 0.007118 -0.000000 0.009152 +v 0.007395 -0.000000 0.009191 +v 0.007257 -0.000000 0.009216 +v 0.007336 -0.000000 0.009237 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.065 +s 1 +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 8//1 9//1 7//1 +f 10//1 9//1 8//1 +f 10//1 11//1 9//1 +f 10//1 12//1 11//1 +f 13//1 12//1 10//1 +f 13//1 14//1 12//1 +f 13//1 15//1 14//1 +f 16//1 15//1 13//1 +f 16//1 17//1 15//1 +f 16//1 18//1 17//1 +f 19//1 18//1 16//1 +f 19//1 20//1 18//1 +f 19//1 21//1 20//1 +f 22//1 21//1 19//1 +f 22//1 23//1 21//1 +f 22//1 24//1 23//1 +f 25//1 24//1 22//1 +f 25//1 26//1 24//1 +f 25//1 27//1 26//1 +f 28//1 27//1 25//1 +f 28//1 29//1 27//1 +f 28//1 30//1 29//1 +f 31//1 30//1 28//1 +f 31//1 32//1 30//1 +f 31//1 33//1 32//1 +f 31//1 34//1 33//1 +f 35//1 36//1 31//1 +f 36//1 37//1 31//1 +f 37//1 34//1 31//1 +f 37//1 38//1 34//1 +f 38//1 39//1 34//1 +f 39//1 40//1 34//1 +f 41//1 40//1 39//1 +f 42//1 40//1 41//1 +f 35//1 43//1 36//1 +f 44//1 40//1 42//1 +f 45//1 40//1 44//1 +f 35//1 46//1 43//1 +f 47//1 40//1 45//1 +f 48//1 40//1 47//1 +f 49//1 40//1 48//1 +f 35//1 50//1 46//1 +f 51//1 40//1 49//1 +f 52//1 40//1 51//1 +f 53//1 40//1 52//1 +f 
35//1 54//1 50//1 +f 53//1 55//1 40//1 +f 56//1 55//1 53//1 +f 57//1 55//1 56//1 +f 58//1 55//1 57//1 +f 35//1 59//1 54//1 +f 60//1 59//1 35//1 +f 61//1 55//1 58//1 +f 62//1 55//1 61//1 +f 63//1 55//1 62//1 +f 60//1 64//1 59//1 +f 65//1 55//1 63//1 +f 65//1 66//1 55//1 +f 67//1 66//1 65//1 +f 68//1 66//1 67//1 +f 69//1 66//1 68//1 +f 60//1 70//1 64//1 +f 71//1 66//1 69//1 +f 72//1 66//1 71//1 +f 73//1 66//1 72//1 +f 73//1 74//1 66//1 +f 60//1 75//1 70//1 +f 76//1 74//1 73//1 +f 77//1 75//1 60//1 +f 77//1 78//1 75//1 +f 76//1 79//1 74//1 +f 80//1 79//1 76//1 +f 77//1 81//1 78//1 +f 80//1 82//1 79//1 +f 83//1 82//1 80//1 +f 77//1 84//1 81//1 +f 85//1 84//1 77//1 +f 86//1 82//1 83//1 +f 86//1 87//1 82//1 +f 88//1 84//1 85//1 +f 88//1 89//1 84//1 +f 90//1 87//1 86//1 +f 90//1 91//1 87//1 +f 88//1 92//1 89//1 +f 93//1 92//1 88//1 +f 94//1 91//1 90//1 +f 94//1 95//1 91//1 +f 93//1 96//1 92//1 +f 97//1 95//1 94//1 +f 98//1 96//1 93//1 +f 97//1 99//1 95//1 +f 98//1 100//1 96//1 +f 101//1 99//1 97//1 +f 101//1 102//1 99//1 +f 103//1 100//1 98//1 +f 103//1 104//1 100//1 +f 105//1 102//1 101//1 +f 105//1 106//1 102//1 +f 103//1 107//1 104//1 +f 108//1 107//1 103//1 +f 109//1 106//1 105//1 +f 108//1 110//1 107//1 +f 109//1 111//1 106//1 +f 112//1 110//1 108//1 +f 113//1 111//1 109//1 +f 113//1 114//1 111//1 +f 112//1 115//1 110//1 +f 116//1 115//1 112//1 +f 113//1 117//1 114//1 +f 118//1 117//1 113//1 +f 116//1 119//1 115//1 +f 120//1 117//1 118//1 +f 120//1 121//1 117//1 +f 122//1 119//1 116//1 +f 123//1 121//1 120//1 +f 123//1 124//1 121//1 +f 122//1 125//1 119//1 +f 123//1 126//1 124//1 +f 127//1 126//1 123//1 +f 128//1 125//1 122//1 +f 128//1 129//1 125//1 +f 127//1 130//1 126//1 +f 131//1 132//1 133//1 +f 134//1 132//1 131//1 +f 134//1 135//1 132//1 +f 136//1 130//1 127//1 +f 137//1 135//1 134//1 +f 137//1 138//1 135//1 +f 139//1 138//1 137//1 +f 136//1 140//1 130//1 +f 139//1 141//1 138//1 +f 142//1 129//1 128//1 +f 143//1 141//1 139//1 +f 144//1 140//1 136//1 +f 142//1 145//1 129//1 +f 144//1 146//1 140//1 +f 143//1 147//1 141//1 +f 148//1 147//1 143//1 +f 149//1 146//1 144//1 +f 149//1 150//1 146//1 +f 142//1 151//1 145//1 +f 148//1 152//1 147//1 +f 153//1 151//1 142//1 +f 154//1 152//1 148//1 +f 149//1 155//1 150//1 +f 156//1 155//1 149//1 +f 157//1 152//1 154//1 +f 158//1 152//1 157//1 +f 158//1 159//1 152//1 +f 156//1 160//1 155//1 +f 161//1 160//1 156//1 +f 162//1 151//1 153//1 +f 158//1 163//1 159//1 +f 164//1 163//1 158//1 +f 162//1 165//1 151//1 +f 164//1 166//1 163//1 +f 167//1 166//1 164//1 +f 167//1 168//1 166//1 +f 169//1 160//1 161//1 +f 169//1 170//1 160//1 +f 171//1 168//1 167//1 +f 171//1 172//1 168//1 +f 171//1 173//1 172//1 +f 174//1 173//1 171//1 +f 175//1 170//1 169//1 +f 174//1 176//1 173//1 +f 174//1 177//1 176//1 +f 178//1 177//1 174//1 +f 162//1 179//1 165//1 +f 180//1 179//1 162//1 +f 181//1 170//1 175//1 +f 178//1 182//1 177//1 +f 181//1 183//1 170//1 +f 178//1 184//1 182//1 +f 185//1 184//1 178//1 +f 185//1 186//1 184//1 +f 186//1 183//1 181//1 +f 185//1 183//1 186//1 +f 187//1 183//1 185//1 +f 188//1 183//1 187//1 +f 189//1 183//1 188//1 +f 190//1 183//1 189//1 +f 191//1 183//1 190//1 +f 191//1 192//1 183//1 +f 193//1 192//1 191//1 +f 180//1 194//1 179//1 +f 195//1 192//1 193//1 +f 196//1 194//1 180//1 +f 197//1 192//1 195//1 +f 198//1 192//1 197//1 +f 199//1 192//1 198//1 +f 199//1 200//1 192//1 +f 201//1 200//1 199//1 +f 196//1 202//1 194//1 +f 203//1 200//1 201//1 +f 204//1 200//1 203//1 +f 205//1 202//1 196//1 +f 206//1 200//1 204//1 +f 206//1 207//1 200//1 +f 
208//1 207//1 206//1 +f 205//1 209//1 202//1 +f 210//1 207//1 208//1 +f 210//1 211//1 207//1 +f 212//1 209//1 205//1 +f 213//1 211//1 210//1 +f 212//1 214//1 209//1 +f 215//1 211//1 213//1 +f 215//1 216//1 211//1 +f 212//1 217//1 214//1 +f 218//1 216//1 215//1 +f 219//1 217//1 212//1 +f 220//1 216//1 218//1 +f 219//1 221//1 217//1 +f 222//1 216//1 220//1 +f 222//1 223//1 216//1 +f 224//1 223//1 222//1 +f 219//1 225//1 221//1 +f 226//1 223//1 224//1 +f 219//1 227//1 225//1 +f 228//1 223//1 226//1 +f 229//1 223//1 228//1 +f 230//1 227//1 219//1 +f 230//1 231//1 227//1 +f 232//1 223//1 229//1 +f 231//1 223//1 232//1 +f 230//1 223//1 231//1 +f 230//1 233//1 223//1 +f 230//1 234//1 233//1 +f 235//1 234//1 230//1 +f 235//1 236//1 234//1 +f 237//1 236//1 235//1 +f 237//1 238//1 236//1 +f 237//1 239//1 238//1 +f 240//1 239//1 237//1 +f 241//1 239//1 240//1 +f 241//1 242//1 239//1 +f 241//1 243//1 242//1 +f 241//1 244//1 243//1 +f 241//1 245//1 244//1 +f 241//1 246//1 245//1 +f 241//1 247//1 246//1 +f 241//1 248//1 247//1 +f 241//1 249//1 248//1 +f 241//1 250//1 249//1 +f 251//1 250//1 241//1 +f 251//1 252//1 250//1 +f 251//1 253//1 252//1 +f 251//1 254//1 253//1 +f 251//1 255//1 254//1 +f 251//1 256//1 255//1 +f 251//1 257//1 256//1 +f 251//1 258//1 257//1 +f 259//1 258//1 251//1 +f 259//1 260//1 258//1 +f 259//1 261//1 260//1 +f 259//1 262//1 261//1 +f 259//1 263//1 262//1 +f 264//1 263//1 259//1 +f 264//1 265//1 263//1 +f 264//1 266//1 265//1 +f 264//1 267//1 266//1 +f 264//1 268//1 267//1 +f 269//1 268//1 264//1 +f 269//1 270//1 268//1 +f 269//1 271//1 270//1 +f 269//1 272//1 271//1 +f 269//1 273//1 272//1 +f 274//1 273//1 269//1 +f 274//1 275//1 273//1 +f 274//1 276//1 275//1 +f 274//1 277//1 276//1 +f 274//1 278//1 277//1 +f 274//1 279//1 278//1 +f 280//1 279//1 274//1 +f 280//1 281//1 279//1 +f 280//1 282//1 281//1 +f 280//1 283//1 282//1 +f 280//1 284//1 283//1 +f 285//1 284//1 280//1 +f 285//1 286//1 284//1 +f 287//1 286//1 285//1 +f 287//1 288//1 286//1 +f 289//1 288//1 287//1 +f 289//1 290//1 288//1 +f 290//1 291//1 288//1 +f 292//1 290//1 289//1 +f 292//1 293//1 290//1 +f 294//1 291//1 290//1 +f 292//1 295//1 293//1 +f 296//1 291//1 294//1 +f 292//1 297//1 295//1 +f 298//1 291//1 296//1 +f 292//1 299//1 297//1 +f 292//1 300//1 299//1 +f 301//1 291//1 298//1 +f 292//1 302//1 300//1 +f 303//1 291//1 301//1 +f 304//1 302//1 292//1 +f 305//1 291//1 303//1 +f 306//1 302//1 304//1 +f 306//1 307//1 302//1 +f 308//1 291//1 305//1 +f 308//1 309//1 291//1 +f 310//1 307//1 306//1 +f 310//1 311//1 307//1 +f 312//1 309//1 308//1 +f 313//1 311//1 310//1 +f 313//1 314//1 311//1 +f 315//1 314//1 313//1 +f 315//1 316//1 314//1 +f 317//1 309//1 312//1 +f 318//1 316//1 315//1 +f 318//1 319//1 316//1 +f 317//1 320//1 309//1 +f 321//1 319//1 318//1 +f 321//1 322//1 319//1 +f 323//1 320//1 317//1 +f 323//1 324//1 320//1 +f 325//1 324//1 323//1 +f 326//1 324//1 325//1 +f 326//1 327//1 324//1 +f 328//1 327//1 326//1 +f 329//1 327//1 328//1 +f 329//1 330//1 327//1 +f 331//1 330//1 329//1 +f 331//1 332//1 330//1 +f 333//1 332//1 331//1 +f 334//1 332//1 333//1 +f 334//1 335//1 332//1 +f 336//1 335//1 334//1 +f 336//1 337//1 335//1 diff --git a/alphanumeric/R.mtl b/alphanumeric/R.mtl new file mode 100644 index 0000000..4c1c141 --- /dev/null +++ b/alphanumeric/R.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.067 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 
+illum 2 diff --git a/alphanumeric/R.obj b/alphanumeric/R.obj new file mode 100644 index 0000000..ad059dc --- /dev/null +++ b/alphanumeric/R.obj @@ -0,0 +1,416 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib R.mtl +o R +v -0.008083 -0.000000 0.000000 +v -0.007983 0.000000 -0.008212 +v -0.008083 0.000000 -0.008212 +v -0.007707 0.000000 -0.008212 +v -0.007290 0.000000 -0.008212 +v -0.006767 0.000000 -0.008212 +v -0.006173 0.000000 -0.008212 +v -0.005545 0.000000 -0.008212 +v -0.004916 0.000000 -0.008212 +v -0.004322 0.000000 -0.008212 +v -0.003799 0.000000 -0.008212 +v -0.003382 0.000000 -0.008212 +v -0.003106 0.000000 -0.008212 +v -0.003006 0.000000 -0.008212 +v -0.001723 0.000000 -0.008208 +v -0.000613 0.000000 -0.008199 +v 0.000340 0.000000 -0.008185 +v -0.002981 0.000000 -0.004853 +v 0.001152 0.000000 -0.008163 +v 0.001840 0.000000 -0.008133 +v 0.002421 0.000000 -0.008093 +v 0.002910 0.000000 -0.008041 +v 0.003324 0.000000 -0.007975 +v 0.003680 0.000000 -0.007896 +v 0.003994 0.000000 -0.007800 +v 0.004282 0.000000 -0.007686 +v 0.004561 0.000000 -0.007553 +v 0.004747 0.000000 -0.007451 +v 0.004931 0.000000 -0.007335 +v 0.005112 0.000000 -0.007204 +v 0.005288 0.000000 -0.007061 +v 0.005458 0.000000 -0.006906 +v 0.005622 0.000000 -0.006741 +v 0.005778 0.000000 -0.006567 +v 0.005926 0.000000 -0.006386 +v 0.006063 0.000000 -0.006199 +v 0.006189 0.000000 -0.006006 +v 0.006303 0.000000 -0.005810 +v 0.006403 0.000000 -0.005612 +v 0.006492 0.000000 -0.005376 +v 0.006564 0.000000 -0.005093 +v -0.001363 0.000000 -0.004853 +v -0.001080 0.000000 -0.004852 +v 0.006620 0.000000 -0.004771 +v -0.002981 0.000000 -0.003235 +v -0.000822 0.000000 -0.004848 +v -0.000586 0.000000 -0.004843 +v -0.000372 0.000000 -0.004834 +v -0.000178 0.000000 -0.004823 +v -0.000003 0.000000 -0.004809 +v 0.000155 0.000000 -0.004792 +v 0.000298 0.000000 -0.004772 +v 0.000426 0.000000 -0.004748 +v 0.006660 0.000000 -0.004420 +v 0.000541 0.000000 -0.004720 +v 0.000646 0.000000 -0.004689 +v 0.000740 0.000000 -0.004654 +v 0.000957 0.000000 -0.004545 +v 0.001144 0.000000 -0.004402 +v 0.006684 0.000000 -0.004049 +v 0.001301 0.000000 -0.004229 +v 0.001427 0.000000 -0.004034 +v 0.006691 0.000000 -0.003669 +v 0.001521 0.000000 -0.003820 +v 0.001582 0.000000 -0.003593 +v 0.006682 0.000000 -0.003289 +v 0.001609 0.000000 -0.003358 +v 0.001601 0.000000 -0.003121 +v 0.006657 0.000000 -0.002918 +v -0.002981 0.000000 -0.001618 +v 0.001558 0.000000 -0.002886 +v 0.006615 0.000000 -0.002565 +v 0.001477 0.000000 -0.002660 +v 0.001358 0.000000 -0.002447 +v 0.006557 0.000000 -0.002242 +v 0.001201 0.000000 -0.002252 +v 0.001084 0.000000 -0.002146 +v 0.006482 0.000000 -0.001956 +v 0.000948 0.000000 -0.002050 +v 0.000794 0.000000 -0.001964 +v 0.000618 0.000000 -0.001889 +v 0.006391 0.000000 -0.001717 +v 0.000421 0.000000 -0.001823 +v 0.000201 0.000000 -0.001767 +v -0.000044 0.000000 -0.001720 +v -0.000316 0.000000 -0.001683 +v 0.006247 0.000000 -0.001437 +v -0.000614 0.000000 -0.001654 +v -0.000941 0.000000 -0.001634 +v -0.001298 0.000000 -0.001622 +v -0.001686 0.000000 -0.001618 +v -0.002955 0.000000 -0.001618 +v -0.002885 0.000000 -0.001618 +v -0.002778 0.000000 -0.001618 +v -0.002645 0.000000 -0.001618 +v -0.002494 0.000000 -0.001618 +v -0.002334 0.000000 -0.001618 +v -0.002173 0.000000 -0.001618 +v -0.002022 0.000000 -0.001618 +v -0.001889 0.000000 -0.001618 +v -0.001782 0.000000 -0.001618 +v -0.001712 0.000000 -0.001618 +v 0.006079 0.000000 -0.001165 +v 0.005889 0.000000 -0.000902 +v 0.005679 0.000000 -0.000649 +v 0.005450 
0.000000 -0.000410 +v 0.005205 0.000000 -0.000185 +v 0.004947 -0.000000 0.000023 +v -0.008083 -0.000000 0.008212 +v -0.002919 -0.000000 0.001555 +v 0.004676 -0.000000 0.000214 +v 0.004396 -0.000000 0.000384 +v 0.004107 -0.000000 0.000533 +v 0.003813 -0.000000 0.000659 +v 0.003516 -0.000000 0.000759 +v 0.002968 -0.000000 0.000921 +v 0.002979 -0.000000 0.000926 +v 0.003010 -0.000000 0.000941 +v 0.003056 -0.000000 0.000963 +v 0.003113 -0.000000 0.000992 +v 0.003179 -0.000000 0.001024 +v 0.003248 -0.000000 0.001058 +v 0.003318 -0.000000 0.001091 +v 0.003383 -0.000000 0.001123 +v 0.003441 -0.000000 0.001152 +v 0.003487 -0.000000 0.001174 +v 0.003517 -0.000000 0.001189 +v 0.003528 -0.000000 0.001194 +v 0.003769 -0.000000 0.001332 +v 0.004003 -0.000000 0.001494 +v 0.004233 -0.000000 0.001686 +v -0.002956 -0.000000 0.004878 +v -0.002371 -0.000000 0.001593 +v -0.002209 -0.000000 0.001606 +v -0.002055 -0.000000 0.001629 +v -0.001907 -0.000000 0.001660 +v -0.001766 -0.000000 0.001700 +v 0.004462 -0.000000 0.001911 +v -0.001632 -0.000000 0.001749 +v -0.001504 -0.000000 0.001806 +v -0.001384 -0.000000 0.001871 +v -0.001271 -0.000000 0.001946 +v 0.004695 -0.000000 0.002175 +v -0.001165 -0.000000 0.002028 +v -0.001066 -0.000000 0.002119 +v -0.000974 -0.000000 0.002219 +v 0.004935 -0.000000 0.002482 +v -0.000890 -0.000000 0.002327 +v -0.000843 -0.000000 0.002400 +v -0.000771 -0.000000 0.002519 +v 0.005185 -0.000000 0.002839 +v -0.000678 -0.000000 0.002679 +v -0.000564 -0.000000 0.002877 +v 0.005449 -0.000000 0.003249 +v -0.000431 -0.000000 0.003109 +v -0.000283 -0.000000 0.003372 +v 0.005730 -0.000000 0.003718 +v -0.000121 -0.000000 0.003662 +v 0.000053 -0.000000 0.003975 +v 0.006032 -0.000000 0.004250 +v 0.000237 -0.000000 0.004307 +v 0.006359 -0.000000 0.004851 +v 0.000429 -0.000000 0.004656 +v 0.000626 -0.000000 0.005018 +v 0.006714 -0.000000 0.005525 +v -0.002981 -0.000000 0.008212 +v 0.000828 -0.000000 0.005388 +v 0.001025 -0.000000 0.005758 +v 0.008083 -0.000000 0.008150 +v 0.001217 -0.000000 0.006114 +v 0.001402 -0.000000 0.006453 +v 0.001578 -0.000000 0.006771 +v 0.001743 -0.000000 0.007064 +v 0.001895 -0.000000 0.007330 +v 0.002032 -0.000000 0.007566 +v 0.002151 -0.000000 0.007767 +v 0.002252 -0.000000 0.007930 +v 0.002332 -0.000000 0.008053 +v 0.002389 -0.000000 0.008131 +v 0.002421 -0.000000 0.008162 +v 0.008029 -0.000000 0.008151 +v 0.007879 -0.000000 0.008153 +v 0.007652 -0.000000 0.008156 +v 0.007367 -0.000000 0.008160 +v 0.007044 -0.000000 0.008164 +v 0.002467 -0.000000 0.008168 +v 0.006702 -0.000000 0.008169 +v 0.002560 -0.000000 0.008174 +v 0.006360 -0.000000 0.008173 +v 0.006037 -0.000000 0.008178 +v 0.002697 -0.000000 0.008179 +v 0.005752 -0.000000 0.008181 +v 0.002874 -0.000000 0.008183 +v 0.005525 -0.000000 0.008185 +v 0.003087 -0.000000 0.008186 +v 0.005375 -0.000000 0.008187 +v 0.003334 -0.000000 0.008189 +v 0.005320 -0.000000 0.008187 +v 0.004946 -0.000000 0.008190 +v 0.003610 -0.000000 0.008191 +v 0.004584 -0.000000 0.008192 +v 0.003913 -0.000000 0.008192 +v 0.004239 -0.000000 0.008192 +v -0.005532 -0.000000 0.008212 +vn -0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.067 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 18//1 19//1 17//1 +f 18//1 20//1 19//1 +f 18//1 21//1 20//1 +f 18//1 22//1 21//1 +f 18//1 
23//1 22//1 +f 18//1 24//1 23//1 +f 18//1 25//1 24//1 +f 18//1 26//1 25//1 +f 18//1 27//1 26//1 +f 18//1 28//1 27//1 +f 18//1 29//1 28//1 +f 18//1 30//1 29//1 +f 18//1 31//1 30//1 +f 18//1 32//1 31//1 +f 18//1 33//1 32//1 +f 18//1 34//1 33//1 +f 18//1 35//1 34//1 +f 18//1 36//1 35//1 +f 18//1 37//1 36//1 +f 18//1 38//1 37//1 +f 18//1 39//1 38//1 +f 18//1 40//1 39//1 +f 18//1 41//1 40//1 +f 18//1 42//1 41//1 +f 42//1 43//1 41//1 +f 43//1 44//1 41//1 +f 1//1 45//1 18//1 +f 46//1 44//1 43//1 +f 47//1 44//1 46//1 +f 48//1 44//1 47//1 +f 49//1 44//1 48//1 +f 50//1 44//1 49//1 +f 51//1 44//1 50//1 +f 52//1 44//1 51//1 +f 53//1 44//1 52//1 +f 53//1 54//1 44//1 +f 55//1 54//1 53//1 +f 56//1 54//1 55//1 +f 57//1 54//1 56//1 +f 58//1 54//1 57//1 +f 59//1 54//1 58//1 +f 59//1 60//1 54//1 +f 61//1 60//1 59//1 +f 62//1 60//1 61//1 +f 62//1 63//1 60//1 +f 64//1 63//1 62//1 +f 65//1 63//1 64//1 +f 65//1 66//1 63//1 +f 67//1 66//1 65//1 +f 68//1 66//1 67//1 +f 68//1 69//1 66//1 +f 1//1 70//1 45//1 +f 71//1 69//1 68//1 +f 71//1 72//1 69//1 +f 73//1 72//1 71//1 +f 74//1 72//1 73//1 +f 74//1 75//1 72//1 +f 76//1 75//1 74//1 +f 77//1 75//1 76//1 +f 77//1 78//1 75//1 +f 79//1 78//1 77//1 +f 80//1 78//1 79//1 +f 81//1 78//1 80//1 +f 81//1 82//1 78//1 +f 83//1 82//1 81//1 +f 84//1 82//1 83//1 +f 85//1 82//1 84//1 +f 86//1 82//1 85//1 +f 86//1 87//1 82//1 +f 88//1 87//1 86//1 +f 89//1 87//1 88//1 +f 90//1 87//1 89//1 +f 91//1 87//1 90//1 +f 1//1 92//1 70//1 +f 1//1 93//1 92//1 +f 1//1 94//1 93//1 +f 1//1 95//1 94//1 +f 1//1 96//1 95//1 +f 1//1 97//1 96//1 +f 1//1 98//1 97//1 +f 1//1 99//1 98//1 +f 1//1 100//1 99//1 +f 1//1 101//1 100//1 +f 1//1 102//1 101//1 +f 1//1 91//1 102//1 +f 1//1 87//1 91//1 +f 1//1 103//1 87//1 +f 1//1 104//1 103//1 +f 1//1 105//1 104//1 +f 1//1 106//1 105//1 +f 1//1 107//1 106//1 +f 1//1 108//1 107//1 +f 109//1 110//1 1//1 +f 110//1 108//1 1//1 +f 110//1 111//1 108//1 +f 110//1 112//1 111//1 +f 110//1 113//1 112//1 +f 110//1 114//1 113//1 +f 110//1 115//1 114//1 +f 110//1 116//1 115//1 +f 110//1 117//1 116//1 +f 110//1 118//1 117//1 +f 110//1 119//1 118//1 +f 110//1 120//1 119//1 +f 110//1 121//1 120//1 +f 110//1 122//1 121//1 +f 110//1 123//1 122//1 +f 110//1 124//1 123//1 +f 110//1 125//1 124//1 +f 110//1 126//1 125//1 +f 110//1 127//1 126//1 +f 110//1 128//1 127//1 +f 110//1 129//1 128//1 +f 110//1 130//1 129//1 +f 110//1 131//1 130//1 +f 109//1 132//1 110//1 +f 133//1 131//1 110//1 +f 134//1 131//1 133//1 +f 135//1 131//1 134//1 +f 136//1 131//1 135//1 +f 137//1 131//1 136//1 +f 137//1 138//1 131//1 +f 139//1 138//1 137//1 +f 140//1 138//1 139//1 +f 141//1 138//1 140//1 +f 142//1 138//1 141//1 +f 142//1 143//1 138//1 +f 144//1 143//1 142//1 +f 145//1 143//1 144//1 +f 146//1 143//1 145//1 +f 146//1 147//1 143//1 +f 148//1 147//1 146//1 +f 149//1 147//1 148//1 +f 150//1 147//1 149//1 +f 150//1 151//1 147//1 +f 152//1 151//1 150//1 +f 153//1 151//1 152//1 +f 153//1 154//1 151//1 +f 155//1 154//1 153//1 +f 156//1 154//1 155//1 +f 156//1 157//1 154//1 +f 158//1 157//1 156//1 +f 159//1 157//1 158//1 +f 159//1 160//1 157//1 +f 161//1 160//1 159//1 +f 161//1 162//1 160//1 +f 163//1 162//1 161//1 +f 164//1 162//1 163//1 +f 164//1 165//1 162//1 +f 109//1 166//1 132//1 +f 167//1 165//1 164//1 +f 168//1 165//1 167//1 +f 168//1 169//1 165//1 +f 170//1 169//1 168//1 +f 171//1 169//1 170//1 +f 172//1 169//1 171//1 +f 173//1 169//1 172//1 +f 174//1 169//1 173//1 +f 175//1 169//1 174//1 +f 176//1 169//1 175//1 +f 177//1 169//1 176//1 +f 178//1 169//1 177//1 +f 179//1 169//1 178//1 +f 180//1 169//1 
179//1 +f 180//1 181//1 169//1 +f 180//1 182//1 181//1 +f 180//1 183//1 182//1 +f 180//1 184//1 183//1 +f 180//1 185//1 184//1 +f 186//1 185//1 180//1 +f 186//1 187//1 185//1 +f 188//1 187//1 186//1 +f 188//1 189//1 187//1 +f 188//1 190//1 189//1 +f 191//1 190//1 188//1 +f 191//1 192//1 190//1 +f 193//1 192//1 191//1 +f 193//1 194//1 192//1 +f 195//1 194//1 193//1 +f 195//1 196//1 194//1 +f 197//1 196//1 195//1 +f 197//1 198//1 196//1 +f 197//1 199//1 198//1 +f 200//1 199//1 197//1 +f 200//1 201//1 199//1 +f 202//1 201//1 200//1 +f 202//1 203//1 201//1 +f 204//2 166//2 109//2 diff --git a/alphanumeric/S.mtl b/alphanumeric/S.mtl new file mode 100644 index 0000000..50a5ad5 --- /dev/null +++ b/alphanumeric/S.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.070 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/S.obj b/alphanumeric/S.obj new file mode 100644 index 0000000..6b71e9e --- /dev/null +++ b/alphanumeric/S.obj @@ -0,0 +1,679 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib S.mtl +o S +v -0.000849 0.000000 -0.008434 +v 0.000110 0.000000 -0.008430 +v -0.000383 0.000000 -0.008437 +v -0.001272 0.000000 -0.008422 +v 0.000616 0.000000 -0.008414 +v -0.001637 0.000000 -0.008399 +v 0.001118 0.000000 -0.008390 +v -0.001928 0.000000 -0.008364 +v 0.001601 0.000000 -0.008358 +v -0.002486 0.000000 -0.008256 +v 0.002050 0.000000 -0.008319 +v 0.002450 0.000000 -0.008273 +v 0.002784 0.000000 -0.008222 +v -0.003009 0.000000 -0.008124 +v 0.003038 0.000000 -0.008165 +v 0.003295 0.000000 -0.008090 +v -0.003498 0.000000 -0.007968 +v 0.003542 0.000000 -0.008009 +v 0.003780 0.000000 -0.007920 +v -0.003954 0.000000 -0.007787 +v 0.004008 0.000000 -0.007826 +v 0.004227 0.000000 -0.007725 +v -0.004376 0.000000 -0.007580 +v 0.004436 0.000000 -0.007618 +v 0.004636 0.000000 -0.007504 +v -0.004765 0.000000 -0.007348 +v 0.004826 0.000000 -0.007384 +v 0.005007 0.000000 -0.007259 +v -0.005123 0.000000 -0.007090 +v 0.005178 0.000000 -0.007127 +v 0.005339 0.000000 -0.006989 +v -0.005449 0.000000 -0.006806 +v 0.005490 0.000000 -0.006846 +v 0.005709 0.000000 -0.006599 +v -0.005743 0.000000 -0.006494 +v 0.005922 0.000000 -0.006306 +v -0.006007 0.000000 -0.006154 +v 0.006125 0.000000 -0.005981 +v -0.006241 0.000000 -0.005786 +v 0.006313 0.000000 -0.005635 +v -0.006446 0.000000 -0.005390 +v -0.000341 0.000000 -0.005378 +v 0.006481 0.000000 -0.005281 +v -0.006509 0.000000 -0.005252 +v -0.000562 0.000000 -0.005368 +v -0.000115 0.000000 -0.005372 +v 0.000111 0.000000 -0.005350 +v -0.000776 0.000000 -0.005344 +v 0.000335 0.000000 -0.005312 +v -0.000980 0.000000 -0.005304 +v 0.000553 0.000000 -0.005259 +v -0.001169 0.000000 -0.005248 +v 0.006625 0.000000 -0.004930 +v 0.000762 0.000000 -0.005189 +v -0.006565 0.000000 -0.005121 +v -0.001341 0.000000 -0.005177 +v 0.000960 0.000000 -0.005104 +v -0.001492 0.000000 -0.005092 +v -0.006612 0.000000 -0.004996 +v 0.001066 0.000000 -0.005052 +v -0.001669 0.000000 -0.004952 +v 0.001161 0.000000 -0.004999 +v 0.001247 0.000000 -0.004944 +v -0.006652 0.000000 -0.004874 +v -0.001815 0.000000 -0.004802 +v 0.001326 0.000000 -0.004885 +v 0.006741 0.000000 -0.004594 +v 0.001398 0.000000 -0.004820 +v -0.006685 0.000000 -0.004751 +v 0.001467 0.000000 -0.004748 +v -0.001928 0.000000 -0.004644 +v -0.006712 0.000000 -0.004627 +v 0.001533 0.000000 -0.004666 +v 0.001597 0.000000 -0.004572 +v -0.002009 
0.000000 -0.004481 +v -0.006733 0.000000 -0.004497 +v 0.006824 0.000000 -0.004287 +v 0.001663 0.000000 -0.004465 +v -0.006750 0.000000 -0.004360 +v -0.002059 0.000000 -0.004316 +v 0.001731 0.000000 -0.004343 +v -0.006762 0.000000 -0.004213 +v 0.001803 0.000000 -0.004204 +v -0.002076 0.000000 -0.004151 +v 0.006869 0.000000 -0.004018 +v -0.006771 0.000000 -0.004053 +v 0.001880 0.000000 -0.004046 +v -0.002060 0.000000 -0.003988 +v -0.006778 0.000000 -0.003878 +v 0.001949 0.000000 -0.003903 +v 0.006872 0.000000 -0.003802 +v -0.002012 0.000000 -0.003832 +v 0.002013 0.000000 -0.003776 +v -0.006782 0.000000 -0.003686 +v -0.001932 0.000000 -0.003683 +v 0.006829 0.000000 -0.003650 +v 0.002072 0.000000 -0.003666 +v -0.006780 0.000000 -0.003478 +v -0.001818 0.000000 -0.003545 +v 0.002126 0.000000 -0.003571 +v 0.006734 0.000000 -0.003574 +v 0.006689 0.000000 -0.003564 +v 0.002177 0.000000 -0.003491 +v 0.006611 0.000000 -0.003553 +v 0.006503 0.000000 -0.003542 +v -0.001672 0.000000 -0.003421 +v 0.006368 0.000000 -0.003530 +v 0.006207 0.000000 -0.003518 +v 0.006025 0.000000 -0.003505 +v 0.005822 0.000000 -0.003493 +v 0.005603 0.000000 -0.003481 +v 0.002224 0.000000 -0.003426 +v 0.005369 0.000000 -0.003469 +v -0.006776 0.000000 -0.003294 +v 0.005123 0.000000 -0.003457 +v 0.004868 0.000000 -0.003447 +v 0.004606 0.000000 -0.003437 +v 0.004344 0.000000 -0.003424 +v 0.002269 0.000000 -0.003374 +v 0.004088 0.000000 -0.003411 +v -0.001492 0.000000 -0.003312 +v 0.003841 0.000000 -0.003398 +v 0.003606 0.000000 -0.003385 +v 0.003385 0.000000 -0.003372 +v 0.002311 0.000000 -0.003336 +v 0.003181 0.000000 -0.003359 +v 0.002997 0.000000 -0.003347 +v 0.002835 0.000000 -0.003335 +v 0.002351 0.000000 -0.003310 +v 0.002698 0.000000 -0.003325 +v 0.002589 0.000000 -0.003315 +v 0.002511 0.000000 -0.003307 +v -0.001423 0.000000 -0.003283 +v 0.002390 0.000000 -0.003296 +v 0.002465 0.000000 -0.003300 +v 0.002428 0.000000 -0.003292 +v -0.006768 0.000000 -0.003130 +v -0.001318 0.000000 -0.003244 +v -0.001180 0.000000 -0.003198 +v -0.001011 0.000000 -0.003144 +v -0.000816 0.000000 -0.003085 +v -0.006755 0.000000 -0.002982 +v -0.000598 0.000000 -0.003020 +v -0.000359 0.000000 -0.002951 +v -0.006737 0.000000 -0.002845 +v -0.000102 0.000000 -0.002879 +v 0.000169 0.000000 -0.002805 +v -0.006712 0.000000 -0.002715 +v 0.000450 0.000000 -0.002729 +v 0.000740 0.000000 -0.002653 +v -0.006679 0.000000 -0.002588 +v 0.001034 0.000000 -0.002578 +v -0.006638 0.000000 -0.002461 +v 0.001961 0.000000 -0.002333 +v -0.006587 0.000000 -0.002328 +v 0.002785 0.000000 -0.002094 +v -0.006527 0.000000 -0.002186 +v -0.006455 0.000000 -0.002030 +v 0.003514 0.000000 -0.001856 +v -0.006371 0.000000 -0.001856 +v -0.006153 0.000000 -0.001463 +v 0.004157 0.000000 -0.001614 +v 0.004720 0.000000 -0.001364 +v -0.005896 0.000000 -0.001097 +v 0.005211 0.000000 -0.001102 +v 0.005639 0.000000 -0.000823 +v -0.005595 0.000000 -0.000755 +v 0.006010 0.000000 -0.000523 +v -0.005247 0.000000 -0.000435 +v 0.006332 0.000000 -0.000197 +v -0.004847 0.000000 -0.000135 +v 0.006613 -0.000000 0.000160 +v -0.004392 -0.000000 0.000147 +v -0.003878 -0.000000 0.000413 +v 0.006861 -0.000000 0.000550 +v -0.003301 -0.000000 0.000666 +v 0.007083 -0.000000 0.000980 +v -0.002657 -0.000000 0.000908 +v -0.001942 -0.000000 0.001141 +v 0.007147 -0.000000 0.001123 +v 0.007202 -0.000000 0.001256 +v -0.001153 -0.000000 0.001368 +v 0.007249 -0.000000 0.001382 +v -0.000285 -0.000000 0.001590 +v 0.007288 -0.000000 0.001505 +v 0.007321 -0.000000 0.001631 +v 0.000070 -0.000000 0.001680 +v 0.007347 
-0.000000 0.001763 +v 0.000396 -0.000000 0.001771 +v 0.007368 -0.000000 0.001905 +v 0.000693 -0.000000 0.001863 +v 0.000962 -0.000000 0.001955 +v 0.007383 -0.000000 0.002061 +v 0.001205 -0.000000 0.002049 +v 0.001422 -0.000000 0.002145 +v 0.007394 -0.000000 0.002235 +v 0.001614 -0.000000 0.002244 +v 0.007401 -0.000000 0.002432 +v 0.001783 -0.000000 0.002345 +v 0.001929 -0.000000 0.002449 +v 0.007405 -0.000000 0.002655 +v -0.003596 -0.000000 0.002523 +v -0.002633 -0.000000 0.002462 +v -0.002637 -0.000000 0.002449 +v 0.002053 -0.000000 0.002557 +v -0.002622 -0.000000 0.002498 +v -0.002606 -0.000000 0.002552 +v -0.003737 -0.000000 0.002533 +v -0.003898 -0.000000 0.002545 +v -0.004075 -0.000000 0.002557 +v -0.002586 -0.000000 0.002620 +v 0.002157 -0.000000 0.002669 +v -0.004266 -0.000000 0.002570 +v -0.004468 -0.000000 0.002584 +v -0.004678 -0.000000 0.002598 +v -0.004894 -0.000000 0.002612 +v -0.005111 -0.000000 0.002626 +v -0.002562 -0.000000 0.002697 +v -0.005328 -0.000000 0.002639 +v -0.005542 -0.000000 0.002651 +v -0.005749 -0.000000 0.002663 +v 0.007406 -0.000000 0.002909 +v -0.005948 -0.000000 0.002673 +v 0.002241 -0.000000 0.002785 +v -0.006138 -0.000000 0.002686 +v -0.006321 -0.000000 0.002699 +v -0.002538 -0.000000 0.002778 +v -0.006495 -0.000000 0.002714 +v -0.006659 -0.000000 0.002728 +v -0.006811 -0.000000 0.002743 +v -0.006950 -0.000000 0.002758 +v -0.007072 -0.000000 0.002773 +v -0.007178 -0.000000 0.002787 +v -0.002513 -0.000000 0.002860 +v 0.002326 -0.000000 0.002934 +v -0.007264 -0.000000 0.002800 +v -0.007329 -0.000000 0.002813 +v -0.007372 -0.000000 0.002824 +v -0.007391 -0.000000 0.002834 +v -0.007406 -0.000000 0.002909 +v -0.002490 -0.000000 0.002937 +v -0.007400 -0.000000 0.003032 +v 0.007405 -0.000000 0.003176 +v 0.002393 -0.000000 0.003084 +v -0.002469 -0.000000 0.003005 +v -0.002453 -0.000000 0.003059 +v -0.007377 -0.000000 0.003198 +v -0.002442 -0.000000 0.003095 +v 0.002442 -0.000000 0.003233 +v -0.002438 -0.000000 0.003108 +v -0.002332 -0.000000 0.003440 +v 0.007402 -0.000000 0.003408 +v -0.007339 -0.000000 0.003398 +v 0.002475 -0.000000 0.003382 +v 0.002490 -0.000000 0.003531 +v -0.007287 -0.000000 0.003624 +v 0.007395 -0.000000 0.003610 +v -0.002210 -0.000000 0.003739 +v 0.002487 -0.000000 0.003677 +v 0.007383 -0.000000 0.003788 +v -0.007223 -0.000000 0.003869 +v 0.002468 -0.000000 0.003823 +v -0.002073 -0.000000 0.004007 +v 0.007366 -0.000000 0.003948 +v 0.002431 -0.000000 0.003966 +v -0.007151 -0.000000 0.004124 +v 0.007342 -0.000000 0.004094 +v 0.002378 -0.000000 0.004108 +v -0.001919 -0.000000 0.004244 +v 0.007311 -0.000000 0.004233 +v 0.002308 -0.000000 0.004246 +v -0.007071 -0.000000 0.004382 +v 0.007272 -0.000000 0.004369 +v -0.001746 -0.000000 0.004452 +v 0.002221 -0.000000 0.004382 +v 0.007223 -0.000000 0.004508 +v 0.002117 -0.000000 0.004514 +v -0.006985 -0.000000 0.004635 +v -0.001553 -0.000000 0.004632 +v 0.007163 -0.000000 0.004656 +v 0.001994 -0.000000 0.004644 +v -0.001339 -0.000000 0.004786 +v -0.006897 -0.000000 0.004875 +v 0.001866 -0.000000 0.004758 +v 0.007092 -0.000000 0.004818 +v 0.001732 -0.000000 0.004859 +v -0.001103 -0.000000 0.004913 +v 0.007008 -0.000000 0.005000 +v 0.001591 -0.000000 0.004946 +v -0.006808 -0.000000 0.005095 +v -0.000844 -0.000000 0.005015 +v 0.001442 -0.000000 0.005019 +v 0.006793 -0.000000 0.005406 +v -0.000559 -0.000000 0.005094 +v 0.001285 -0.000000 0.005079 +v 0.001117 -0.000000 0.005126 +v -0.000249 -0.000000 0.005151 +v -0.006719 -0.000000 0.005286 +v 0.000938 -0.000000 0.005161 +v 0.000088 -0.000000 0.005186 +v 
0.000747 -0.000000 0.005184 +v 0.000542 -0.000000 0.005196 +v 0.000323 -0.000000 0.005196 +v -0.006509 -0.000000 0.005680 +v 0.006548 -0.000000 0.005791 +v -0.006275 -0.000000 0.006046 +v 0.006274 -0.000000 0.006155 +v -0.006018 -0.000000 0.006386 +v 0.005971 -0.000000 0.006495 +v -0.005736 -0.000000 0.006698 +v 0.005641 -0.000000 0.006810 +v -0.005430 -0.000000 0.006984 +v 0.005286 -0.000000 0.007101 +v -0.005100 -0.000000 0.007244 +v 0.004906 -0.000000 0.007365 +v -0.004744 -0.000000 0.007477 +v 0.004502 -0.000000 0.007602 +v -0.004363 -0.000000 0.007685 +v 0.004077 -0.000000 0.007811 +v -0.003956 -0.000000 0.007867 +v 0.003630 -0.000000 0.007991 +v -0.003522 -0.000000 0.008023 +v 0.003163 -0.000000 0.008141 +v -0.003062 -0.000000 0.008154 +v 0.002677 -0.000000 0.008260 +v -0.002575 -0.000000 0.008260 +v -0.002227 -0.000000 0.008314 +v 0.002357 -0.000000 0.008314 +v -0.001825 -0.000000 0.008358 +v 0.001979 -0.000000 0.008358 +v -0.001380 -0.000000 0.008393 +v 0.001552 -0.000000 0.008393 +v -0.000904 -0.000000 0.008417 +v 0.001089 -0.000000 0.008417 +v -0.000407 -0.000000 0.008432 +v 0.000600 -0.000000 0.008432 +v 0.000098 -0.000000 0.008437 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.070 +s off +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 8//1 9//1 7//1 +f 10//1 9//1 8//1 +f 10//1 11//1 9//1 +f 10//1 12//1 11//1 +f 10//1 13//1 12//1 +f 14//1 13//1 10//1 +f 14//1 15//1 13//1 +f 14//1 16//1 15//1 +f 17//1 16//1 14//1 +f 17//1 18//1 16//1 +f 17//1 19//1 18//1 +f 20//1 19//1 17//1 +f 20//1 21//1 19//1 +f 20//1 22//1 21//1 +f 23//1 22//1 20//1 +f 23//1 24//1 22//1 +f 23//1 25//1 24//1 +f 26//1 25//1 23//1 +f 26//1 27//1 25//1 +f 26//1 28//1 27//1 +f 29//1 28//1 26//1 +f 29//1 30//1 28//1 +f 29//1 31//1 30//1 +f 32//1 31//1 29//1 +f 32//1 33//1 31//1 +f 32//1 34//1 33//1 +f 35//1 34//1 32//1 +f 35//1 36//1 34//1 +f 37//1 36//1 35//1 +f 37//1 38//1 36//1 +f 39//1 38//1 37//1 +f 39//1 40//1 38//1 +f 41//1 40//1 39//1 +f 41//1 42//1 40//1 +f 42//1 43//1 40//1 +f 44//1 42//1 41//1 +f 44//1 45//1 42//1 +f 46//1 43//1 42//1 +f 47//1 43//1 46//1 +f 44//1 48//1 45//1 +f 49//1 43//1 47//1 +f 44//1 50//1 48//1 +f 51//1 43//1 49//1 +f 44//1 52//1 50//1 +f 51//1 53//1 43//1 +f 54//1 53//1 51//1 +f 55//1 52//1 44//1 +f 55//1 56//1 52//1 +f 57//1 53//1 54//1 +f 55//1 58//1 56//1 +f 59//1 58//1 55//1 +f 60//1 53//1 57//1 +f 59//1 61//1 58//1 +f 62//1 53//1 60//1 +f 63//1 53//1 62//1 +f 64//1 61//1 59//1 +f 64//1 65//1 61//1 +f 66//1 53//1 63//1 +f 66//1 67//1 53//1 +f 68//1 67//1 66//1 +f 69//1 65//1 64//1 +f 70//1 67//1 68//1 +f 69//1 71//1 65//1 +f 72//1 71//1 69//1 +f 73//1 67//1 70//1 +f 74//1 67//1 73//1 +f 72//1 75//1 71//1 +f 76//1 75//1 72//1 +f 74//1 77//1 67//1 +f 78//1 77//1 74//1 +f 79//1 75//1 76//1 +f 79//1 80//1 75//1 +f 81//1 77//1 78//1 +f 82//1 80//1 79//1 +f 83//1 77//1 81//1 +f 82//1 84//1 80//1 +f 83//1 85//1 77//1 +f 86//1 84//1 82//1 +f 87//1 85//1 83//1 +f 86//1 88//1 84//1 +f 89//1 88//1 86//1 +f 90//1 85//1 87//1 +f 90//1 91//1 85//1 +f 89//1 92//1 88//1 +f 93//1 91//1 90//1 +f 94//1 92//1 89//1 +f 94//1 95//1 92//1 +f 93//1 96//1 91//1 +f 97//1 96//1 93//1 +f 98//1 95//1 94//1 +f 98//1 99//1 95//1 +f 100//1 96//1 97//1 +f 100//1 101//1 96//1 +f 100//1 102//1 101//1 +f 103//1 102//1 100//1 +f 103//1 104//1 102//1 +f 103//1 105//1 104//1 +f 98//1 106//1 99//1 +f 103//1 107//1 105//1 +f 103//1 108//1 107//1 +f 103//1 109//1 108//1 +f 103//1 110//1 109//1 +f 103//1 111//1 110//1 +f 112//1 111//1 103//1 +f 112//1 113//1 
111//1 +f 114//1 106//1 98//1 +f 112//1 115//1 113//1 +f 112//1 116//1 115//1 +f 112//1 117//1 116//1 +f 112//1 118//1 117//1 +f 119//1 118//1 112//1 +f 119//1 120//1 118//1 +f 114//1 121//1 106//1 +f 119//1 122//1 120//1 +f 119//1 123//1 122//1 +f 119//1 124//1 123//1 +f 125//1 124//1 119//1 +f 125//1 126//1 124//1 +f 125//1 127//1 126//1 +f 125//1 128//1 127//1 +f 129//1 128//1 125//1 +f 129//1 130//1 128//1 +f 129//1 131//1 130//1 +f 129//1 132//1 131//1 +f 114//1 133//1 121//1 +f 134//1 132//1 129//1 +f 134//1 135//1 132//1 +f 134//1 136//1 135//1 +f 137//1 133//1 114//1 +f 137//1 138//1 133//1 +f 137//1 139//1 138//1 +f 137//1 140//1 139//1 +f 137//1 141//1 140//1 +f 142//1 141//1 137//1 +f 142//1 143//1 141//1 +f 142//1 144//1 143//1 +f 145//1 144//1 142//1 +f 145//1 146//1 144//1 +f 145//1 147//1 146//1 +f 148//1 147//1 145//1 +f 148//1 149//1 147//1 +f 148//1 150//1 149//1 +f 151//1 150//1 148//1 +f 151//1 152//1 150//1 +f 153//1 152//1 151//1 +f 153//1 154//1 152//1 +f 155//1 154//1 153//1 +f 155//1 156//1 154//1 +f 157//1 156//1 155//1 +f 158//1 156//1 157//1 +f 158//1 159//1 156//1 +f 160//1 159//1 158//1 +f 161//1 159//1 160//1 +f 161//1 162//1 159//1 +f 161//1 163//1 162//1 +f 164//1 163//1 161//1 +f 164//1 165//1 163//1 +f 164//1 166//1 165//1 +f 167//1 166//1 164//1 +f 167//1 168//1 166//1 +f 169//1 168//1 167//1 +f 169//1 170//1 168//1 +f 171//1 170//1 169//1 +f 171//1 172//1 170//1 +f 173//1 172//1 171//1 +f 174//1 172//1 173//1 +f 174//1 175//1 172//1 +f 176//1 175//1 174//1 +f 176//1 177//1 175//1 +f 178//1 177//1 176//1 +f 179//1 177//1 178//1 +f 179//1 180//1 177//1 +f 179//1 181//1 180//1 +f 182//1 181//1 179//1 +f 182//1 183//1 181//1 +f 184//1 183//1 182//1 +f 184//1 185//1 183//1 +f 184//1 186//1 185//1 +f 187//1 186//1 184//1 +f 187//1 188//1 186//1 +f 189//1 188//1 187//1 +f 189//1 190//1 188//1 +f 191//1 190//1 189//1 +f 192//1 190//1 191//1 +f 192//1 193//1 190//1 +f 194//1 193//1 192//1 +f 195//1 193//1 194//1 +f 195//1 196//1 193//1 +f 197//1 196//1 195//1 +f 197//1 198//1 196//1 +f 199//1 198//1 197//1 +f 200//1 198//1 199//1 +f 200//1 201//1 198//1 +f 202//1 203//1 204//1 +f 205//1 201//1 200//1 +f 202//1 206//1 203//1 +f 202//1 207//1 206//1 +f 208//1 207//1 202//1 +f 209//1 207//1 208//1 +f 210//1 207//1 209//1 +f 210//1 211//1 207//1 +f 212//1 201//1 205//1 +f 213//1 211//1 210//1 +f 214//1 211//1 213//1 +f 215//1 211//1 214//1 +f 216//1 211//1 215//1 +f 217//1 211//1 216//1 +f 217//1 218//1 211//1 +f 219//1 218//1 217//1 +f 220//1 218//1 219//1 +f 221//1 218//1 220//1 +f 212//1 222//1 201//1 +f 223//1 218//1 221//1 +f 224//1 222//1 212//1 +f 225//1 218//1 223//1 +f 226//1 218//1 225//1 +f 226//1 227//1 218//1 +f 228//1 227//1 226//1 +f 229//1 227//1 228//1 +f 230//1 227//1 229//1 +f 231//1 227//1 230//1 +f 232//1 227//1 231//1 +f 233//1 227//1 232//1 +f 233//1 234//1 227//1 +f 235//1 222//1 224//1 +f 236//1 234//1 233//1 +f 237//1 234//1 236//1 +f 238//1 234//1 237//1 +f 239//1 234//1 238//1 +f 240//1 234//1 239//1 +f 240//1 241//1 234//1 +f 242//1 241//1 240//1 +f 235//1 243//1 222//1 +f 244//1 243//1 235//1 +f 242//1 245//1 241//1 +f 242//1 246//1 245//1 +f 247//1 246//1 242//1 +f 247//1 248//1 246//1 +f 249//1 243//1 244//1 +f 247//1 250//1 248//1 +f 247//1 251//1 250//1 +f 249//1 252//1 243//1 +f 253//1 251//1 247//1 +f 254//1 252//1 249//1 +f 255//1 252//1 254//1 +f 256//1 251//1 253//1 +f 255//1 257//1 252//1 +f 256//1 258//1 251//1 +f 259//1 257//1 255//1 +f 259//1 260//1 257//1 +f 261//1 258//1 256//1 +f 262//1 260//1 259//1 +f 261//1 263//1 
258//1 +f 262//1 264//1 260//1 +f 265//1 264//1 262//1 +f 266//1 263//1 261//1 +f 265//1 267//1 264//1 +f 268//1 267//1 265//1 +f 266//1 269//1 263//1 +f 268//1 270//1 267//1 +f 271//1 270//1 268//1 +f 272//1 269//1 266//1 +f 271//1 273//1 270//1 +f 272//1 274//1 269//1 +f 275//1 273//1 271//1 +f 275//1 276//1 273//1 +f 277//1 276//1 275//1 +f 278//1 274//1 272//1 +f 278//1 279//1 274//1 +f 277//1 280//1 276//1 +f 281//1 280//1 277//1 +f 278//1 282//1 279//1 +f 283//1 282//1 278//1 +f 284//1 280//1 281//1 +f 284//1 285//1 280//1 +f 286//1 285//1 284//1 +f 283//1 287//1 282//1 +f 286//1 288//1 285//1 +f 289//1 288//1 286//1 +f 290//1 287//1 283//1 +f 290//1 291//1 287//1 +f 292//1 288//1 289//1 +f 292//1 293//1 288//1 +f 290//1 294//1 291//1 +f 295//1 293//1 292//1 +f 296//1 293//1 295//1 +f 290//1 297//1 294//1 +f 298//1 297//1 290//1 +f 299//1 293//1 296//1 +f 298//1 300//1 297//1 +f 301//1 293//1 299//1 +f 302//1 293//1 301//1 +f 298//1 303//1 300//1 +f 303//1 293//1 302//1 +f 298//1 293//1 303//1 +f 304//1 293//1 298//1 +f 304//1 305//1 293//1 +f 306//1 305//1 304//1 +f 306//1 307//1 305//1 +f 308//1 307//1 306//1 +f 308//1 309//1 307//1 +f 310//1 309//1 308//1 +f 310//1 311//1 309//1 +f 312//1 311//1 310//1 +f 312//1 313//1 311//1 +f 314//1 313//1 312//1 +f 314//1 315//1 313//1 +f 316//1 315//1 314//1 +f 316//1 317//1 315//1 +f 318//1 317//1 316//1 +f 318//1 319//1 317//1 +f 320//1 319//1 318//1 +f 320//1 321//1 319//1 +f 322//1 321//1 320//1 +f 322//1 323//1 321//1 +f 324//1 323//1 322//1 +f 324//1 325//1 323//1 +f 326//1 325//1 324//1 +f 327//1 325//1 326//1 +f 327//1 328//1 325//1 +f 329//1 328//1 327//1 +f 329//1 330//1 328//1 +f 331//1 330//1 329//1 +f 331//1 332//1 330//1 +f 333//1 332//1 331//1 +f 333//1 334//1 332//1 +f 335//1 334//1 333//1 +f 335//1 336//1 334//1 +f 337//1 336//1 335//1 diff --git a/alphanumeric/T.mtl b/alphanumeric/T.mtl new file mode 100644 index 0000000..13a5371 --- /dev/null +++ b/alphanumeric/T.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.071 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/T.obj b/alphanumeric/T.obj new file mode 100644 index 0000000..711c7cc --- /dev/null +++ b/alphanumeric/T.obj @@ -0,0 +1,38 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib T.mtl +o T +v -0.007716 0.000000 -0.006160 +v 0.000000 0.000000 -0.008213 +v -0.007716 0.000000 -0.008213 +v 0.007716 0.000000 -0.008213 +v 0.007716 0.000000 -0.006160 +v -0.007716 0.000000 -0.004107 +v 0.007716 0.000000 -0.004107 +v -0.005103 0.000000 -0.004107 +v -0.002489 0.000000 -0.004107 +v -0.002489 -0.000000 0.002052 +v 0.002613 0.000000 -0.004107 +v 0.002613 -0.000000 0.002052 +v 0.005165 0.000000 -0.004107 +v -0.002489 -0.000000 0.008213 +v 0.002613 -0.000000 0.008213 +v 0.000062 -0.000000 0.008213 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.071 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 6//1 5//1 1//1 +f 6//1 7//1 5//1 +f 8//2 7//2 6//2 +f 9//2 7//2 8//2 +f 10//1 11//1 9//1 +f 11//2 7//2 9//2 +f 10//1 12//1 11//1 +f 13//2 7//2 11//2 +f 14//1 12//1 10//1 +f 14//1 15//1 12//1 +f 16//2 15//2 14//2 diff --git a/alphanumeric/U.mtl b/alphanumeric/U.mtl new file mode 100644 index 0000000..af8d8a2 --- /dev/null +++ b/alphanumeric/U.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.073 +Ns 323.999994 +Ka 1.000000 
1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/U.obj b/alphanumeric/U.obj new file mode 100644 index 0000000..d2ef549 --- /dev/null +++ b/alphanumeric/U.obj @@ -0,0 +1,327 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib U.mtl +o U +v -0.007915 0.000000 -0.008218 +v -0.005352 0.000000 -0.008326 +v -0.007916 0.000000 -0.008326 +v -0.002801 0.000000 -0.008326 +v -0.002800 0.000000 -0.008219 +v 0.002812 0.000000 -0.002889 +v 0.005364 0.000000 -0.008326 +v 0.002812 0.000000 -0.008326 +v 0.007916 0.000000 -0.008326 +v 0.007916 0.000000 -0.008219 +v -0.002797 0.000000 -0.007924 +v 0.007916 0.000000 -0.007924 +v -0.007910 0.000000 -0.007919 +v -0.002794 0.000000 -0.007477 +v 0.007916 0.000000 -0.007477 +v -0.007906 0.000000 -0.007467 +v -0.002791 0.000000 -0.006917 +v 0.007916 0.000000 -0.006917 +v -0.007900 0.000000 -0.006901 +v -0.002787 0.000000 -0.006281 +v 0.007916 0.000000 -0.006281 +v -0.007892 0.000000 -0.006258 +v -0.002782 0.000000 -0.005608 +v 0.007916 0.000000 -0.005608 +v -0.007885 0.000000 -0.005577 +v -0.002777 0.000000 -0.004934 +v 0.007916 0.000000 -0.004934 +v -0.007877 0.000000 -0.004896 +v -0.002773 0.000000 -0.004299 +v 0.007916 0.000000 -0.004299 +v -0.007870 0.000000 -0.004253 +v -0.002769 0.000000 -0.003739 +v 0.007916 0.000000 -0.003739 +v -0.007862 0.000000 -0.003686 +v -0.002765 0.000000 -0.003292 +v 0.007916 0.000000 -0.003292 +v -0.007857 0.000000 -0.003234 +v -0.002763 0.000000 -0.002996 +v 0.007916 0.000000 -0.002996 +v -0.007854 0.000000 -0.002935 +v -0.002763 0.000000 -0.002889 +v 0.007916 0.000000 -0.002889 +v -0.007854 0.000000 -0.002827 +v -0.002752 0.000000 -0.001547 +v 0.002812 0.000000 -0.001725 +v 0.007915 0.000000 -0.001522 +v -0.007844 0.000000 -0.001704 +v 0.002809 0.000000 -0.000735 +v -0.007833 0.000000 -0.000736 +v -0.002739 0.000000 -0.000425 +v 0.007909 0.000000 -0.000363 +v -0.007823 -0.000000 0.000093 +v 0.002805 -0.000000 0.000096 +v -0.002724 -0.000000 0.000500 +v 0.007900 -0.000000 0.000612 +v -0.007810 -0.000000 0.000795 +v 0.002799 -0.000000 0.000785 +v -0.002703 -0.000000 0.001250 +v 0.007880 -0.000000 0.001426 +v 0.002788 -0.000000 0.001347 +v -0.007794 -0.000000 0.001384 +v -0.002677 -0.000000 0.001848 +v 0.002773 -0.000000 0.001797 +v -0.007777 -0.000000 0.001874 +v 0.007853 -0.000000 0.002101 +v 0.002755 -0.000000 0.002153 +v -0.002639 -0.000000 0.002317 +v -0.007756 -0.000000 0.002278 +v 0.007812 -0.000000 0.002659 +v 0.002731 -0.000000 0.002429 +v -0.007731 -0.000000 0.002610 +v -0.002589 -0.000000 0.002678 +v 0.002702 -0.000000 0.002642 +v -0.007701 -0.000000 0.002885 +v 0.002667 -0.000000 0.002807 +v 0.007758 -0.000000 0.003124 +v -0.002524 -0.000000 0.002955 +v 0.002625 -0.000000 0.002941 +v -0.007667 -0.000000 0.003114 +v 0.002576 -0.000000 0.003059 +v -0.002441 -0.000000 0.003171 +v 0.002456 -0.000000 0.003291 +v -0.007626 -0.000000 0.003313 +v 0.007688 -0.000000 0.003518 +v -0.002338 -0.000000 0.003346 +v 0.002320 -0.000000 0.003502 +v -0.007579 -0.000000 0.003494 +v -0.002214 -0.000000 0.003504 +v -0.007490 -0.000000 0.003797 +v 0.002169 -0.000000 0.003692 +v -0.002066 -0.000000 0.003668 +v 0.007600 -0.000000 0.003863 +v -0.001935 -0.000000 0.003797 +v 0.002001 -0.000000 0.003861 +v -0.001795 -0.000000 0.003915 +v -0.007394 -0.000000 0.004090 +v 0.001817 -0.000000 0.004010 +v 0.007492 -0.000000 0.004184 +v -0.001647 -0.000000 0.004022 +v 0.001616 -0.000000 0.004138 +v -0.001490 -0.000000 
0.004119 +v -0.007287 -0.000000 0.004372 +v -0.001326 -0.000000 0.004205 +v 0.001398 -0.000000 0.004246 +v 0.007362 -0.000000 0.004501 +v -0.001153 -0.000000 0.004280 +v 0.001163 -0.000000 0.004334 +v -0.000973 -0.000000 0.004343 +v 0.000911 -0.000000 0.004403 +v -0.000784 -0.000000 0.004396 +v -0.007172 -0.000000 0.004645 +v -0.000587 -0.000000 0.004437 +v 0.000642 -0.000000 0.004451 +v -0.000383 -0.000000 0.004466 +v 0.000355 -0.000000 0.004480 +v -0.000171 -0.000000 0.004484 +v 0.000049 -0.000000 0.004490 +v 0.007207 -0.000000 0.004838 +v -0.007047 -0.000000 0.004908 +v 0.007074 -0.000000 0.005084 +v -0.006913 -0.000000 0.005162 +v 0.006907 -0.000000 0.005347 +v -0.006770 -0.000000 0.005406 +v 0.006707 -0.000000 0.005621 +v -0.006617 -0.000000 0.005641 +v 0.006483 -0.000000 0.005899 +v -0.006456 -0.000000 0.005867 +v -0.006284 -0.000000 0.006084 +v 0.006237 -0.000000 0.006178 +v -0.006103 -0.000000 0.006293 +v 0.005977 -0.000000 0.006451 +v -0.005912 -0.000000 0.006493 +v 0.005706 -0.000000 0.006713 +v -0.005583 -0.000000 0.006804 +v 0.005433 -0.000000 0.006958 +v -0.005253 -0.000000 0.007078 +v 0.005158 -0.000000 0.007181 +v -0.004913 -0.000000 0.007317 +v 0.004891 -0.000000 0.007376 +v -0.004558 -0.000000 0.007525 +v 0.004633 -0.000000 0.007539 +v -0.004182 -0.000000 0.007703 +v 0.004393 -0.000000 0.007663 +v 0.004095 -0.000000 0.007792 +v -0.003775 -0.000000 0.007854 +v 0.003787 -0.000000 0.007906 +v -0.003335 -0.000000 0.007979 +v 0.003468 -0.000000 0.008005 +v -0.002851 -0.000000 0.008082 +v 0.003136 -0.000000 0.008090 +v -0.002319 -0.000000 0.008164 +v 0.002786 -0.000000 0.008162 +v 0.002417 -0.000000 0.008219 +v -0.001731 -0.000000 0.008228 +v 0.002026 -0.000000 0.008264 +v -0.001081 -0.000000 0.008276 +v 0.001610 -0.000000 0.008297 +v -0.000361 -0.000000 0.008310 +v 0.001166 -0.000000 0.008317 +v 0.000183 -0.000000 0.008323 +v 0.000691 -0.000000 0.008326 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.073 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 6//1 7//1 8//1 +f 6//1 9//1 7//1 +f 6//1 10//1 9//1 +f 1//1 11//1 5//1 +f 6//1 12//1 10//1 +f 13//1 11//1 1//1 +f 13//1 14//1 11//1 +f 6//1 15//1 12//1 +f 16//1 14//1 13//1 +f 16//1 17//1 14//1 +f 6//1 18//1 15//1 +f 19//1 17//1 16//1 +f 19//1 20//1 17//1 +f 6//1 21//1 18//1 +f 22//1 20//1 19//1 +f 22//1 23//1 20//1 +f 6//1 24//1 21//1 +f 25//1 23//1 22//1 +f 25//1 26//1 23//1 +f 6//1 27//1 24//1 +f 28//1 26//1 25//1 +f 28//1 29//1 26//1 +f 6//1 30//1 27//1 +f 31//1 29//1 28//1 +f 31//1 32//1 29//1 +f 6//1 33//1 30//1 +f 34//1 32//1 31//1 +f 34//1 35//1 32//1 +f 6//1 36//1 33//1 +f 37//1 35//1 34//1 +f 37//1 38//1 35//1 +f 6//1 39//1 36//1 +f 40//1 38//1 37//1 +f 40//1 41//1 38//1 +f 6//1 42//1 39//1 +f 43//1 41//1 40//1 +f 43//1 44//1 41//1 +f 45//1 42//1 6//1 +f 45//1 46//1 42//1 +f 47//1 44//1 43//1 +f 48//1 46//1 45//1 +f 49//1 44//1 47//1 +f 49//1 50//1 44//1 +f 48//1 51//1 46//1 +f 52//1 50//1 49//1 +f 53//1 51//1 48//1 +f 52//1 54//1 50//1 +f 53//1 55//1 51//1 +f 56//1 54//1 52//1 +f 57//1 55//1 53//1 +f 56//1 58//1 54//1 +f 57//1 59//1 55//1 +f 60//1 59//1 57//1 +f 61//1 58//1 56//1 +f 61//1 62//1 58//1 +f 63//1 59//1 60//1 +f 64//1 62//1 61//1 +f 63//1 65//1 59//1 +f 66//1 65//1 63//1 +f 64//1 67//1 62//1 +f 68//1 67//1 64//1 +f 66//1 69//1 65//1 +f 70//1 69//1 66//1 +f 71//1 67//1 68//1 +f 71//1 72//1 67//1 +f 73//1 69//1 70//1 +f 74//1 72//1 71//1 +f 75//1 69//1 73//1 +f 75//1 76//1 69//1 +f 74//1 77//1 72//1 +f 78//1 76//1 75//1 +f 79//1 77//1 74//1 +f 80//1 76//1 78//1 +f 79//1 81//1 77//1 +f 82//1 76//1 80//1 
+f 83//1 81//1 79//1 +f 82//1 84//1 76//1 +f 83//1 85//1 81//1 +f 86//1 84//1 82//1 +f 87//1 85//1 83//1 +f 87//1 88//1 85//1 +f 89//1 88//1 87//1 +f 90//1 84//1 86//1 +f 89//1 91//1 88//1 +f 90//1 92//1 84//1 +f 89//1 93//1 91//1 +f 94//1 92//1 90//1 +f 89//1 95//1 93//1 +f 96//1 95//1 89//1 +f 97//1 92//1 94//1 +f 97//1 98//1 92//1 +f 96//1 99//1 95//1 +f 100//1 98//1 97//1 +f 96//1 101//1 99//1 +f 102//1 101//1 96//1 +f 102//1 103//1 101//1 +f 104//1 98//1 100//1 +f 104//1 105//1 98//1 +f 102//1 106//1 103//1 +f 107//1 105//1 104//1 +f 102//1 108//1 106//1 +f 109//1 105//1 107//1 +f 102//1 110//1 108//1 +f 111//1 110//1 102//1 +f 111//1 112//1 110//1 +f 113//1 105//1 109//1 +f 111//1 114//1 112//1 +f 115//1 105//1 113//1 +f 111//1 116//1 114//1 +f 117//1 105//1 115//1 +f 111//1 117//1 116//1 +f 111//1 105//1 117//1 +f 111//1 118//1 105//1 +f 119//1 118//1 111//1 +f 119//1 120//1 118//1 +f 121//1 120//1 119//1 +f 121//1 122//1 120//1 +f 123//1 122//1 121//1 +f 123//1 124//1 122//1 +f 125//1 124//1 123//1 +f 125//1 126//1 124//1 +f 127//1 126//1 125//1 +f 128//1 126//1 127//1 +f 128//1 129//1 126//1 +f 130//1 129//1 128//1 +f 130//1 131//1 129//1 +f 132//1 131//1 130//1 +f 132//1 133//1 131//1 +f 134//1 133//1 132//1 +f 134//1 135//1 133//1 +f 136//1 135//1 134//1 +f 136//1 137//1 135//1 +f 138//1 137//1 136//1 +f 138//1 139//1 137//1 +f 140//1 139//1 138//1 +f 140//1 141//1 139//1 +f 142//1 141//1 140//1 +f 142//1 143//1 141//1 +f 142//1 144//1 143//1 +f 145//1 144//1 142//1 +f 145//1 146//1 144//1 +f 147//1 146//1 145//1 +f 147//1 148//1 146//1 +f 149//1 148//1 147//1 +f 149//1 150//1 148//1 +f 151//1 150//1 149//1 +f 151//1 152//1 150//1 +f 151//1 153//1 152//1 +f 154//1 153//1 151//1 +f 154//1 155//1 153//1 +f 156//1 155//1 154//1 +f 156//1 157//1 155//1 +f 158//1 157//1 156//1 +f 158//1 159//1 157//1 +f 160//1 159//1 158//1 +f 160//1 161//1 159//1 diff --git a/alphanumeric/V.mtl b/alphanumeric/V.mtl new file mode 100644 index 0000000..b12d9f1 --- /dev/null +++ b/alphanumeric/V.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.074 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/V.obj b/alphanumeric/V.obj new file mode 100644 index 0000000..f2764a9 --- /dev/null +++ b/alphanumeric/V.obj @@ -0,0 +1,257 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib V.mtl +o V +v -0.008841 0.000000 -0.008194 +v -0.008795 0.000000 -0.008213 +v -0.008848 0.000000 -0.008213 +v -0.008651 0.000000 -0.008213 +v -0.008430 0.000000 -0.008213 +v -0.008155 0.000000 -0.008213 +v -0.007842 0.000000 -0.008213 +v -0.007511 0.000000 -0.008213 +v -0.007180 0.000000 -0.008213 +v -0.006867 0.000000 -0.008213 +v -0.006591 0.000000 -0.008213 +v -0.006371 0.000000 -0.008213 +v -0.006226 0.000000 -0.008213 +v -0.006173 0.000000 -0.008213 +v -0.005812 0.000000 -0.008212 +v 0.003422 0.000000 -0.007191 +v 0.006297 0.000000 -0.008213 +v 0.003733 0.000000 -0.008213 +v 0.008848 0.000000 -0.008213 +v 0.008847 0.000000 -0.008206 +v -0.005465 0.000000 -0.008210 +v -0.005136 0.000000 -0.008207 +v -0.004827 0.000000 -0.008205 +v 0.008844 0.000000 -0.008190 +v -0.004542 0.000000 -0.008201 +v -0.004285 0.000000 -0.008195 +v -0.004058 0.000000 -0.008190 +v -0.008822 0.000000 -0.008141 +v -0.003865 0.000000 -0.008182 +v 0.008838 0.000000 -0.008163 +v -0.003709 0.000000 -0.008175 +v -0.003594 0.000000 -0.008167 +v -0.003522 0.000000 
-0.008159 +v 0.008830 0.000000 -0.008132 +v -0.003497 0.000000 -0.008150 +v -0.003428 0.000000 -0.007896 +v -0.008793 0.000000 -0.008062 +v 0.008821 0.000000 -0.008096 +v 0.008811 0.000000 -0.008056 +v -0.008755 0.000000 -0.007963 +v 0.008802 0.000000 -0.008019 +v 0.008793 0.000000 -0.007981 +v 0.008785 0.000000 -0.007950 +v -0.008713 0.000000 -0.007852 +v 0.008779 0.000000 -0.007924 +v 0.008776 0.000000 -0.007907 +v 0.008775 0.000000 -0.007901 +v 0.008737 0.000000 -0.007790 +v -0.003236 0.000000 -0.007258 +v -0.008669 0.000000 -0.007733 +v 0.008651 0.000000 -0.007547 +v -0.008624 0.000000 -0.007614 +v -0.008582 0.000000 -0.007502 +v 0.008520 0.000000 -0.007183 +v -0.008544 0.000000 -0.007404 +v -0.008515 0.000000 -0.007326 +v -0.008495 0.000000 -0.007272 +v -0.008488 0.000000 -0.007254 +v -0.002946 0.000000 -0.006315 +v -0.008398 0.000000 -0.007005 +v 0.002906 0.000000 -0.005507 +v 0.008346 0.000000 -0.006708 +v -0.008289 0.000000 -0.006708 +v -0.008152 0.000000 -0.006342 +v 0.008135 0.000000 -0.006132 +v -0.007981 0.000000 -0.005879 +v -0.002582 0.000000 -0.005142 +v 0.007891 0.000000 -0.005463 +v -0.007763 0.000000 -0.005298 +v 0.002433 0.000000 -0.003971 +v 0.007614 0.000000 -0.004711 +v -0.007491 0.000000 -0.004575 +v -0.002167 0.000000 -0.003818 +v 0.007310 0.000000 -0.003884 +v -0.007155 0.000000 -0.003683 +v 0.002003 0.000000 -0.002582 +v 0.006983 0.000000 -0.002995 +v -0.001727 0.000000 -0.002416 +v -0.006747 0.000000 -0.002602 +v 0.006633 0.000000 -0.002050 +v -0.006257 0.000000 -0.001307 +v 0.001618 0.000000 -0.001340 +v -0.001285 0.000000 -0.001018 +v 0.006267 0.000000 -0.001062 +v 0.001275 0.000000 -0.000247 +v -0.005676 -0.000000 0.000227 +v 0.005886 0.000000 -0.000037 +v -0.000866 -0.000000 0.000302 +v 0.000977 -0.000000 0.000699 +v 0.005506 -0.000000 0.000990 +v -0.004996 -0.000000 0.002024 +v -0.000494 -0.000000 0.001466 +v 0.000722 -0.000000 0.001497 +v 0.005137 -0.000000 0.001983 +v -0.000193 -0.000000 0.002399 +v 0.000510 -0.000000 0.002146 +v 0.004785 -0.000000 0.002930 +v -0.004207 -0.000000 0.004107 +v 0.000342 -0.000000 0.002647 +v 0.000013 -0.000000 0.003023 +v 0.000218 -0.000000 0.003000 +v 0.004453 -0.000000 0.003823 +v 0.000137 -0.000000 0.003205 +v 0.000099 -0.000000 0.003261 +v 0.004144 -0.000000 0.004653 +v -0.004177 -0.000000 0.004185 +v -0.004093 -0.000000 0.004406 +v -0.003968 -0.000000 0.004739 +v 0.003863 -0.000000 0.005409 +v -0.003810 -0.000000 0.005154 +v -0.003631 -0.000000 0.005627 +v 0.003612 -0.000000 0.006082 +v -0.003441 -0.000000 0.006129 +v 0.003396 -0.000000 0.006663 +v -0.003252 -0.000000 0.006630 +v -0.003073 -0.000000 0.007102 +v 0.003217 -0.000000 0.007143 +v -0.002915 -0.000000 0.007519 +v 0.003081 -0.000000 0.007511 +v 0.002991 -0.000000 0.007759 +v -0.002789 -0.000000 0.007850 +v 0.002949 -0.000000 0.007877 +v -0.002706 -0.000000 0.008071 +v 0.002825 -0.000000 0.008213 +v -0.002676 -0.000000 0.008151 +v 0.000074 -0.000000 0.008187 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.074 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 16//1 17//1 18//1 +f 16//1 19//1 17//1 +f 16//1 20//1 19//1 +f 1//1 21//1 15//1 +f 1//1 22//1 21//1 +f 1//1 23//1 22//1 +f 16//1 24//1 20//1 +f 1//1 25//1 23//1 +f 1//1 26//1 25//1 +f 1//1 27//1 26//1 +f 28//1 27//1 1//1 +f 28//1 29//1 27//1 +f 16//1 30//1 24//1 +f 28//1 31//1 29//1 +f 28//1 32//1 31//1 +f 28//1 33//1 
32//1 +f 16//1 34//1 30//1 +f 28//1 35//1 33//1 +f 28//1 36//1 35//1 +f 37//1 36//1 28//1 +f 16//1 38//1 34//1 +f 16//1 39//1 38//1 +f 40//1 36//1 37//1 +f 16//1 41//1 39//1 +f 16//1 42//1 41//1 +f 16//1 43//1 42//1 +f 44//1 36//1 40//1 +f 16//1 45//1 43//1 +f 16//1 46//1 45//1 +f 16//1 47//1 46//1 +f 16//1 48//1 47//1 +f 44//1 49//1 36//1 +f 50//1 49//1 44//1 +f 16//1 51//1 48//1 +f 52//1 49//1 50//1 +f 53//1 49//1 52//1 +f 16//1 54//1 51//1 +f 55//1 49//1 53//1 +f 56//1 49//1 55//1 +f 57//1 49//1 56//1 +f 58//1 49//1 57//1 +f 58//1 59//1 49//1 +f 60//1 59//1 58//1 +f 61//1 54//1 16//1 +f 61//1 62//1 54//1 +f 63//1 59//1 60//1 +f 64//1 59//1 63//1 +f 61//1 65//1 62//1 +f 66//1 59//1 64//1 +f 66//1 67//1 59//1 +f 61//1 68//1 65//1 +f 69//1 67//1 66//1 +f 70//1 68//1 61//1 +f 70//1 71//1 68//1 +f 72//1 67//1 69//1 +f 72//1 73//1 67//1 +f 70//1 74//1 71//1 +f 75//1 73//1 72//1 +f 76//1 74//1 70//1 +f 76//1 77//1 74//1 +f 75//1 78//1 73//1 +f 79//1 78//1 75//1 +f 76//1 80//1 77//1 +f 81//1 78//1 79//1 +f 82//1 80//1 76//1 +f 81//1 83//1 78//1 +f 82//1 84//1 80//1 +f 85//1 84//1 82//1 +f 86//1 83//1 81//1 +f 85//1 87//1 84//1 +f 86//1 88//1 83//1 +f 89//1 87//1 85//1 +f 89//1 90//1 87//1 +f 91//1 88//1 86//1 +f 91//1 92//1 88//1 +f 93//1 90//1 89//1 +f 93//1 94//1 90//1 +f 91//1 95//1 92//1 +f 96//1 94//1 93//1 +f 96//1 97//1 94//1 +f 98//1 95//1 91//1 +f 99//1 97//1 96//1 +f 98//1 100//1 95//1 +f 101//1 97//1 99//1 +f 101//1 102//1 97//1 +f 103//1 102//1 101//1 +f 98//1 104//1 100//1 +f 104//1 102//1 103//1 +f 98//1 102//1 104//1 +f 98//1 105//1 102//1 +f 106//1 105//1 98//1 +f 107//1 105//1 106//1 +f 108//1 105//1 107//1 +f 108//1 109//1 105//1 +f 110//1 109//1 108//1 +f 111//1 109//1 110//1 +f 111//1 112//1 109//1 +f 113//1 112//1 111//1 +f 113//1 114//1 112//1 +f 115//1 114//1 113//1 +f 116//1 114//1 115//1 +f 116//1 117//1 114//1 +f 118//1 117//1 116//1 +f 118//1 119//1 117//1 +f 118//1 120//1 119//1 +f 121//1 120//1 118//1 +f 121//1 122//1 120//1 +f 123//1 122//1 121//1 +f 123//1 124//1 122//1 +f 125//1 124//1 123//1 +f 126//1 124//1 125//1 diff --git a/alphanumeric/W.mtl b/alphanumeric/W.mtl new file mode 100644 index 0000000..1eafd81 --- /dev/null +++ b/alphanumeric/W.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.076 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/W.obj b/alphanumeric/W.obj new file mode 100644 index 0000000..b8df789 --- /dev/null +++ b/alphanumeric/W.obj @@ -0,0 +1,483 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib W.mtl +o W +v -0.003411 0.000000 -0.004641 +v -0.000013 0.000000 -0.008187 +v -0.002426 0.000000 -0.008212 +v 0.006185 0.000000 -0.005761 +v 0.009047 0.000000 -0.008212 +v 0.006646 0.000000 -0.008212 +v 0.011449 0.000000 -0.008212 +v 0.011448 0.000000 -0.008207 +v 0.011445 0.000000 -0.008192 +v -0.010510 0.000000 -0.008194 +v -0.009886 0.000000 -0.008195 +v -0.010221 0.000000 -0.008196 +v -0.009499 0.000000 -0.008192 +v -0.010754 0.000000 -0.008189 +v -0.009061 0.000000 -0.008187 +v 0.011440 0.000000 -0.008169 +v -0.010956 0.000000 -0.008181 +v -0.006659 0.000000 -0.008150 +v 0.002414 0.000000 -0.008150 +v -0.011118 0.000000 -0.008170 +v -0.011246 0.000000 -0.008156 +v 0.011433 0.000000 -0.008141 +v -0.011339 0.000000 -0.008138 +v -0.006644 0.000000 -0.008070 +v 0.002439 0.000000 -0.008063 +v 0.011427 0.000000 -0.008109 +v -0.011402 0.000000 
-0.008117 +v -0.011439 0.000000 -0.008092 +v 0.011418 0.000000 -0.008075 +v -0.011449 0.000000 -0.008063 +v 0.011412 0.000000 -0.008041 +v -0.006601 0.000000 -0.007850 +v 0.002504 0.000000 -0.007824 +v -0.011433 0.000000 -0.007968 +v 0.011404 0.000000 -0.008009 +v 0.011397 0.000000 -0.007981 +v 0.011393 0.000000 -0.007959 +v -0.011386 0.000000 -0.007733 +v 0.011389 0.000000 -0.007944 +v 0.011387 0.000000 -0.007938 +v 0.011375 0.000000 -0.007886 +v 0.011359 0.000000 -0.007809 +v -0.006537 0.000000 -0.007518 +v 0.002603 0.000000 -0.007462 +v 0.011339 0.000000 -0.007712 +v -0.011308 0.000000 -0.007368 +v 0.011313 0.000000 -0.007595 +v 0.011285 0.000000 -0.007462 +v -0.006458 0.000000 -0.007101 +v 0.011254 0.000000 -0.007313 +v 0.002727 0.000000 -0.007008 +v -0.011204 0.000000 -0.006883 +v 0.011219 0.000000 -0.007151 +v 0.011184 0.000000 -0.006979 +v -0.006369 0.000000 -0.006629 +v 0.002869 0.000000 -0.006493 +v 0.011146 0.000000 -0.006798 +v -0.011075 0.000000 -0.006288 +v 0.011107 0.000000 -0.006609 +v -0.006272 0.000000 -0.006128 +v 0.011068 0.000000 -0.006417 +v 0.003019 0.000000 -0.005948 +v 0.011026 0.000000 -0.006221 +v -0.010922 0.000000 -0.005593 +v 0.010862 0.000000 -0.005463 +v -0.006177 0.000000 -0.005627 +v 0.003168 0.000000 -0.005402 +v 0.006117 0.000000 -0.005409 +v -0.006086 0.000000 -0.005154 +v -0.010750 0.000000 -0.004808 +v 0.010626 0.000000 -0.004386 +v 0.006045 0.000000 -0.005035 +v 0.003308 0.000000 -0.004887 +v -0.006007 0.000000 -0.004738 +v 0.005970 0.000000 -0.004644 +v 0.003433 0.000000 -0.004433 +v -0.010560 0.000000 -0.003942 +v -0.005943 0.000000 -0.004406 +v 0.005894 0.000000 -0.004242 +v -0.003547 0.000000 -0.004146 +v 0.003533 0.000000 -0.004071 +v -0.005902 0.000000 -0.004186 +v 0.010337 0.000000 -0.003063 +v 0.005815 0.000000 -0.003835 +v -0.005887 0.000000 -0.004106 +v -0.003683 0.000000 -0.003655 +v -0.005784 0.000000 -0.003554 +v 0.003598 0.000000 -0.003832 +v -0.010352 0.000000 -0.003006 +v 0.005737 0.000000 -0.003426 +v 0.003621 0.000000 -0.003745 +v 0.003788 0.000000 -0.003151 +v -0.003816 0.000000 -0.003172 +v -0.005685 0.000000 -0.003017 +v 0.005660 0.000000 -0.003023 +v -0.003946 0.000000 -0.002704 +v 0.003948 0.000000 -0.002580 +v 0.010008 0.000000 -0.001566 +v 0.005586 0.000000 -0.002630 +v -0.005588 0.000000 -0.002500 +v -0.010131 0.000000 -0.002010 +v -0.004070 0.000000 -0.002255 +v 0.005513 0.000000 -0.002253 +v 0.004100 0.000000 -0.002036 +v -0.005495 0.000000 -0.002007 +v -0.004188 0.000000 -0.001832 +v 0.005445 0.000000 -0.001896 +v 0.000024 0.000000 -0.001680 +v 0.004244 0.000000 -0.001526 +v -0.009899 0.000000 -0.000963 +v -0.005408 0.000000 -0.001545 +v 0.005383 0.000000 -0.001567 +v -0.004298 0.000000 -0.001439 +v 0.000002 0.000000 -0.001672 +v 0.000052 0.000000 -0.001611 +v -0.000038 0.000000 -0.001593 +v 0.000102 0.000000 -0.001459 +v -0.000096 0.000000 -0.001447 +v 0.005327 0.000000 -0.001269 +v 0.009658 -0.000000 0.000032 +v -0.005329 0.000000 -0.001120 +v 0.004378 0.000000 -0.001054 +v 0.000173 0.000000 -0.001231 +v -0.000171 0.000000 -0.001234 +v -0.004397 0.000000 -0.001082 +v 0.005274 0.000000 -0.000994 +v -0.000262 0.000000 -0.000957 +v 0.000264 0.000000 -0.000930 +v -0.005255 0.000000 -0.000735 +v -0.004484 0.000000 -0.000767 +v 0.004500 0.000000 -0.000627 +v 0.005224 0.000000 -0.000732 +v -0.009658 -0.000000 0.000125 +v -0.000369 0.000000 -0.000619 +v 0.000372 0.000000 -0.000565 +v -0.004560 0.000000 -0.000499 +v -0.005191 0.000000 -0.000398 +v 0.005174 0.000000 -0.000484 +v 0.004608 0.000000 -0.000249 +v -0.000491 0.000000 
-0.000221 +v 0.000496 0.000000 -0.000140 +v -0.004622 0.000000 -0.000283 +v 0.005129 0.000000 -0.000253 +v -0.005135 0.000000 -0.000113 +v -0.004668 0.000000 -0.000124 +v 0.005085 0.000000 -0.000041 +v 0.004701 -0.000000 0.000074 +v -0.000626 -0.000000 0.000235 +v 0.000634 -0.000000 0.000338 +v -0.004730 -0.000000 0.000087 +v -0.005092 -0.000000 0.000114 +v 0.005044 -0.000000 0.000149 +v 0.009299 -0.000000 0.001660 +v 0.004777 -0.000000 0.000337 +v -0.004783 -0.000000 0.000262 +v -0.005060 -0.000000 0.000278 +v -0.009623 -0.000000 0.000283 +v 0.005007 -0.000000 0.000316 +v -0.000775 -0.000000 0.000746 +v -0.004829 -0.000000 0.000401 +v -0.005040 -0.000000 0.000373 +v -0.009526 -0.000000 0.000719 +v 0.004974 -0.000000 0.000456 +v 0.004835 -0.000000 0.000534 +v 0.000785 -0.000000 0.000864 +v -0.005019 -0.000000 0.000470 +v -0.004866 -0.000000 0.000508 +v 0.004945 -0.000000 0.000568 +v -0.005002 -0.000000 0.000547 +v -0.004897 -0.000000 0.000583 +v 0.004874 -0.000000 0.000660 +v -0.004984 -0.000000 0.000604 +v 0.004922 -0.000000 0.000649 +v -0.004924 -0.000000 0.000629 +v -0.004965 -0.000000 0.000637 +v -0.004947 -0.000000 0.000646 +v 0.004903 -0.000000 0.000697 +v 0.004891 -0.000000 0.000709 +v -0.009379 -0.000000 0.001379 +v -0.000937 -0.000000 0.001310 +v 0.000947 -0.000000 0.001431 +v -0.001110 -0.000000 0.001924 +v -0.009197 -0.000000 0.002205 +v 0.001119 -0.000000 0.002034 +v 0.008950 -0.000000 0.003245 +v -0.001295 -0.000000 0.002588 +v 0.001296 -0.000000 0.002667 +v -0.008988 -0.000000 0.003143 +v -0.001462 -0.000000 0.003193 +v 0.001481 -0.000000 0.003322 +v -0.008767 -0.000000 0.004137 +v -0.001624 -0.000000 0.003785 +v 0.008627 -0.000000 0.004715 +v 0.001507 -0.000000 0.003417 +v 0.001581 -0.000000 0.003680 +v 0.001692 -0.000000 0.004077 +v -0.001783 -0.000000 0.004357 +v 0.001832 -0.000000 0.004574 +v -0.008547 -0.000000 0.005131 +v -0.001935 -0.000000 0.004904 +v 0.001992 -0.000000 0.005138 +v 0.008341 -0.000000 0.005997 +v -0.002079 -0.000000 0.005419 +v -0.008340 -0.000000 0.006070 +v 0.002160 -0.000000 0.005736 +v -0.002212 -0.000000 0.005897 +v 0.002327 -0.000000 0.006334 +v -0.002333 -0.000000 0.006330 +v 0.008114 -0.000000 0.007021 +v -0.008156 -0.000000 0.006896 +v -0.002442 -0.000000 0.006714 +v 0.002485 -0.000000 0.006899 +v -0.002534 -0.000000 0.007041 +v -0.008009 -0.000000 0.007556 +v 0.002626 -0.000000 0.007396 +v 0.007958 -0.000000 0.007713 +v -0.002608 -0.000000 0.007307 +v -0.002664 -0.000000 0.007504 +v 0.002738 -0.000000 0.007793 +v -0.002700 -0.000000 0.007628 +v -0.007912 -0.000000 0.007992 +v -0.002875 -0.000000 0.008212 +v 0.007891 -0.000000 0.008001 +v 0.002810 -0.000000 0.008055 +v -0.007877 -0.000000 0.008150 +v 0.007870 -0.000000 0.008047 +v 0.007839 -0.000000 0.008086 +v 0.002838 -0.000000 0.008150 +v 0.007788 -0.000000 0.008118 +v 0.007709 -0.000000 0.008143 +v 0.007600 -0.000000 0.008163 +v -0.005377 -0.000000 0.008188 +v 0.005327 -0.000000 0.008188 +v 0.007449 -0.000000 0.008178 +v 0.007252 -0.000000 0.008188 +v 0.005859 -0.000000 0.008193 +v 0.007000 -0.000000 0.008194 +v 0.006311 -0.000000 0.008196 +v 0.006690 -0.000000 0.008197 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.076 +s 1 +f 1//1 2//1 3//1 +f 4//1 5//1 6//1 +f 4//1 7//1 5//1 +f 4//1 8//1 7//1 +f 4//1 9//1 8//1 +f 10//1 11//1 12//1 +f 10//1 13//1 11//1 +f 14//1 13//1 10//1 +f 14//1 15//1 13//1 +f 4//1 16//1 9//1 +f 17//1 15//1 14//1 +f 17//1 18//1 15//1 +f 1//1 19//1 2//1 +f 20//1 18//1 17//1 +f 21//1 18//1 20//1 +f 4//1 22//1 16//1 +f 23//1 18//1 21//1 +f 23//1 24//1 18//1 +f 1//1 25//1 19//1 
+f 4//1 26//1 22//1 +f 27//1 24//1 23//1 +f 28//1 24//1 27//1 +f 4//1 29//1 26//1 +f 30//1 24//1 28//1 +f 4//1 31//1 29//1 +f 30//1 32//1 24//1 +f 1//1 33//1 25//1 +f 34//1 32//1 30//1 +f 4//1 35//1 31//1 +f 4//1 36//1 35//1 +f 4//1 37//1 36//1 +f 38//1 32//1 34//1 +f 4//1 39//1 37//1 +f 4//1 40//1 39//1 +f 4//1 41//1 40//1 +f 4//1 42//1 41//1 +f 38//1 43//1 32//1 +f 1//1 44//1 33//1 +f 4//1 45//1 42//1 +f 46//1 43//1 38//1 +f 4//1 47//1 45//1 +f 4//1 48//1 47//1 +f 46//1 49//1 43//1 +f 4//1 50//1 48//1 +f 1//1 51//1 44//1 +f 52//1 49//1 46//1 +f 4//1 53//1 50//1 +f 4//1 54//1 53//1 +f 52//1 55//1 49//1 +f 1//1 56//1 51//1 +f 4//1 57//1 54//1 +f 58//1 55//1 52//1 +f 4//1 59//1 57//1 +f 58//1 60//1 55//1 +f 4//1 61//1 59//1 +f 1//1 62//1 56//1 +f 4//1 63//1 61//1 +f 64//1 60//1 58//1 +f 4//1 65//1 63//1 +f 64//1 66//1 60//1 +f 1//1 67//1 62//1 +f 68//1 65//1 4//1 +f 64//1 69//1 66//1 +f 70//1 69//1 64//1 +f 68//1 71//1 65//1 +f 72//1 71//1 68//1 +f 1//1 73//1 67//1 +f 70//1 74//1 69//1 +f 75//1 71//1 72//1 +f 1//1 76//1 73//1 +f 77//1 74//1 70//1 +f 77//1 78//1 74//1 +f 79//1 71//1 75//1 +f 80//1 76//1 1//1 +f 80//1 81//1 76//1 +f 77//1 82//1 78//1 +f 79//1 83//1 71//1 +f 84//1 83//1 79//1 +f 77//1 85//1 82//1 +f 86//1 81//1 80//1 +f 77//1 87//1 85//1 +f 86//1 88//1 81//1 +f 89//1 87//1 77//1 +f 90//1 83//1 84//1 +f 86//1 91//1 88//1 +f 86//1 92//1 91//1 +f 93//1 92//1 86//1 +f 89//1 94//1 87//1 +f 95//1 83//1 90//1 +f 96//1 92//1 93//1 +f 96//1 97//1 92//1 +f 95//1 98//1 83//1 +f 99//1 98//1 95//1 +f 89//1 100//1 94//1 +f 101//1 100//1 89//1 +f 102//1 97//1 96//1 +f 103//1 98//1 99//1 +f 102//1 104//1 97//1 +f 101//1 105//1 100//1 +f 106//1 104//1 102//1 +f 107//1 98//1 103//1 +f 106//1 108//1 104//1 +f 108//1 109//1 104//1 +f 110//1 105//1 101//1 +f 110//1 111//1 105//1 +f 112//1 98//1 107//1 +f 113//1 108//1 106//1 +f 113//1 114//1 108//1 +f 115//1 109//1 108//1 +f 113//1 116//1 114//1 +f 117//1 109//1 115//1 +f 113//1 118//1 116//1 +f 119//1 98//1 112//1 +f 119//1 120//1 98//1 +f 110//1 121//1 111//1 +f 117//1 122//1 109//1 +f 123//1 122//1 117//1 +f 113//1 124//1 118//1 +f 125//1 124//1 113//1 +f 126//1 120//1 119//1 +f 125//1 127//1 124//1 +f 128//1 122//1 123//1 +f 110//1 129//1 121//1 +f 130//1 127//1 125//1 +f 128//1 131//1 122//1 +f 132//1 120//1 126//1 +f 133//1 129//1 110//1 +f 130//1 134//1 127//1 +f 135//1 131//1 128//1 +f 136//1 134//1 130//1 +f 133//1 137//1 129//1 +f 138//1 120//1 132//1 +f 135//1 139//1 131//1 +f 136//1 140//1 134//1 +f 141//1 139//1 135//1 +f 142//1 140//1 136//1 +f 143//1 120//1 138//1 +f 133//1 144//1 137//1 +f 145//1 140//1 142//1 +f 146//1 120//1 143//1 +f 141//1 147//1 139//1 +f 145//1 148//1 140//1 +f 149//1 147//1 141//1 +f 150//1 148//1 145//1 +f 133//1 151//1 144//1 +f 152//1 120//1 146//1 +f 152//1 153//1 120//1 +f 149//1 154//1 147//1 +f 155//1 148//1 150//1 +f 133//1 156//1 151//1 +f 157//1 156//1 133//1 +f 158//1 153//1 152//1 +f 155//1 159//1 148//1 +f 160//1 159//1 155//1 +f 157//1 161//1 156//1 +f 162//1 161//1 157//1 +f 163//1 153//1 158//1 +f 149//1 164//1 154//1 +f 165//1 164//1 149//1 +f 162//1 166//1 161//1 +f 167//1 159//1 160//1 +f 168//1 153//1 163//1 +f 162//1 169//1 166//1 +f 170//1 159//1 167//1 +f 165//1 171//1 164//1 +f 162//1 172//1 169//1 +f 173//1 153//1 168//1 +f 174//1 159//1 170//1 +f 162//1 175//1 172//1 +f 176//1 159//1 174//1 +f 162//1 176//1 175//1 +f 162//1 159//1 176//1 +f 177//1 153//1 173//1 +f 165//1 178//1 171//1 +f 178//1 153//1 177//1 +f 165//1 153//1 178//1 +f 179//1 159//1 162//1 +f 179//1 180//1 159//1 +f 
181//1 153//1 165//1 +f 179//1 182//1 180//1 +f 183//1 182//1 179//1 +f 184//1 153//1 181//1 +f 184//1 185//1 153//1 +f 183//1 186//1 182//1 +f 187//1 185//1 184//1 +f 188//1 186//1 183//1 +f 188//1 189//1 186//1 +f 190//1 185//1 187//1 +f 191//1 189//1 188//1 +f 191//1 192//1 189//1 +f 190//1 193//1 185//1 +f 194//1 193//1 190//1 +f 195//1 193//1 194//1 +f 196//1 193//1 195//1 +f 191//1 197//1 192//1 +f 198//1 193//1 196//1 +f 199//1 197//1 191//1 +f 199//1 200//1 197//1 +f 201//1 193//1 198//1 +f 201//1 202//1 193//1 +f 199//1 203//1 200//1 +f 204//1 203//1 199//1 +f 205//1 202//1 201//1 +f 204//1 206//1 203//1 +f 207//1 202//1 205//1 +f 204//1 208//1 206//1 +f 207//1 209//1 202//1 +f 210//1 208//1 204//1 +f 210//1 211//1 208//1 +f 212//1 209//1 207//1 +f 210//1 213//1 211//1 +f 214//1 213//1 210//1 +f 215//1 209//1 212//1 +f 215//1 216//1 209//1 +f 214//1 217//1 213//1 +f 214//1 218//1 217//1 +f 219//1 216//1 215//1 +f 214//1 220//1 218//1 +f 221//1 220//1 214//1 +f 221//1 222//1 220//1 +f 219//1 223//1 216//1 +f 224//1 223//1 219//1 +f 225//1 222//1 221//1 +f 224//1 226//1 223//1 +f 224//1 227//1 226//1 +f 228//1 227//1 224//1 +f 228//1 229//1 227//1 +f 228//1 230//1 229//1 +f 228//1 231//1 230//1 +f 232//1 222//1 225//1 +f 233//1 231//1 228//1 +f 233//1 234//1 231//1 +f 233//1 235//1 234//1 +f 236//1 235//1 233//1 +f 236//1 237//1 235//1 +f 238//1 237//1 236//1 +f 238//1 239//1 237//1 diff --git a/alphanumeric/X.mtl b/alphanumeric/X.mtl new file mode 100644 index 0000000..61be050 --- /dev/null +++ b/alphanumeric/X.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.079 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/X.obj b/alphanumeric/X.obj new file mode 100644 index 0000000..fffd07e --- /dev/null +++ b/alphanumeric/X.obj @@ -0,0 +1,450 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib X.mtl +o X +v 0.001573 0.000000 -0.005712 +v 0.003021 0.000000 -0.008213 +v 0.002966 0.000000 -0.008213 +v 0.003170 0.000000 -0.008213 +v 0.003396 0.000000 -0.008213 +v 0.003680 0.000000 -0.008213 +v 0.004000 0.000000 -0.008213 +v 0.004342 0.000000 -0.008213 +v 0.004682 0.000000 -0.008213 +v 0.005004 0.000000 -0.008213 +v 0.005287 0.000000 -0.008213 +v 0.005513 0.000000 -0.008213 +v 0.005662 0.000000 -0.008213 +v 0.005717 0.000000 -0.008213 +v 0.006249 0.000000 -0.008212 +v 0.006708 0.000000 -0.008210 +v 0.007103 0.000000 -0.008206 +v 0.007434 0.000000 -0.008201 +v 0.007707 0.000000 -0.008191 +v 0.007927 0.000000 -0.008181 +v -0.006640 0.000000 -0.008190 +v -0.005906 0.000000 -0.008190 +v -0.006289 0.000000 -0.008190 +v -0.005497 0.000000 -0.008187 +v -0.006963 0.000000 -0.008186 +v -0.002746 0.000000 -0.008150 +v -0.007251 0.000000 -0.008183 +v -0.007506 0.000000 -0.008176 +v 0.008098 0.000000 -0.008167 +v -0.007727 0.000000 -0.008168 +v -0.007910 0.000000 -0.008159 +v 0.008227 0.000000 -0.008150 +v -0.008056 0.000000 -0.008147 +v -0.002719 0.000000 -0.008104 +v 0.008315 0.000000 -0.008128 +v -0.008162 0.000000 -0.008133 +v -0.008226 0.000000 -0.008117 +v 0.008369 0.000000 -0.008102 +v -0.008248 0.000000 -0.008101 +v -0.002645 0.000000 -0.007974 +v 0.008394 0.000000 -0.008073 +v -0.008225 0.000000 -0.008050 +v 0.008392 0.000000 -0.008038 +v -0.008158 0.000000 -0.007934 +v 0.008361 0.000000 -0.007982 +v 0.008289 0.000000 -0.007862 +v -0.002532 0.000000 -0.007781 +v -0.008050 0.000000 -0.007756 +v 
0.008178 0.000000 -0.007682 +v -0.002391 0.000000 -0.007537 +v -0.007907 0.000000 -0.007521 +v 0.008031 0.000000 -0.007447 +v -0.002231 0.000000 -0.007260 +v -0.007727 0.000000 -0.007235 +v 0.007852 0.000000 -0.007161 +v -0.002061 0.000000 -0.006967 +v -0.007517 0.000000 -0.006901 +v 0.007643 0.000000 -0.006830 +v -0.001892 0.000000 -0.006675 +v -0.007276 0.000000 -0.006526 +v 0.007407 0.000000 -0.006458 +v -0.001732 0.000000 -0.006398 +v -0.007012 0.000000 -0.006111 +v 0.007149 0.000000 -0.006049 +v -0.001591 0.000000 -0.006156 +v -0.001478 0.000000 -0.005960 +v -0.006724 0.000000 -0.005666 +v 0.006870 0.000000 -0.005610 +v -0.001404 0.000000 -0.005832 +v -0.001377 0.000000 -0.005786 +v -0.001193 0.000000 -0.005466 +v 0.001385 0.000000 -0.005375 +v -0.006418 0.000000 -0.005193 +v 0.006574 0.000000 -0.005145 +v -0.001014 0.000000 -0.005158 +v 0.001203 0.000000 -0.005054 +v -0.006094 0.000000 -0.004696 +v -0.000843 0.000000 -0.004864 +v 0.006263 0.000000 -0.004659 +v 0.001028 0.000000 -0.004752 +v -0.000681 0.000000 -0.004585 +v 0.000862 0.000000 -0.004469 +v -0.005758 0.000000 -0.004180 +v 0.005941 0.000000 -0.004155 +v -0.000531 0.000000 -0.004328 +v 0.000707 0.000000 -0.004211 +v -0.000392 0.000000 -0.004091 +v 0.000564 0.000000 -0.003980 +v -0.005421 0.000000 -0.003666 +v 0.005616 0.000000 -0.003649 +v -0.000268 0.000000 -0.003883 +v 0.000437 0.000000 -0.003779 +v -0.000160 0.000000 -0.003701 +v 0.000325 0.000000 -0.003610 +v -0.000070 0.000000 -0.003552 +v -0.005098 0.000000 -0.003171 +v 0.005304 0.000000 -0.003162 +v 0.000232 0.000000 -0.003477 +v 0.000002 0.000000 -0.003436 +v 0.000159 0.000000 -0.003383 +v 0.000052 0.000000 -0.003359 +v 0.000107 0.000000 -0.003330 +v 0.000079 0.000000 -0.003322 +v -0.004792 0.000000 -0.002699 +v 0.005007 0.000000 -0.002697 +v -0.004504 0.000000 -0.002257 +v 0.004727 0.000000 -0.002260 +v 0.004469 0.000000 -0.001855 +v -0.004240 0.000000 -0.001847 +v 0.004233 0.000000 -0.001486 +v -0.004000 0.000000 -0.001476 +v 0.004027 0.000000 -0.001158 +v -0.003789 0.000000 -0.001148 +v 0.003848 0.000000 -0.000876 +v -0.003610 0.000000 -0.000867 +v 0.003703 0.000000 -0.000644 +v -0.003465 0.000000 -0.000638 +v 0.003592 0.000000 -0.000468 +v -0.003358 0.000000 -0.000466 +v 0.003520 0.000000 -0.000351 +v -0.003292 0.000000 -0.000355 +v -0.003269 0.000000 -0.000311 +v 0.003489 0.000000 -0.000299 +v -0.003294 0.000000 -0.000263 +v 0.003507 0.000000 -0.000244 +v -0.003369 0.000000 -0.000142 +v 0.003572 0.000000 -0.000119 +v -0.003489 -0.000000 0.000048 +v 0.003681 -0.000000 0.000071 +v -0.003652 -0.000000 0.000301 +v 0.003829 -0.000000 0.000320 +v -0.003853 -0.000000 0.000612 +v 0.004016 -0.000000 0.000624 +v -0.004091 -0.000000 0.000977 +v 0.004236 -0.000000 0.000979 +v -0.004360 -0.000000 0.001388 +v 0.004487 -0.000000 0.001378 +v 0.004767 -0.000000 0.001815 +v -0.004657 -0.000000 0.001843 +v 0.005071 -0.000000 0.002289 +v -0.004980 -0.000000 0.002334 +v 0.005397 -0.000000 0.002793 +v -0.005326 -0.000000 0.002858 +v 0.005742 -0.000000 0.003322 +v -0.005690 -0.000000 0.003410 +v 0.000054 -0.000000 0.003174 +v 0.000025 -0.000000 0.003197 +v 0.000081 -0.000000 0.003197 +v 0.000134 -0.000000 0.003264 +v -0.000031 -0.000000 0.003264 +v 0.000213 -0.000000 0.003371 +v -0.000112 -0.000000 0.003371 +v 0.006103 -0.000000 0.003870 +v 0.000314 -0.000000 0.003518 +v -0.000215 -0.000000 0.003518 +v -0.006069 -0.000000 0.003981 +v 0.000436 -0.000000 0.003699 +v -0.000339 -0.000000 0.003699 +v 0.000577 -0.000000 0.003913 +v -0.000481 -0.000000 0.003913 +v 0.006462 -0.000000 
0.004416 +v 0.000734 -0.000000 0.004157 +v -0.000640 -0.000000 0.004157 +v -0.006124 -0.000000 0.004066 +v -0.006275 -0.000000 0.004295 +v 0.000906 -0.000000 0.004424 +v -0.000813 -0.000000 0.004424 +v -0.006506 -0.000000 0.004643 +v 0.006808 -0.000000 0.004943 +v 0.001090 -0.000000 0.004717 +v -0.000999 -0.000000 0.004717 +v -0.006792 -0.000000 0.005079 +v 0.001285 -0.000000 0.005028 +v -0.001196 -0.000000 0.005028 +v 0.007138 -0.000000 0.005447 +v 0.001488 -0.000000 0.005356 +v -0.001401 -0.000000 0.005356 +v -0.007118 -0.000000 0.005573 +v 0.001697 -0.000000 0.005699 +v -0.001613 -0.000000 0.005699 +v 0.007449 -0.000000 0.005921 +v -0.007463 -0.000000 0.006097 +v -0.003169 -0.000000 0.008213 +v 0.001727 -0.000000 0.005749 +v 0.001811 -0.000000 0.005885 +v 0.001938 -0.000000 0.006092 +v 0.007738 -0.000000 0.006361 +v 0.002097 -0.000000 0.006351 +v -0.007808 -0.000000 0.006622 +v 0.002278 -0.000000 0.006644 +v 0.008001 -0.000000 0.006762 +v -0.008134 -0.000000 0.007116 +v 0.002469 -0.000000 0.006955 +v 0.008235 -0.000000 0.007120 +v 0.002660 -0.000000 0.007267 +v -0.008421 -0.000000 0.007552 +v 0.008436 -0.000000 0.007426 +v 0.002840 -0.000000 0.007560 +v 0.008601 -0.000000 0.007678 +v -0.008651 -0.000000 0.007899 +v 0.002999 -0.000000 0.007819 +v 0.008729 -0.000000 0.007869 +v 0.003126 -0.000000 0.008027 +v 0.008814 -0.000000 0.007996 +v -0.008802 -0.000000 0.008129 +v 0.008853 -0.000000 0.008051 +v 0.003210 -0.000000 0.008163 +v 0.008857 -0.000000 0.008081 +v 0.008831 -0.000000 0.008105 +v 0.008770 -0.000000 0.008128 +v 0.008670 -0.000000 0.008147 +v -0.008857 -0.000000 0.008213 +v 0.008530 -0.000000 0.008164 +v 0.003240 -0.000000 0.008213 +v 0.008344 -0.000000 0.008178 +v 0.008110 -0.000000 0.008190 +v 0.007824 -0.000000 0.008198 +v 0.007483 -0.000000 0.008205 +v 0.007082 -0.000000 0.008209 +v 0.006619 -0.000000 0.008212 +v 0.006090 -0.000000 0.008213 +v -0.006007 -0.000000 0.008213 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.079 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 1//1 19//1 18//1 +f 1//1 20//1 19//1 +f 21//1 22//1 23//1 +f 21//1 24//1 22//1 +f 25//1 24//1 21//1 +f 25//1 26//1 24//1 +f 27//1 26//1 25//1 +f 28//1 26//1 27//1 +f 1//1 29//1 20//1 +f 30//1 26//1 28//1 +f 31//1 26//1 30//1 +f 1//1 32//1 29//1 +f 33//1 26//1 31//1 +f 33//1 34//1 26//1 +f 1//1 35//1 32//1 +f 36//1 34//1 33//1 +f 37//1 34//1 36//1 +f 1//1 38//1 35//1 +f 39//1 34//1 37//1 +f 39//1 40//1 34//1 +f 1//1 41//1 38//1 +f 42//1 40//1 39//1 +f 1//1 43//1 41//1 +f 44//1 40//1 42//1 +f 1//1 45//1 43//1 +f 1//1 46//1 45//1 +f 44//1 47//1 40//1 +f 48//1 47//1 44//1 +f 1//1 49//1 46//1 +f 48//1 50//1 47//1 +f 51//1 50//1 48//1 +f 1//1 52//1 49//1 +f 51//1 53//1 50//1 +f 54//1 53//1 51//1 +f 1//1 55//1 52//1 +f 54//1 56//1 53//1 +f 57//1 56//1 54//1 +f 1//1 58//1 55//1 +f 57//1 59//1 56//1 +f 60//1 59//1 57//1 +f 1//1 61//1 58//1 +f 60//1 62//1 59//1 +f 63//1 62//1 60//1 +f 1//1 64//1 61//1 +f 63//1 65//1 62//1 +f 63//1 66//1 65//1 +f 67//1 66//1 63//1 +f 1//1 68//1 64//1 +f 67//1 69//1 66//1 +f 67//1 70//1 69//1 +f 67//1 71//1 70//1 +f 72//1 68//1 1//1 +f 73//1 71//1 67//1 +f 72//1 74//1 68//1 +f 73//1 75//1 71//1 +f 76//1 74//1 72//1 +f 77//1 75//1 73//1 +f 77//1 78//1 75//1 +f 76//1 79//1 74//1 +f 80//1 79//1 76//1 +f 77//1 
81//1 78//1 +f 82//1 79//1 80//1 +f 83//1 81//1 77//1 +f 82//1 84//1 79//1 +f 83//1 85//1 81//1 +f 86//1 84//1 82//1 +f 83//1 87//1 85//1 +f 88//1 84//1 86//1 +f 89//1 87//1 83//1 +f 88//1 90//1 84//1 +f 89//1 91//1 87//1 +f 92//1 90//1 88//1 +f 89//1 93//1 91//1 +f 94//1 90//1 92//1 +f 89//1 95//1 93//1 +f 96//1 95//1 89//1 +f 94//1 97//1 90//1 +f 98//1 97//1 94//1 +f 96//1 99//1 95//1 +f 100//1 97//1 98//1 +f 96//1 101//1 99//1 +f 102//1 97//1 100//1 +f 96//1 103//1 101//1 +f 103//1 97//1 102//1 +f 96//1 97//1 103//1 +f 104//1 97//1 96//1 +f 104//1 105//1 97//1 +f 106//1 105//1 104//1 +f 106//1 107//1 105//1 +f 106//1 108//1 107//1 +f 109//1 108//1 106//1 +f 109//1 110//1 108//1 +f 111//1 110//1 109//1 +f 111//1 112//1 110//1 +f 113//1 112//1 111//1 +f 113//1 114//1 112//1 +f 115//1 114//1 113//1 +f 115//1 116//1 114//1 +f 117//1 116//1 115//1 +f 117//1 118//1 116//1 +f 119//1 118//1 117//1 +f 119//1 120//1 118//1 +f 121//1 120//1 119//1 +f 122//1 120//1 121//1 +f 122//1 123//1 120//1 +f 124//1 123//1 122//1 +f 124//1 125//1 123//1 +f 126//1 125//1 124//1 +f 126//1 127//1 125//1 +f 128//1 127//1 126//1 +f 128//1 129//1 127//1 +f 130//1 129//1 128//1 +f 130//1 131//1 129//1 +f 132//1 131//1 130//1 +f 132//1 133//1 131//1 +f 134//1 133//1 132//1 +f 134//1 135//1 133//1 +f 136//1 135//1 134//1 +f 136//1 137//1 135//1 +f 136//1 138//1 137//1 +f 139//1 138//1 136//1 +f 139//1 140//1 138//1 +f 141//1 140//1 139//1 +f 141//1 142//1 140//1 +f 143//1 142//1 141//1 +f 143//1 144//1 142//1 +f 145//1 146//1 143//1 +f 146//1 144//1 143//1 +f 145//1 147//1 146//1 +f 148//1 144//1 146//1 +f 149//1 144//1 148//1 +f 145//1 150//1 147//1 +f 151//1 144//1 149//1 +f 145//1 152//1 150//1 +f 151//1 153//1 144//1 +f 154//1 153//1 151//1 +f 145//1 155//1 152//1 +f 156//1 155//1 145//1 +f 157//1 153//1 154//1 +f 156//1 158//1 155//1 +f 159//1 153//1 157//1 +f 156//1 160//1 158//1 +f 159//1 161//1 153//1 +f 162//1 161//1 159//1 +f 156//1 163//1 160//1 +f 164//1 163//1 156//1 +f 165//1 163//1 164//1 +f 166//1 161//1 162//1 +f 165//1 167//1 163//1 +f 168//1 167//1 165//1 +f 166//1 169//1 161//1 +f 170//1 169//1 166//1 +f 168//1 171//1 167//1 +f 172//1 171//1 168//1 +f 173//1 169//1 170//1 +f 172//1 174//1 171//1 +f 173//1 175//1 169//1 +f 176//1 175//1 173//1 +f 172//1 177//1 174//1 +f 178//1 177//1 172//1 +f 179//1 175//1 176//1 +f 178//1 180//1 177//1 +f 179//1 181//1 175//1 +f 182//1 180//1 178//1 +f 182//1 183//1 180//1 +f 184//1 181//1 179//1 +f 185//1 181//1 184//1 +f 186//1 181//1 185//1 +f 186//1 187//1 181//1 +f 188//1 187//1 186//1 +f 189//1 183//1 182//1 +f 190//1 187//1 188//1 +f 190//1 191//1 187//1 +f 192//1 183//1 189//1 +f 193//1 191//1 190//1 +f 193//1 194//1 191//1 +f 195//1 194//1 193//1 +f 196//1 183//1 192//1 +f 195//1 197//1 194//1 +f 198//1 197//1 195//1 +f 198//1 199//1 197//1 +f 200//1 183//1 196//1 +f 201//1 199//1 198//1 +f 201//1 202//1 199//1 +f 203//1 202//1 201//1 +f 203//1 204//1 202//1 +f 205//1 183//1 200//1 +f 203//1 206//1 204//1 +f 207//1 206//1 203//1 +f 207//1 208//1 206//1 +f 207//1 209//1 208//1 +f 207//1 210//1 209//1 +f 207//1 211//1 210//1 +f 212//1 183//1 205//1 +f 207//1 213//1 211//1 +f 214//1 213//1 207//1 +f 214//1 215//1 213//1 +f 214//1 216//1 215//1 +f 214//1 217//1 216//1 +f 214//1 218//1 217//1 +f 214//1 219//1 218//1 +f 214//1 220//1 219//1 +f 214//1 221//1 220//1 +f 222//2 183//2 212//2 diff --git a/alphanumeric/Y.mtl b/alphanumeric/Y.mtl new file mode 100644 index 0000000..4cc4013 --- /dev/null +++ b/alphanumeric/Y.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 
'None' +# Material Count: 1 + +newmtl SVGMat.081 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/Y.obj b/alphanumeric/Y.obj new file mode 100644 index 0000000..3ddb802 --- /dev/null +++ b/alphanumeric/Y.obj @@ -0,0 +1,240 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib Y.mtl +o Y +v 0.001709 0.000000 -0.005475 +v 0.003407 0.000000 -0.008213 +v 0.003352 0.000000 -0.008213 +v 0.003559 0.000000 -0.008213 +v 0.003789 0.000000 -0.008213 +v 0.004078 0.000000 -0.008213 +v 0.004405 0.000000 -0.008213 +v 0.004752 0.000000 -0.008213 +v 0.005099 0.000000 -0.008213 +v 0.005426 0.000000 -0.008213 +v 0.005715 0.000000 -0.008213 +v 0.005945 0.000000 -0.008213 +v 0.006097 0.000000 -0.008213 +v 0.006152 0.000000 -0.008213 +v 0.006774 0.000000 -0.008213 +v 0.007296 0.000000 -0.008211 +v 0.007725 0.000000 -0.008206 +v 0.008071 0.000000 -0.008201 +v 0.008341 0.000000 -0.008191 +v -0.007459 0.000000 -0.008193 +v -0.006789 0.000000 -0.008191 +v -0.007135 0.000000 -0.008193 +v -0.007759 0.000000 -0.008190 +v -0.006424 0.000000 -0.008190 +v 0.008545 0.000000 -0.008179 +v -0.008028 0.000000 -0.008190 +v -0.006044 0.000000 -0.008188 +v -0.008267 0.000000 -0.008187 +v -0.003219 0.000000 -0.008151 +v -0.008470 0.000000 -0.008182 +v -0.008635 0.000000 -0.008179 +v -0.008756 0.000000 -0.008174 +v 0.008690 0.000000 -0.008160 +v -0.008831 0.000000 -0.008168 +v -0.008856 0.000000 -0.008163 +v -0.008828 0.000000 -0.008113 +v 0.008784 0.000000 -0.008136 +v -0.003188 0.000000 -0.008098 +v 0.008837 0.000000 -0.008108 +v -0.008743 0.000000 -0.007981 +v 0.008856 0.000000 -0.008072 +v -0.003102 0.000000 -0.007954 +v 0.008850 0.000000 -0.008027 +v 0.008829 0.000000 -0.007976 +v -0.008606 0.000000 -0.007772 +v 0.008783 0.000000 -0.007903 +v -0.002972 0.000000 -0.007737 +v 0.008683 0.000000 -0.007753 +v -0.008421 0.000000 -0.007491 +v 0.008535 0.000000 -0.007529 +v -0.002809 0.000000 -0.007464 +v 0.008342 0.000000 -0.007238 +v -0.008193 0.000000 -0.007145 +v -0.002625 0.000000 -0.007154 +v 0.008109 0.000000 -0.006886 +v -0.002429 0.000000 -0.006826 +v -0.007924 0.000000 -0.006739 +v 0.007837 0.000000 -0.006479 +v -0.002233 0.000000 -0.006497 +v -0.007618 0.000000 -0.006280 +v -0.002048 0.000000 -0.006187 +v 0.007532 0.000000 -0.006022 +v -0.007281 0.000000 -0.005774 +v -0.001886 0.000000 -0.005914 +v 0.007198 0.000000 -0.005521 +v -0.001756 0.000000 -0.005697 +v -0.006914 0.000000 -0.005224 +v -0.001670 0.000000 -0.005552 +v -0.001639 0.000000 -0.005499 +v 0.006838 0.000000 -0.004983 +v -0.001424 0.000000 -0.005142 +v 0.001485 0.000000 -0.005106 +v -0.006524 0.000000 -0.004639 +v -0.001217 0.000000 -0.004797 +v 0.001270 0.000000 -0.004752 +v 0.006456 0.000000 -0.004411 +v -0.001020 0.000000 -0.004469 +v 0.001065 0.000000 -0.004417 +v -0.006112 0.000000 -0.004025 +v -0.000834 0.000000 -0.004162 +v 0.000872 0.000000 -0.004104 +v 0.006056 0.000000 -0.003815 +v -0.000662 0.000000 -0.003877 +v 0.000693 0.000000 -0.003817 +v -0.005683 0.000000 -0.003385 +v -0.000505 0.000000 -0.003618 +v 0.000530 0.000000 -0.003559 +v 0.005642 0.000000 -0.003198 +v -0.000365 0.000000 -0.003389 +v 0.000385 0.000000 -0.003333 +v -0.000245 0.000000 -0.003193 +v -0.005621 0.000000 -0.003292 +v 0.000261 0.000000 -0.003143 +v -0.005448 0.000000 -0.003035 +v 0.002593 -0.000000 0.001343 +v -0.000146 0.000000 -0.003033 +v 0.000159 0.000000 -0.002991 +v -0.005188 0.000000 -0.002646 +v -0.000070 
0.000000 -0.002912 +v 0.000080 0.000000 -0.002881 +v -0.000019 0.000000 -0.002833 +v 0.000028 0.000000 -0.002816 +v 0.000004 0.000000 -0.002800 +v -0.004861 0.000000 -0.002159 +v -0.004490 0.000000 -0.001606 +v -0.004097 0.000000 -0.001021 +v -0.003704 0.000000 -0.000435 +v -0.003333 -0.000000 0.000117 +v -0.003006 -0.000000 0.000604 +v -0.002745 -0.000000 0.000993 +v -0.002572 -0.000000 0.001250 +v -0.002510 -0.000000 0.001343 +v -0.002510 -0.000000 0.004777 +v 0.002593 -0.000000 0.004777 +v -0.002510 -0.000000 0.008213 +v 0.002593 -0.000000 0.008213 +v 0.000041 -0.000000 0.008213 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.081 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 1//1 7//1 6//1 +f 1//1 8//1 7//1 +f 1//1 9//1 8//1 +f 1//1 10//1 9//1 +f 1//1 11//1 10//1 +f 1//1 12//1 11//1 +f 1//1 13//1 12//1 +f 1//1 14//1 13//1 +f 1//1 15//1 14//1 +f 1//1 16//1 15//1 +f 1//1 17//1 16//1 +f 1//1 18//1 17//1 +f 1//1 19//1 18//1 +f 20//1 21//1 22//1 +f 23//1 21//1 20//1 +f 23//1 24//1 21//1 +f 1//1 25//1 19//1 +f 26//1 24//1 23//1 +f 26//1 27//1 24//1 +f 28//1 27//1 26//1 +f 28//1 29//1 27//1 +f 30//1 29//1 28//1 +f 31//1 29//1 30//1 +f 32//1 29//1 31//1 +f 1//1 33//1 25//1 +f 34//1 29//1 32//1 +f 35//1 29//1 34//1 +f 36//1 29//1 35//1 +f 1//1 37//1 33//1 +f 36//1 38//1 29//1 +f 1//1 39//1 37//1 +f 40//1 38//1 36//1 +f 1//1 41//1 39//1 +f 40//1 42//1 38//1 +f 1//1 43//1 41//1 +f 1//1 44//1 43//1 +f 45//1 42//1 40//1 +f 1//1 46//1 44//1 +f 45//1 47//1 42//1 +f 1//1 48//1 46//1 +f 49//1 47//1 45//1 +f 1//1 50//1 48//1 +f 49//1 51//1 47//1 +f 1//1 52//1 50//1 +f 53//1 51//1 49//1 +f 53//1 54//1 51//1 +f 1//1 55//1 52//1 +f 53//1 56//1 54//1 +f 57//1 56//1 53//1 +f 1//1 58//1 55//1 +f 57//1 59//1 56//1 +f 60//1 59//1 57//1 +f 60//1 61//1 59//1 +f 1//1 62//1 58//1 +f 63//1 61//1 60//1 +f 63//1 64//1 61//1 +f 1//1 65//1 62//1 +f 63//1 66//1 64//1 +f 67//1 66//1 63//1 +f 67//1 68//1 66//1 +f 67//1 69//1 68//1 +f 1//1 70//1 65//1 +f 67//1 71//1 69//1 +f 72//1 70//1 1//1 +f 73//1 71//1 67//1 +f 73//1 74//1 71//1 +f 75//1 70//1 72//1 +f 75//1 76//1 70//1 +f 73//1 77//1 74//1 +f 78//1 76//1 75//1 +f 79//1 77//1 73//1 +f 79//1 80//1 77//1 +f 81//1 76//1 78//1 +f 81//1 82//1 76//1 +f 79//1 83//1 80//1 +f 84//1 82//1 81//1 +f 85//1 83//1 79//1 +f 85//1 86//1 83//1 +f 87//1 82//1 84//1 +f 87//1 88//1 82//1 +f 85//1 89//1 86//1 +f 90//1 88//1 87//1 +f 85//1 91//1 89//1 +f 92//1 91//1 85//1 +f 93//1 88//1 90//1 +f 94//1 91//1 92//1 +f 93//1 95//1 88//1 +f 94//1 96//1 91//1 +f 97//1 95//1 93//1 +f 98//1 96//1 94//1 +f 98//1 99//1 96//1 +f 100//1 95//1 97//1 +f 98//1 101//1 99//1 +f 102//1 95//1 100//1 +f 98//1 103//1 101//1 +f 103//1 95//1 102//1 +f 98//1 95//1 103//1 +f 104//1 95//1 98//1 +f 105//1 95//1 104//1 +f 106//1 95//1 105//1 +f 107//1 95//1 106//1 +f 108//1 95//1 107//1 +f 109//1 95//1 108//1 +f 110//1 95//1 109//1 +f 111//1 95//1 110//1 +f 112//1 95//1 111//1 +f 113//1 95//1 112//1 +f 113//1 114//1 95//1 +f 115//1 114//1 113//1 +f 115//1 116//1 114//1 +f 117//2 116//2 115//2 diff --git a/alphanumeric/Z.mtl b/alphanumeric/Z.mtl new file mode 100644 index 0000000..fb13499 --- /dev/null +++ b/alphanumeric/Z.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.083 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 0.000000 0.000000 0.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/alphanumeric/Z.obj b/alphanumeric/Z.obj new file mode 100644 index 
0000000..d47d0b0 --- /dev/null +++ b/alphanumeric/Z.obj @@ -0,0 +1,45 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib Z.mtl +o Z +v -0.006845 0.000000 -0.006471 +v 0.000312 0.000000 -0.008212 +v -0.006845 0.000000 -0.008212 +v 0.007467 0.000000 -0.008212 +v 0.007467 0.000000 -0.006544 +v 0.007467 0.000000 -0.004889 +v -0.006845 0.000000 -0.004728 +v -0.002775 0.000000 -0.004703 +v 0.001307 0.000000 -0.004666 +v 0.002938 0.000000 -0.000174 +v -0.003273 -0.000000 0.000088 +v -0.001580 -0.000000 0.004542 +v -0.007854 -0.000000 0.004853 +v 0.003099 -0.000000 0.004604 +v 0.007779 -0.000000 0.004667 +v 0.007815 -0.000000 0.006446 +v -0.007815 -0.000000 0.006495 +v 0.007854 -0.000000 0.008212 +v -0.007779 -0.000000 0.008150 +v 0.000038 -0.000000 0.008188 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.083 +s off +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 1//1 6//1 5//1 +f 7//1 6//1 1//1 +f 7//1 8//1 6//1 +f 8//1 9//1 6//1 +f 9//1 10//1 6//1 +f 11//1 10//1 9//1 +f 11//1 12//1 10//1 +f 13//1 12//1 11//1 +f 13//1 14//1 12//1 +f 13//1 15//1 14//1 +f 13//1 16//1 15//1 +f 17//1 16//1 13//1 +f 17//1 18//1 16//1 +f 19//1 18//1 17//1 +f 20//1 18//1 19//1 diff --git a/deeplab/models/research/deeplab/README.md b/deeplab/models/research/deeplab/README.md new file mode 100644 index 0000000..8609432 --- /dev/null +++ b/deeplab/models/research/deeplab/README.md @@ -0,0 +1,321 @@ +# DeepLab: Deep Labelling for Semantic Image Segmentation + +DeepLab is a state-of-art deep learning model for semantic image segmentation, +where the goal is to assign semantic labels (e.g., person, dog, cat and so on) +to every pixel in the input image. Current implementation includes the following +features: + +1. DeepLabv1 [1]: We use *atrous convolution* to explicitly control the + resolution at which feature responses are computed within Deep Convolutional + Neural Networks. + +2. DeepLabv2 [2]: We use *atrous spatial pyramid pooling* (ASPP) to robustly + segment objects at multiple scales with filters at multiple sampling rates + and effective fields-of-views. + +3. DeepLabv3 [3]: We augment the ASPP module with *image-level feature* [5, 6] + to capture longer range information. We also include *batch normalization* + [7] parameters to facilitate the training. In particular, we applying atrous + convolution to extract output features at different output strides during + training and evaluation, which efficiently enables training BN at output + stride = 16 and attains a high performance at output stride = 8 during + evaluation. + +4. DeepLabv3+ [4]: We extend DeepLabv3 to include a simple yet effective + decoder module to refine the segmentation results especially along object + boundaries. Furthermore, in this encoder-decoder structure one can + arbitrarily control the resolution of extracted encoder features by atrous + convolution to trade-off precision and runtime. 
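The atrous (dilated) convolution that the four variants above build on can be illustrated in a few lines of TensorFlow 1.x. The sketch below is illustrative only (dummy shapes, not code from this repository): increasing the dilation `rate` enlarges the field-of-view of a filter without adding parameters and without reducing the spatial resolution of the output.

```
import tensorflow as tf  # TF 1.x style, matching the code in this repository

# Hypothetical input batch and 3x3 kernel; shapes chosen only for illustration.
images = tf.random_uniform((1, 65, 65, 3))
kernel = tf.get_variable('w', shape=(3, 3, 3, 8))

# rate=1 is an ordinary 3x3 convolution; rate=2 inserts holes between kernel
# taps, doubling the effective field-of-view.
dense = tf.nn.atrous_conv2d(images, kernel, rate=1, padding='SAME')
atrous = tf.nn.atrous_conv2d(images, kernel, rate=2, padding='SAME')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    d, a = sess.run([dense, atrous])
    print(d.shape, a.shape)  # both (1, 65, 65, 8): resolution is preserved
```

This is the mechanism that lets the models above switch the input-to-output resolution ratio (the output stride) between 16 and 8 without changing the number of weights.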
+ +If you find the code useful for your research, please consider citing our latest +works: + +* DeepLabv3+: + +``` +@inproceedings{deeplabv3plus2018, + title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation}, + author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam}, + booktitle={ECCV}, + year={2018} +} +``` + +* MobileNetv2: + +``` +@inproceedings{mobilenetv22018, + title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, + author={Mark Sandler and Andrew Howard and Menglong Zhu and Andrey Zhmoginov and Liang-Chieh Chen}, + booktitle={CVPR}, + year={2018} +} +``` + +* MobileNetv3: + +``` +@inproceedings{mobilenetv32019, + title={Searching for MobileNetV3}, + author={Andrew Howard and Mark Sandler and Grace Chu and Liang-Chieh Chen and Bo Chen and Mingxing Tan and Weijun Wang and Yukun Zhu and Ruoming Pang and Vijay Vasudevan and Quoc V. Le and Hartwig Adam}, + booktitle={ICCV}, + year={2019} +} +``` + +* Architecture search for dense prediction cell: + +``` +@inproceedings{dpc2018, + title={Searching for Efficient Multi-Scale Architectures for Dense Image Prediction}, + author={Liang-Chieh Chen and Maxwell D. Collins and Yukun Zhu and George Papandreou and Barret Zoph and Florian Schroff and Hartwig Adam and Jonathon Shlens}, + booktitle={NIPS}, + year={2018} +} + +``` + +* Auto-DeepLab (also called hnasnet in core/nas_network.py): + +``` +@inproceedings{autodeeplab2019, + title={Auto-DeepLab: Hierarchical Neural Architecture Search for Semantic +Image Segmentation}, + author={Chenxi Liu and Liang-Chieh Chen and Florian Schroff and Hartwig Adam + and Wei Hua and Alan Yuille and Li Fei-Fei}, + booktitle={CVPR}, + year={2019} +} + +``` + + +In the current implementation, we support adopting the following network +backbones: + +1. MobileNetv2 [8] and MobileNetv3 [16]: A fast network structure designed + for mobile devices. + +2. Xception [9, 10]: A powerful network structure intended for server-side + deployment. + +3. ResNet-v1-{50,101} [14]: We provide both the original ResNet-v1 and its + 'beta' variant where the 'stem' is modified for semantic segmentation. + +4. PNASNet [15]: A Powerful network structure found by neural architecture + search. + +5. Auto-DeepLab (called HNASNet in the code): A segmentation-specific network + backbone found by neural architecture search. + +This directory contains our TensorFlow [11] implementation. We provide codes +allowing users to train the model, evaluate results in terms of mIOU (mean +intersection-over-union), and visualize segmentation results. We use PASCAL VOC +2012 [12] and Cityscapes [13] semantic segmentation benchmarks as an example in +the code. + +Some segmentation results on Flickr images: +


+ +## Contacts (Maintainers) + +* Liang-Chieh Chen, github: [aquariusjay](https://github.com/aquariusjay) +* YuKun Zhu, github: [yknzhu](https://github.com/YknZhu) +* George Papandreou, github: [gpapan](https://github.com/gpapan) +* Hui Hui, github: [huihui-personal](https://github.com/huihui-personal) +* Maxwell D. Collins, github: [mcollinswisc](https://github.com/mcollinswisc) +* Ting Liu: github: [tingliu](https://github.com/tingliu) + +## Tables of Contents + +Demo: + +* Colab notebook for off-the-shelf inference.
+ +Running: + +* Installation.
+* Running DeepLab on PASCAL VOC 2012 semantic segmentation dataset.
+* Running DeepLab on Cityscapes semantic segmentation dataset.
+* Running DeepLab on ADE20K semantic segmentation dataset.
+ +Models: + +* Checkpoints and frozen inference graphs.
+ +Misc: + +* Please check the FAQ if you have questions before reporting an issue.
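The training and evaluation tooling listed above reports segmentation quality as mIOU (mean intersection-over-union). As a rough sketch of what that metric computes, here is a hypothetical NumPy helper; it is not the repository's evaluation code:

```
import numpy as np

def mean_iou(pred, label, num_classes):
    """Illustrative mIOU: average over classes of intersection / union."""
    ious = []
    for c in range(num_classes):
        inter = np.logical_and(pred == c, label == c).sum()
        union = np.logical_or(pred == c, label == c).sum()
        if union > 0:  # skip classes absent from both prediction and label
            ious.append(inter / float(union))
    return float(np.mean(ious))

# Dummy 448x448 label maps over the 21 PASCAL VOC classes.
pred = np.random.randint(0, 21, size=(448, 448))
label = np.random.randint(0, 21, size=(448, 448))
print(mean_iou(pred, label, num_classes=21))
```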
+ +## Getting Help + +To get help with issues you may encounter while using the DeepLab Tensorflow +implementation, create a new question on +[StackOverflow](https://stackoverflow.com/) with the tag "tensorflow". + +Please report bugs (i.e., broken code, not usage questions) to the +tensorflow/models GitHub [issue +tracker](https://github.com/tensorflow/models/issues), prefixing the issue name +with "deeplab". + +## License + +All the codes in deeplab folder is covered by the [LICENSE](https://github.com/tensorflow/models/blob/master/LICENSE) +under tensorflow/models. Please refer to the LICENSE for details. + +## Change Logs + +### March 26, 2020 +* Supported EdgeTPU-DeepLab and EdgeTPU-DeepLab-slim on Cityscapes. +**Contributor**: Yun Long. + +### November 20, 2019 +* Supported MobileNetV3 large and small model variants on Cityscapes. +**Contributor**: Yukun Zhu. + + +### March 27, 2019 + +* Supported using different loss weights on different classes during training. +**Contributor**: Yuwei Yang. + + +### March 26, 2019 + +* Supported ResNet-v1-18. **Contributor**: Michalis Raptis. + + +### March 6, 2019 + +* Released the evaluation code (under the `evaluation` folder) for image +parsing, a.k.a. panoptic segmentation. In particular, the released code supports +evaluating the parsing results in terms of both the parsing covering and +panoptic quality metrics. **Contributors**: Maxwell Collins and Ting Liu. + + +### February 6, 2019 + +* Updated decoder module to exploit multiple low-level features with different +output_strides. + +### December 3, 2018 + +* Released the MobileNet-v2 checkpoint on ADE20K. + + +### November 19, 2018 + +* Supported NAS architecture for feature extraction. **Contributor**: Chenxi Liu. + +* Supported hard pixel mining during training. + + +### October 1, 2018 + +* Released MobileNet-v2 depth-multiplier = 0.5 COCO-pretrained checkpoints on +PASCAL VOC 2012, and Xception-65 COCO pretrained checkpoint (i.e., no PASCAL +pretrained). + + +### September 5, 2018 + +* Released Cityscapes pretrained checkpoints with found best dense prediction cell. + + +### May 26, 2018 + +* Updated ADE20K pretrained checkpoint. + + +### May 18, 2018 +* Added builders for ResNet-v1 and Xception model variants. +* Added ADE20K support, including colormap and pretrained Xception_65 checkpoint. +* Fixed a bug on using non-default depth_multiplier for MobileNet-v2. + + +### March 22, 2018 + +* Released checkpoints using MobileNet-V2 as network backbone and pretrained on +PASCAL VOC 2012 and Cityscapes. + + +### March 5, 2018 + +* First release of DeepLab in TensorFlow including deeper Xception network +backbone. Included chekcpoints that have been pretrained on PASCAL VOC 2012 +and Cityscapes. + +## References + +1. **Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs**
+ Liang-Chieh Chen+, George Papandreou+, Iasonas Kokkinos, Kevin Murphy, Alan L. Yuille (+ equal + contribution).
+ [[link]](https://arxiv.org/abs/1412.7062). In ICLR, 2015. + +2. **DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,** + **Atrous Convolution, and Fully Connected CRFs**
+ Liang-Chieh Chen+, George Papandreou+, Iasonas Kokkinos, Kevin Murphy, and Alan L Yuille (+ equal + contribution).
+ [[link]](http://arxiv.org/abs/1606.00915). TPAMI 2017. + +3. **Rethinking Atrous Convolution for Semantic Image Segmentation**
+ Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam.
+ [[link]](http://arxiv.org/abs/1706.05587). arXiv: 1706.05587, 2017. + +4. **Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation**
+ Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam.
+ [[link]](https://arxiv.org/abs/1802.02611). In ECCV, 2018. + +5. **ParseNet: Looking Wider to See Better**
+ Wei Liu, Andrew Rabinovich, Alexander C Berg
+ [[link]](https://arxiv.org/abs/1506.04579). arXiv:1506.04579, 2015. + +6. **Pyramid Scene Parsing Network**
+ Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, Jiaya Jia
   [[link]](https://arxiv.org/abs/1612.01105). In CVPR, 2017. + +7. **Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift**
+ Sergey Ioffe, Christian Szegedy
+ [[link]](https://arxiv.org/abs/1502.03167). In ICML, 2015. + +8. **MobileNetV2: Inverted Residuals and Linear Bottlenecks**
+ Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen
+ [[link]](https://arxiv.org/abs/1801.04381). In CVPR, 2018. + +9. **Xception: Deep Learning with Depthwise Separable Convolutions**
+ François Chollet
+ [[link]](https://arxiv.org/abs/1610.02357). In CVPR, 2017. + +10. **Deformable Convolutional Networks -- COCO Detection and Segmentation Challenge 2017 Entry**
+ Haozhi Qi, Zheng Zhang, Bin Xiao, Han Hu, Bowen Cheng, Yichen Wei, Jifeng Dai
+ [[link]](http://presentations.cocodataset.org/COCO17-Detect-MSRA.pdf). ICCV COCO Challenge + Workshop, 2017. + +11. **Tensorflow: Large-Scale Machine Learning on Heterogeneous Distributed Systems**
+ M. Abadi, A. Agarwal, et al.
+ [[link]](https://arxiv.org/abs/1603.04467). arXiv:1603.04467, 2016. + +12. **The Pascal Visual Object Classes Challenge – A Retrospective,**
   Mark Everingham, S. M. Ali Eslami, Luc Van Gool, Christopher K. I. Williams, John + Winn, and Andrew Zisserman.
+ [[link]](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). IJCV, 2014. + +13. **The Cityscapes Dataset for Semantic Urban Scene Understanding**
+ Cordts, Marius, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, Bernt Schiele.
+ [[link]](https://www.cityscapes-dataset.com/). In CVPR, 2016. + +14. **Deep Residual Learning for Image Recognition**
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
+ [[link]](https://arxiv.org/abs/1512.03385). In CVPR, 2016. + +15. **Progressive Neural Architecture Search**
+ Chenxi Liu, Barret Zoph, Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei, Alan Yuille, Jonathan Huang, Kevin Murphy.
+ [[link]](https://arxiv.org/abs/1712.00559). In ECCV, 2018. + +16. **Searching for MobileNetV3**
+ Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam.
+ [[link]](https://arxiv.org/abs/1905.02244). In ICCV, 2019. diff --git a/deeplab/models/research/deeplab/__init__.py b/deeplab/models/research/deeplab/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deeplab/models/research/deeplab/common.py b/deeplab/models/research/deeplab/common.py new file mode 100644 index 0000000..928f717 --- /dev/null +++ b/deeplab/models/research/deeplab/common.py @@ -0,0 +1,295 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides flags that are common to scripts. + +Common flags from train/eval/vis/export_model.py are collected in this script. +""" +import collections +import copy +import json +import tensorflow as tf + +flags = tf.app.flags + +# Flags for input preprocessing. + +flags.DEFINE_integer('min_resize_value', None, + 'Desired size of the smaller image side.') + +flags.DEFINE_integer('max_resize_value', None, + 'Maximum allowed size of the larger image side.') + +flags.DEFINE_integer('resize_factor', None, + 'Resized dimensions are multiple of factor plus one.') + +flags.DEFINE_boolean('keep_aspect_ratio', True, + 'Keep aspect ratio after resizing or not.') + +# Model dependent flags. + +flags.DEFINE_integer('logits_kernel_size', 1, + 'The kernel size for the convolutional kernel that ' + 'generates logits.') + +# When using 'mobilent_v2', we set atrous_rates = decoder_output_stride = None. +# When using 'xception_65' or 'resnet_v1' model variants, we set +# atrous_rates = [6, 12, 18] (output stride 16) and decoder_output_stride = 4. +# See core/feature_extractor.py for supported model variants. +flags.DEFINE_string('model_variant', 'mobilenet_v2', 'DeepLab model variant.') + +flags.DEFINE_multi_float('image_pyramid', None, + 'Input scales for multi-scale feature extraction.') + +flags.DEFINE_boolean('add_image_level_feature', True, + 'Add image level feature.') + +flags.DEFINE_list( + 'image_pooling_crop_size', None, + 'Image pooling crop size [height, width] used in the ASPP module. When ' + 'value is None, the model performs image pooling with "crop_size". This' + 'flag is useful when one likes to use different image pooling sizes.') + +flags.DEFINE_list( + 'image_pooling_stride', '1,1', + 'Image pooling stride [height, width] used in the ASPP image pooling. ') + +flags.DEFINE_boolean('aspp_with_batch_norm', True, + 'Use batch norm parameters for ASPP or not.') + +flags.DEFINE_boolean('aspp_with_separable_conv', True, + 'Use separable convolution for ASPP or not.') + +# Defaults to None. Set multi_grid = [1, 2, 4] when using provided +# 'resnet_v1_{50,101}_beta' checkpoints. 
+flags.DEFINE_multi_integer('multi_grid', None, + 'Employ a hierarchy of atrous rates for ResNet.') + +flags.DEFINE_float('depth_multiplier', 1.0, + 'Multiplier for the depth (number of channels) for all ' + 'convolution ops used in MobileNet.') + +flags.DEFINE_integer('divisible_by', None, + 'An integer that ensures the layer # channels are ' + 'divisible by this value. Used in MobileNet.') + +# For `xception_65`, use decoder_output_stride = 4. For `mobilenet_v2`, use +# decoder_output_stride = None. +flags.DEFINE_list('decoder_output_stride', None, + 'Comma-separated list of strings with the number specifying ' + 'output stride of low-level features at each network level.' + 'Current semantic segmentation implementation assumes at ' + 'most one output stride (i.e., either None or a list with ' + 'only one element.') + +flags.DEFINE_boolean('decoder_use_separable_conv', True, + 'Employ separable convolution for decoder or not.') + +flags.DEFINE_enum('merge_method', 'max', ['max', 'avg'], + 'Scheme to merge multi scale features.') + +flags.DEFINE_boolean( + 'prediction_with_upsampled_logits', True, + 'When performing prediction, there are two options: (1) bilinear ' + 'upsampling the logits followed by softmax, or (2) softmax followed by ' + 'bilinear upsampling.') + +flags.DEFINE_string( + 'dense_prediction_cell_json', + '', + 'A JSON file that specifies the dense prediction cell.') + +flags.DEFINE_integer( + 'nas_stem_output_num_conv_filters', 20, + 'Number of filters of the stem output tensor in NAS models.') + +flags.DEFINE_bool('nas_use_classification_head', False, + 'Use image classification head for NAS model variants.') + +flags.DEFINE_bool('nas_remove_os32_stride', False, + 'Remove the stride in the output stride 32 branch.') + +flags.DEFINE_bool('use_bounded_activation', False, + 'Whether or not to use bounded activations. Bounded ' + 'activations better lend themselves to quantized inference.') + +flags.DEFINE_boolean('aspp_with_concat_projection', True, + 'ASPP with concat projection.') + +flags.DEFINE_boolean('aspp_with_squeeze_and_excitation', False, + 'ASPP with squeeze and excitation.') + +flags.DEFINE_integer('aspp_convs_filters', 256, 'ASPP convolution filters.') + +flags.DEFINE_boolean('decoder_use_sum_merge', False, + 'Decoder uses simply sum merge.') + +flags.DEFINE_integer('decoder_filters', 256, 'Decoder filters.') + +flags.DEFINE_boolean('decoder_output_is_logits', False, + 'Use decoder output as logits or not.') + +flags.DEFINE_boolean('image_se_uses_qsigmoid', False, 'Use q-sigmoid.') + +flags.DEFINE_multi_float( + 'label_weights', None, + 'A list of label weights, each element represents the weight for the label ' + 'of its index, for example, label_weights = [0.1, 0.5] means the weight ' + 'for label 0 is 0.1 and the weight for label 1 is 0.5. If set as None, all ' + 'the labels have the same weight 1.0.') + +flags.DEFINE_float('batch_norm_decay', 0.9997, 'Batchnorm decay.') + +FLAGS = flags.FLAGS + +# Constants + +# Perform semantic segmentation predictions. +OUTPUT_TYPE = 'semantic' + +# Semantic segmentation item names. +LABELS_CLASS = 'labels_class' +IMAGE = 'image' +HEIGHT = 'height' +WIDTH = 'width' +IMAGE_NAME = 'image_name' +LABEL = 'label' +ORIGINAL_IMAGE = 'original_image' + +# Test set name. 
+TEST_SET = 'test' + + +class ModelOptions( + collections.namedtuple('ModelOptions', [ + 'outputs_to_num_classes', + 'crop_size', + 'atrous_rates', + 'output_stride', + 'preprocessed_images_dtype', + 'merge_method', + 'add_image_level_feature', + 'image_pooling_crop_size', + 'image_pooling_stride', + 'aspp_with_batch_norm', + 'aspp_with_separable_conv', + 'multi_grid', + 'decoder_output_stride', + 'decoder_use_separable_conv', + 'logits_kernel_size', + 'model_variant', + 'depth_multiplier', + 'divisible_by', + 'prediction_with_upsampled_logits', + 'dense_prediction_cell_config', + 'nas_architecture_options', + 'use_bounded_activation', + 'aspp_with_concat_projection', + 'aspp_with_squeeze_and_excitation', + 'aspp_convs_filters', + 'decoder_use_sum_merge', + 'decoder_filters', + 'decoder_output_is_logits', + 'image_se_uses_qsigmoid', + 'label_weights', + 'sync_batch_norm_method', + 'batch_norm_decay', + ])): + """Immutable class to hold model options.""" + + __slots__ = () + + def __new__(cls, + outputs_to_num_classes, + crop_size=None, + atrous_rates=None, + output_stride=8, + preprocessed_images_dtype=tf.float32): + """Constructor to set default values. + + Args: + outputs_to_num_classes: A dictionary from output type to the number of + classes. For example, for the task of semantic segmentation with 21 + semantic classes, we would have outputs_to_num_classes['semantic'] = 21. + crop_size: A tuple [crop_height, crop_width]. + atrous_rates: A list of atrous convolution rates for ASPP. + output_stride: The ratio of input to output spatial resolution. + preprocessed_images_dtype: The type after the preprocessing function. + + Returns: + A new ModelOptions instance. + """ + dense_prediction_cell_config = None + if FLAGS.dense_prediction_cell_json: + with tf.gfile.Open(FLAGS.dense_prediction_cell_json, 'r') as f: + dense_prediction_cell_config = json.load(f) + decoder_output_stride = None + if FLAGS.decoder_output_stride: + decoder_output_stride = [ + int(x) for x in FLAGS.decoder_output_stride] + if sorted(decoder_output_stride, reverse=True) != decoder_output_stride: + raise ValueError('Decoder output stride need to be sorted in the ' + 'descending order.') + image_pooling_crop_size = None + if FLAGS.image_pooling_crop_size: + image_pooling_crop_size = [int(x) for x in FLAGS.image_pooling_crop_size] + image_pooling_stride = [1, 1] + if FLAGS.image_pooling_stride: + image_pooling_stride = [int(x) for x in FLAGS.image_pooling_stride] + label_weights = FLAGS.label_weights + if label_weights is None: + label_weights = 1.0 + nas_architecture_options = { + 'nas_stem_output_num_conv_filters': ( + FLAGS.nas_stem_output_num_conv_filters), + 'nas_use_classification_head': FLAGS.nas_use_classification_head, + 'nas_remove_os32_stride': FLAGS.nas_remove_os32_stride, + } + return super(ModelOptions, cls).__new__( + cls, outputs_to_num_classes, crop_size, atrous_rates, output_stride, + preprocessed_images_dtype, + FLAGS.merge_method, + FLAGS.add_image_level_feature, + image_pooling_crop_size, + image_pooling_stride, + FLAGS.aspp_with_batch_norm, + FLAGS.aspp_with_separable_conv, + FLAGS.multi_grid, + decoder_output_stride, + FLAGS.decoder_use_separable_conv, + FLAGS.logits_kernel_size, + FLAGS.model_variant, + FLAGS.depth_multiplier, + FLAGS.divisible_by, + FLAGS.prediction_with_upsampled_logits, + dense_prediction_cell_config, + nas_architecture_options, + FLAGS.use_bounded_activation, + FLAGS.aspp_with_concat_projection, + FLAGS.aspp_with_squeeze_and_excitation, + FLAGS.aspp_convs_filters, + 
FLAGS.decoder_use_sum_merge, + FLAGS.decoder_filters, + FLAGS.decoder_output_is_logits, + FLAGS.image_se_uses_qsigmoid, + label_weights, + 'None', + FLAGS.batch_norm_decay) + + def __deepcopy__(self, memo): + return ModelOptions(copy.deepcopy(self.outputs_to_num_classes), + self.crop_size, + self.atrous_rates, + self.output_stride, + self.preprocessed_images_dtype) diff --git a/deeplab/models/research/deeplab/common_test.py b/deeplab/models/research/deeplab/common_test.py new file mode 100644 index 0000000..45b64e5 --- /dev/null +++ b/deeplab/models/research/deeplab/common_test.py @@ -0,0 +1,52 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for common.py.""" +import copy + +import tensorflow as tf + +from deeplab import common + + +class CommonTest(tf.test.TestCase): + + def testOutputsToNumClasses(self): + num_classes = 21 + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: num_classes}) + self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE], + num_classes) + + def testDeepcopy(self): + num_classes = 21 + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: num_classes}) + model_options_new = copy.deepcopy(model_options) + self.assertEqual((model_options_new. + outputs_to_num_classes[common.OUTPUT_TYPE]), + num_classes) + + num_classes_new = 22 + model_options_new.outputs_to_num_classes[common.OUTPUT_TYPE] = ( + num_classes_new) + self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE], + num_classes) + self.assertEqual((model_options_new. + outputs_to_num_classes[common.OUTPUT_TYPE]), + num_classes_new) + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/convert_to_tflite.py b/deeplab/models/research/deeplab/convert_to_tflite.py new file mode 100644 index 0000000..d23ce9e --- /dev/null +++ b/deeplab/models/research/deeplab/convert_to_tflite.py @@ -0,0 +1,112 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tools to convert a quantized deeplab model to tflite.""" + +from absl import app +from absl import flags +import numpy as np +from PIL import Image +import tensorflow as tf + + +flags.DEFINE_string('quantized_graph_def_path', None, + 'Path to quantized graphdef.') +flags.DEFINE_string('output_tflite_path', None, 'Output TFlite model path.') +flags.DEFINE_string( + 'input_tensor_name', None, + 'Input tensor to TFlite model. This usually should be the input tensor to ' + 'model backbone.' +) +flags.DEFINE_string( + 'output_tensor_name', 'ArgMax:0', + 'Output tensor name of TFlite model. By default we output the raw semantic ' + 'label predictions.' +) +flags.DEFINE_string( + 'test_image_path', None, + 'Path to an image to test the consistency between input graphdef / ' + 'converted tflite model.' +) + +FLAGS = flags.FLAGS + + +def convert_to_tflite(quantized_graphdef, + backbone_input_tensor, + output_tensor): + """Helper method to convert quantized deeplab model to TFlite.""" + with tf.Graph().as_default() as graph: + tf.graph_util.import_graph_def(quantized_graphdef, name='') + sess = tf.compat.v1.Session() + + tflite_input = graph.get_tensor_by_name(backbone_input_tensor) + tflite_output = graph.get_tensor_by_name(output_tensor) + converter = tf.compat.v1.lite.TFLiteConverter.from_session( + sess, [tflite_input], [tflite_output]) + converter.inference_type = tf.compat.v1.lite.constants.QUANTIZED_UINT8 + input_arrays = converter.get_input_arrays() + converter.quantized_input_stats = {input_arrays[0]: (127.5, 127.5)} + return converter.convert() + + +def check_tflite_consistency(graph_def, tflite_model, image_path): + """Runs tflite and frozen graph on same input, check their outputs match.""" + # Load tflite model and check input size. + interpreter = tf.lite.Interpreter(model_content=tflite_model) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + height, width = input_details[0]['shape'][1:3] + + # Prepare input image data. + with tf.io.gfile.GFile(image_path, 'rb') as f: + image = Image.open(f) + image = np.asarray(image.convert('RGB').resize((width, height))) + image = np.expand_dims(image, 0) + + # Output from tflite model. + interpreter.set_tensor(input_details[0]['index'], image) + interpreter.invoke() + output_tflite = interpreter.get_tensor(output_details[0]['index']) + + with tf.Graph().as_default(): + tf.graph_util.import_graph_def(graph_def, name='') + with tf.compat.v1.Session() as sess: + # Note here the graph will include preprocessing part of the graph + # (e.g. resize, pad, normalize). Given the input image size is at the + # crop size (backbone input size), resize / pad should be an identity op. + output_graph = sess.run( + FLAGS.output_tensor_name, feed_dict={'ImageTensor:0': image}) + + print('%.2f%% pixels have matched semantic labels.' 
% ( + 100 * np.mean(output_graph == output_tflite))) + + +def main(unused_argv): + with tf.io.gfile.GFile(FLAGS.quantized_graph_def_path, 'rb') as f: + graph_def = tf.compat.v1.GraphDef.FromString(f.read()) + tflite_model = convert_to_tflite( + graph_def, FLAGS.input_tensor_name, FLAGS.output_tensor_name) + + if FLAGS.output_tflite_path: + with tf.io.gfile.GFile(FLAGS.output_tflite_path, 'wb') as f: + f.write(tflite_model) + + if FLAGS.test_image_path: + check_tflite_consistency(graph_def, tflite_model, FLAGS.test_image_path) + + +if __name__ == '__main__': + app.run(main) diff --git a/deeplab/models/research/deeplab/core/__init__.py b/deeplab/models/research/deeplab/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deeplab/models/research/deeplab/core/conv2d_ws.py b/deeplab/models/research/deeplab/core/conv2d_ws.py new file mode 100644 index 0000000..9aaaf33 --- /dev/null +++ b/deeplab/models/research/deeplab/core/conv2d_ws.py @@ -0,0 +1,369 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Augment slim.conv2d with optional Weight Standardization (WS). + +WS is a normalization method to accelerate micro-batch training. When used with +Group Normalization and trained with 1 image/GPU, WS is able to match or +outperform the performances of BN trained with large batch sizes. +[1] Siyuan Qiao, Huiyu Wang, Chenxi Liu, Wei Shen, Alan Yuille + Weight Standardization. arXiv:1903.10520 +[2] Lei Huang, Xianglong Liu, Yang Liu, Bo Lang, Dacheng Tao + Centered Weight Normalization in Accelerating Training of Deep Neural + Networks. ICCV 2017 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import layers as contrib_layers + +from tensorflow.contrib.layers.python.layers import layers +from tensorflow.contrib.layers.python.layers import utils + + +class Conv2D(tf.keras.layers.Conv2D, tf.layers.Layer): + """2D convolution layer (e.g. spatial convolution over images). + + This layer creates a convolution kernel that is convolved + (actually cross-correlated) with the layer input to produce a tensor of + outputs. If `use_bias` is True (and a `bias_initializer` is provided), + a bias vector is created and added to the outputs. Finally, if + `activation` is not `None`, it is applied to the outputs as well. 
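+  When `use_weight_standardization` is True, `call()` additionally standardizes the kernel to zero mean and unit variance over its (height, width, in_channels) dimensions, per output channel, before applying the convolution.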
+ """ + + def __init__(self, + filters, + kernel_size, + strides=(1, 1), + padding='valid', + data_format='channels_last', + dilation_rate=(1, 1), + activation=None, + use_bias=True, + kernel_initializer=None, + bias_initializer=tf.zeros_initializer(), + kernel_regularizer=None, + bias_regularizer=None, + use_weight_standardization=False, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + trainable=True, + name=None, + **kwargs): + """Constructs the 2D convolution layer. + + Args: + filters: Integer, the dimensionality of the output space (i.e. the number + of filters in the convolution). + kernel_size: An integer or tuple/list of 2 integers, specifying the height + and width of the 2D convolution window. Can be a single integer to + specify the same value for all spatial dimensions. + strides: An integer or tuple/list of 2 integers, specifying the strides of + the convolution along the height and width. Can be a single integer to + specify the same value for all spatial dimensions. Specifying any stride + value != 1 is incompatible with specifying any `dilation_rate` value != + 1. + padding: One of `"valid"` or `"same"` (case-insensitive). + data_format: A string, one of `channels_last` (default) or + `channels_first`. The ordering of the dimensions in the inputs. + `channels_last` corresponds to inputs with shape `(batch, height, width, + channels)` while `channels_first` corresponds to inputs with shape + `(batch, channels, height, width)`. + dilation_rate: An integer or tuple/list of 2 integers, specifying the + dilation rate to use for dilated convolution. Can be a single integer to + specify the same value for all spatial dimensions. Currently, specifying + any `dilation_rate` value != 1 is incompatible with specifying any + stride value != 1. + activation: Activation function. Set it to None to maintain a linear + activation. + use_bias: Boolean, whether the layer uses a bias. + kernel_initializer: An initializer for the convolution kernel. + bias_initializer: An initializer for the bias vector. If None, the default + initializer will be used. + kernel_regularizer: Optional regularizer for the convolution kernel. + bias_regularizer: Optional regularizer for the bias vector. + use_weight_standardization: Boolean, whether the layer uses weight + standardization. + activity_regularizer: Optional regularizer function for the output. + kernel_constraint: Optional projection function to be applied to the + kernel after being updated by an `Optimizer` (e.g. used to implement + norm constraints or value constraints for layer weights). The function + must take as input the unprojected variable and must return the + projected variable (which must have the same shape). Constraints are not + safe to use when doing asynchronous distributed training. + bias_constraint: Optional projection function to be applied to the bias + after being updated by an `Optimizer`. + trainable: Boolean, if `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + name: A string, the name of the layer. 
+ **kwargs: Arbitrary keyword arguments passed to tf.keras.layers.Conv2D + """ + + super(Conv2D, self).__init__( + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + activation=activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + bias_regularizer=bias_regularizer, + activity_regularizer=activity_regularizer, + kernel_constraint=kernel_constraint, + bias_constraint=bias_constraint, + trainable=trainable, + name=name, + **kwargs) + self.use_weight_standardization = use_weight_standardization + + def call(self, inputs): + if self.use_weight_standardization: + mean, var = tf.nn.moments(self.kernel, [0, 1, 2], keep_dims=True) + kernel = (self.kernel - mean) / tf.sqrt(var + 1e-5) + outputs = self._convolution_op(inputs, kernel) + else: + outputs = self._convolution_op(inputs, self.kernel) + + if self.use_bias: + if self.data_format == 'channels_first': + if self.rank == 1: + # tf.nn.bias_add does not accept a 1D input tensor. + bias = tf.reshape(self.bias, (1, self.filters, 1)) + outputs += bias + else: + outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW') + else: + outputs = tf.nn.bias_add(outputs, self.bias, data_format='NHWC') + + if self.activation is not None: + return self.activation(outputs) + return outputs + + +@contrib_framework.add_arg_scope +def conv2d(inputs, + num_outputs, + kernel_size, + stride=1, + padding='SAME', + data_format=None, + rate=1, + activation_fn=tf.nn.relu, + normalizer_fn=None, + normalizer_params=None, + weights_initializer=contrib_layers.xavier_initializer(), + weights_regularizer=None, + biases_initializer=tf.zeros_initializer(), + biases_regularizer=None, + use_weight_standardization=False, + reuse=None, + variables_collections=None, + outputs_collections=None, + trainable=True, + scope=None): + """Adds a 2D convolution followed by an optional batch_norm layer. + + `convolution` creates a variable called `weights`, representing the + convolutional kernel, that is convolved (actually cross-correlated) with the + `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is + provided (such as `batch_norm`), it is then applied. Otherwise, if + `normalizer_fn` is None and a `biases_initializer` is provided then a `biases` + variable would be created and added the activations. Finally, if + `activation_fn` is not `None`, it is applied to the activations as well. + + Performs atrous convolution with input stride/dilation rate equal to `rate` + if a value > 1 for any dimension of `rate` is specified. In this case + `stride` values != 1 are not supported. + + Args: + inputs: A Tensor of rank N+2 of shape `[batch_size] + input_spatial_shape + + [in_channels]` if data_format does not start with "NC" (default), or + `[batch_size, in_channels] + input_spatial_shape` if data_format starts + with "NC". + num_outputs: Integer, the number of output filters. + kernel_size: A sequence of N positive integers specifying the spatial + dimensions of the filters. Can be a single integer to specify the same + value for all spatial dimensions. + stride: A sequence of N positive integers specifying the stride at which to + compute output. Can be a single integer to specify the same value for all + spatial dimensions. Specifying any `stride` value != 1 is incompatible + with specifying any `rate` value != 1. + padding: One of `"VALID"` or `"SAME"`. 
+ data_format: A string or None. Specifies whether the channel dimension of + the `input` and output is the last dimension (default, or if `data_format` + does not start with "NC"), or the second dimension (if `data_format` + starts with "NC"). For N=1, the valid values are "NWC" (default) and + "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For + N=3, the valid values are "NDHWC" (default) and "NCDHW". + rate: A sequence of N positive integers specifying the dilation rate to use + for atrous convolution. Can be a single integer to specify the same value + for all spatial dimensions. Specifying any `rate` value != 1 is + incompatible with specifying any `stride` value != 1. + activation_fn: Activation function. The default value is a ReLU function. + Explicitly set it to None to skip it and maintain a linear activation. + normalizer_fn: Normalization function to use instead of `biases`. If + `normalizer_fn` is provided then `biases_initializer` and + `biases_regularizer` are ignored and `biases` are not created nor added. + default set to None for no normalizer function + normalizer_params: Normalization function parameters. + weights_initializer: An initializer for the weights. + weights_regularizer: Optional regularizer for the weights. + biases_initializer: An initializer for the biases. If None skip biases. + biases_regularizer: Optional regularizer for the biases. + use_weight_standardization: Boolean, whether the layer uses weight + standardization. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional list of collections for all the variables or + a dictionary containing a different list of collection per variable. + outputs_collections: Collection to add the outputs. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). + scope: Optional scope for `variable_scope`. + + Returns: + A tensor representing the output of the operation. + + Raises: + ValueError: If `data_format` is invalid. + ValueError: Both 'rate' and `stride` are not uniformly 1. 
+ """ + if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']: + raise ValueError('Invalid data_format: %r' % (data_format,)) + + # pylint: disable=protected-access + layer_variable_getter = layers._build_variable_getter({ + 'bias': 'biases', + 'kernel': 'weights' + }) + # pylint: enable=protected-access + with tf.variable_scope( + scope, 'Conv', [inputs], reuse=reuse, + custom_getter=layer_variable_getter) as sc: + inputs = tf.convert_to_tensor(inputs) + input_rank = inputs.get_shape().ndims + + if input_rank != 4: + raise ValueError('Convolution expects input with rank %d, got %d' % + (4, input_rank)) + + data_format = ('channels_first' if data_format and + data_format.startswith('NC') else 'channels_last') + layer = Conv2D( + filters=num_outputs, + kernel_size=kernel_size, + strides=stride, + padding=padding, + data_format=data_format, + dilation_rate=rate, + activation=None, + use_bias=not normalizer_fn and biases_initializer, + kernel_initializer=weights_initializer, + bias_initializer=biases_initializer, + kernel_regularizer=weights_regularizer, + bias_regularizer=biases_regularizer, + use_weight_standardization=use_weight_standardization, + activity_regularizer=None, + trainable=trainable, + name=sc.name, + dtype=inputs.dtype.base_dtype, + _scope=sc, + _reuse=reuse) + outputs = layer.apply(inputs) + + # Add variables to collections. + # pylint: disable=protected-access + layers._add_variable_to_collections(layer.kernel, variables_collections, + 'weights') + if layer.use_bias: + layers._add_variable_to_collections(layer.bias, variables_collections, + 'biases') + # pylint: enable=protected-access + if normalizer_fn is not None: + normalizer_params = normalizer_params or {} + outputs = normalizer_fn(outputs, **normalizer_params) + + if activation_fn is not None: + outputs = activation_fn(outputs) + return utils.collect_named_outputs(outputs_collections, sc.name, outputs) + + +def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None): + """Strided 2-D convolution with 'SAME' padding. + + When stride > 1, then we do explicit zero-padding, followed by conv2d with + 'VALID' padding. + + Note that + + net = conv2d_same(inputs, num_outputs, 3, stride=stride) + + is equivalent to + + net = conv2d(inputs, num_outputs, 3, stride=1, padding='SAME') + net = subsample(net, factor=stride) + + whereas + + net = conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME') + + is different when the input's height or width is even, which is why we add the + current function. For more details, see ResnetUtilsTest.testConv2DSameEven(). + + Args: + inputs: A 4-D tensor of size [batch, height_in, width_in, channels]. + num_outputs: An integer, the number of output filters. + kernel_size: An int with the kernel_size of the filters. + stride: An integer, the output stride. + rate: An integer, rate for atrous convolution. + scope: Scope. + + Returns: + output: A 4-D tensor of size [batch, height_out, width_out, channels] with + the convolution output. 
+ """ + if stride == 1: + return conv2d( + inputs, + num_outputs, + kernel_size, + stride=1, + rate=rate, + padding='SAME', + scope=scope) + else: + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + inputs = tf.pad(inputs, + [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + return conv2d( + inputs, + num_outputs, + kernel_size, + stride=stride, + rate=rate, + padding='VALID', + scope=scope) diff --git a/deeplab/models/research/deeplab/core/conv2d_ws_test.py b/deeplab/models/research/deeplab/core/conv2d_ws_test.py new file mode 100644 index 0000000..b6bea85 --- /dev/null +++ b/deeplab/models/research/deeplab/core/conv2d_ws_test.py @@ -0,0 +1,420 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for conv2d_ws.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import layers as contrib_layers +from deeplab.core import conv2d_ws + + +class ConvolutionTest(tf.test.TestCase): + + def testInvalidShape(self): + with self.cached_session(): + images_3d = tf.random_uniform((5, 6, 7, 9, 3), seed=1) + with self.assertRaisesRegexp( + ValueError, 'Convolution expects input with rank 4, got 5'): + conv2d_ws.conv2d(images_3d, 32, 3) + + def testInvalidDataFormat(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.assertRaisesRegexp(ValueError, 'data_format'): + conv2d_ws.conv2d(images, 32, 3, data_format='CHWN') + + def testCreateConv(self): + height, width = 7, 9 + with self.cached_session(): + images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32) + output = conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateConvWithWS(self): + height, width = 7, 9 + with self.cached_session(): + images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32) + output = conv2d_ws.conv2d( + images, 32, [3, 3], use_weight_standardization=True) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32]) + biases = 
contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateConvNCHW(self): + height, width = 7, 9 + with self.cached_session(): + images = np.random.uniform(size=(5, 4, height, width)).astype(np.float32) + output = conv2d_ws.conv2d(images, 32, [3, 3], data_format='NCHW') + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateSquareConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, 3) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateConvWithTensorShape(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, images.get_shape()[1:3]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateFullyConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + output = conv2d_ws.conv2d( + images, 64, images.get_shape()[1:3], padding='VALID') + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [64]) + + def testFullyConvWithCustomGetter(self): + height, width = 7, 9 + with self.cached_session(): + called = [0] + + def custom_getter(getter, *args, **kwargs): + called[0] += 1 + return getter(*args, **kwargs) + + with tf.variable_scope('test', custom_getter=custom_getter): + images = tf.random_uniform((5, height, width, 32), seed=1) + conv2d_ws.conv2d(images, 64, images.get_shape()[1:3]) + self.assertEqual(called[0], 2) # Custom getter called twice. 
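+      # One call is made for the kernel/weights variable and one for the bias
+      # variable, hence the expected count of 2.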
+ + def testCreateVerticalConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 4), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 1]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateHorizontalConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 4), seed=1) + output = conv2d_ws.conv2d(images, 32, [1, 3]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32]) + + def testCreateConvWithStride(self): + height, width = 6, 8 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], stride=2) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), + [5, height / 2, width / 2, 32]) + + def testCreateConvCreatesWeightsAndBiasesVars(self): + height, width = 7, 9 + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.cached_session(): + self.assertFalse(contrib_framework.get_variables('conv1/weights')) + self.assertFalse(contrib_framework.get_variables('conv1/biases')) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertTrue(contrib_framework.get_variables('conv1/weights')) + self.assertTrue(contrib_framework.get_variables('conv1/biases')) + + def testCreateConvWithScope(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEqual(output.op.name, 'conv1/Relu') + + def testCreateConvWithCollection(self): + height, width = 7, 9 + images = tf.random_uniform((5, height, width, 3), seed=1) + with tf.name_scope('fe'): + conv = conv2d_ws.conv2d( + images, 32, [3, 3], outputs_collections='outputs', scope='Conv') + output_collected = tf.get_collection('outputs')[0] + self.assertEqual(output_collected.aliases, ['Conv']) + self.assertEqual(output_collected, conv) + + def testCreateConvWithoutActivation(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], activation_fn=None) + self.assertEqual(output.op.name, 'Conv/BiasAdd') + + def testCreateConvValid(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], padding='VALID') + self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32]) + + def testCreateConvWithWD(self): + height, width = 7, 9 + weight_decay = 0.01 + with self.cached_session() as sess: + images = tf.random_uniform((5, height, width, 3), seed=1) + regularizer = contrib_layers.l2_regularizer(weight_decay) + conv2d_ws.conv2d(images, 32, [3, 3], weights_regularizer=regularizer) + l2_loss = tf.nn.l2_loss( + contrib_framework.get_variables_by_name('weights')[0]) + wd = 
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0] + self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer') + sess.run(tf.global_variables_initializer()) + self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval()) + + def testCreateConvNoRegularizers(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual( + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), []) + + def testReuseVars(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEqual(len(contrib_framework.get_variables()), 2) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True) + self.assertEqual(len(contrib_framework.get_variables()), 2) + + def testNonReuseVars(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual(len(contrib_framework.get_variables()), 2) + conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual(len(contrib_framework.get_variables()), 4) + + def testReuseConvWithWD(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + weight_decay = contrib_layers.l2_regularizer(0.01) + with contrib_framework.arg_scope([conv2d_ws.conv2d], + weights_regularizer=weight_decay): + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEqual(len(contrib_framework.get_variables()), 2) + self.assertEqual( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True) + self.assertEqual(len(contrib_framework.get_variables()), 2) + self.assertEqual( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + + def testConvWithBatchNorm(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + with contrib_framework.arg_scope([conv2d_ws.conv2d], + normalizer_fn=contrib_layers.batch_norm, + normalizer_params={'decay': 0.9}): + net = conv2d_ws.conv2d(images, 32, [3, 3]) + net = conv2d_ws.conv2d(net, 32, [3, 3]) + self.assertEqual(len(contrib_framework.get_variables()), 8) + self.assertEqual( + len(contrib_framework.get_variables('Conv/BatchNorm')), 3) + self.assertEqual( + len(contrib_framework.get_variables('Conv_1/BatchNorm')), 3) + + def testReuseConvWithBatchNorm(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + with contrib_framework.arg_scope([conv2d_ws.conv2d], + normalizer_fn=contrib_layers.batch_norm, + normalizer_params={'decay': 0.9}): + net = conv2d_ws.conv2d(images, 32, [3, 3], scope='Conv') + net = conv2d_ws.conv2d(net, 32, [3, 3], scope='Conv', reuse=True) + self.assertEqual(len(contrib_framework.get_variables()), 4) + self.assertEqual( + len(contrib_framework.get_variables('Conv/BatchNorm')), 3) + self.assertEqual( + len(contrib_framework.get_variables('Conv_1/BatchNorm')), 0) + + def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self): + height, width = 7, 9 + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.cached_session(): + self.assertFalse(contrib_framework.get_variables('conv1/weights')) + self.assertFalse(contrib_framework.get_variables('conv1/biases')) + conv2d_ws.conv2d(images, 32, 
[3, 3], rate=2, scope='conv1') + self.assertTrue(contrib_framework.get_variables('conv1/weights')) + self.assertTrue(contrib_framework.get_variables('conv1/biases')) + + def testOutputSizeWithRateTwoSamePadding(self): + num_filters = 32 + input_size = [5, 10, 12, 3] + expected_size = [5, 10, 12, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='SAME') + self.assertListEqual(list(output.get_shape().as_list()), expected_size) + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testOutputSizeWithRateTwoValidPadding(self): + num_filters = 32 + input_size = [5, 10, 12, 3] + expected_size = [5, 6, 8, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='VALID') + self.assertListEqual(list(output.get_shape().as_list()), expected_size) + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testOutputSizeWithRateTwoThreeValidPadding(self): + num_filters = 32 + input_size = [5, 10, 12, 3] + expected_size = [5, 6, 6, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=[2, 3], padding='VALID') + self.assertListEqual(list(output.get_shape().as_list()), expected_size) + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testDynamicOutputSizeWithRateOneValidPadding(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [None, None, None, num_filters] + expected_size_dynamic = [5, 7, 9, num_filters] + + with self.cached_session(): + images = tf.placeholder(np.float32, [None, None, None, input_size[3]]) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=1, padding='VALID') + tf.global_variables_initializer().run() + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), expected_size) + eval_output = output.eval({images: np.zeros(input_size, np.float32)}) + self.assertListEqual(list(eval_output.shape), expected_size_dynamic) + + def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self): + if tf.test.is_gpu_available(cuda_only=True): + num_filters = 32 + input_size = [5, 3, 9, 11] + expected_size = [None, num_filters, None, None] + expected_size_dynamic = [5, num_filters, 7, 9] + + with self.session(use_gpu=True): + images = tf.placeholder(np.float32, [None, input_size[1], None, None]) + output = conv2d_ws.conv2d( + images, + num_filters, [3, 3], + rate=1, + padding='VALID', + data_format='NCHW') + tf.global_variables_initializer().run() + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), expected_size) + eval_output = output.eval({images: np.zeros(input_size, np.float32)}) + self.assertListEqual(list(eval_output.shape), expected_size_dynamic) + + def testDynamicOutputSizeWithRateTwoValidPadding(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [None, None, None, num_filters] + expected_size_dynamic = [5, 5, 7, num_filters] + + with self.cached_session(): + images 
= tf.placeholder(np.float32, [None, None, None, input_size[3]]) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='VALID') + tf.global_variables_initializer().run() + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), expected_size) + eval_output = output.eval({images: np.zeros(input_size, np.float32)}) + self.assertListEqual(list(eval_output.shape), expected_size_dynamic) + + def testWithScope(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [5, 5, 7, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7') + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'conv7/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testWithScopeWithoutActivation(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [5, 5, 7, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, + num_filters, [3, 3], + rate=2, + padding='VALID', + activation_fn=None, + scope='conv7') + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'conv7/BiasAdd') + self.assertListEqual(list(output.eval().shape), expected_size) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/core/dense_prediction_cell.py b/deeplab/models/research/deeplab/core/dense_prediction_cell.py new file mode 100644 index 0000000..8e32f8e --- /dev/null +++ b/deeplab/models/research/deeplab/core/dense_prediction_cell.py @@ -0,0 +1,290 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Dense Prediction Cell class that can be evolved in semantic segmentation. + +DensePredictionCell is used as a `layer` in semantic segmentation whose +architecture is determined by the `config`, a dictionary specifying +the architecture. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import utils + +slim = contrib_slim + +# Local constants. +_META_ARCHITECTURE_SCOPE = 'meta_architecture' +_CONCAT_PROJECTION_SCOPE = 'concat_projection' +_OP = 'op' +_CONV = 'conv' +_PYRAMID_POOLING = 'pyramid_pooling' +_KERNEL = 'kernel' +_RATE = 'rate' +_GRID_SIZE = 'grid_size' +_TARGET_SIZE = 'target_size' +_INPUT = 'input' + + +def dense_prediction_cell_hparams(): + """DensePredictionCell HParams. + + Returns: + A dictionary of hyper-parameters used for dense prediction cell with keys: + - reduction_size: Integer, the number of output filters for each operation + inside the cell. 
+ - dropout_on_concat_features: Boolean, apply dropout on the concatenated + features or not. + - dropout_on_projection_features: Boolean, apply dropout on the projection + features or not. + - dropout_keep_prob: Float, when `dropout_on_concat_features' or + `dropout_on_projection_features' is True, the `keep_prob` value used + in the dropout operation. + - concat_channels: Integer, the concatenated features will be + channel-reduced to `concat_channels` channels. + - conv_rate_multiplier: Integer, used to multiply the convolution rates. + This is useful in the case when the output_stride is changed from 16 + to 8, we need to double the convolution rates correspondingly. + """ + return { + 'reduction_size': 256, + 'dropout_on_concat_features': True, + 'dropout_on_projection_features': False, + 'dropout_keep_prob': 0.9, + 'concat_channels': 256, + 'conv_rate_multiplier': 1, + } + + +class DensePredictionCell(object): + """DensePredictionCell class used as a 'layer' in semantic segmentation.""" + + def __init__(self, config, hparams=None): + """Initializes the dense prediction cell. + + Args: + config: A dictionary storing the architecture of a dense prediction cell. + hparams: A dictionary of hyper-parameters, provided by users. This + dictionary will be used to update the default dictionary returned by + dense_prediction_cell_hparams(). + + Raises: + ValueError: If `conv_rate_multiplier` has value < 1. + """ + self.hparams = dense_prediction_cell_hparams() + if hparams is not None: + self.hparams.update(hparams) + self.config = config + + # Check values in hparams are valid or not. + if self.hparams['conv_rate_multiplier'] < 1: + raise ValueError('conv_rate_multiplier cannot have value < 1.') + + def _get_pyramid_pooling_arguments( + self, crop_size, output_stride, image_grid, image_pooling_crop_size=None): + """Gets arguments for pyramid pooling. + + Args: + crop_size: A list of two integers, [crop_height, crop_width] specifying + whole patch crop size. + output_stride: Integer, output stride value for extracted features. + image_grid: A list of two integers, [image_grid_height, image_grid_width], + specifying the grid size of how the pyramid pooling will be performed. + image_pooling_crop_size: A list of two integers, [crop_height, crop_width] + specifying the crop size for image pooling operations. Note that we + decouple whole patch crop_size and image_pooling_crop_size as one could + perform the image_pooling with different crop sizes. + + Returns: + A list of (resize_value, pooled_kernel) + """ + resize_height = utils.scale_dimension(crop_size[0], 1. / output_stride) + resize_width = utils.scale_dimension(crop_size[1], 1. / output_stride) + # If image_pooling_crop_size is not specified, use crop_size. + if image_pooling_crop_size is None: + image_pooling_crop_size = crop_size + pooled_height = utils.scale_dimension( + image_pooling_crop_size[0], 1. / (output_stride * image_grid[0])) + pooled_width = utils.scale_dimension( + image_pooling_crop_size[1], 1. / (output_stride * image_grid[1])) + return ([resize_height, resize_width], [pooled_height, pooled_width]) + + def _parse_operation(self, config, crop_size, output_stride, + image_pooling_crop_size=None): + """Parses one operation. + + When 'operation' is 'pyramid_pooling', we compute the required + hyper-parameters and save in config. + + Args: + config: A dictionary storing required hyper-parameters for one + operation. + crop_size: A list of two integers, [crop_height, crop_width] specifying + whole patch crop size. 
+ output_stride: Integer, output stride value for extracted features. + image_pooling_crop_size: A list of two integers, [crop_height, crop_width] + specifying the crop size for image pooling operations. Note that we + decouple whole patch crop_size and image_pooling_crop_size as one could + perform the image_pooling with different crop sizes. + + Returns: + A dictionary stores the related information for the operation. + """ + if config[_OP] == _PYRAMID_POOLING: + (config[_TARGET_SIZE], + config[_KERNEL]) = self._get_pyramid_pooling_arguments( + crop_size=crop_size, + output_stride=output_stride, + image_grid=config[_GRID_SIZE], + image_pooling_crop_size=image_pooling_crop_size) + + return config + + def build_cell(self, + features, + output_stride=16, + crop_size=None, + image_pooling_crop_size=None, + weight_decay=0.00004, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + scope=None): + """Builds the dense prediction cell based on the config. + + Args: + features: Input feature map of size [batch, height, width, channels]. + output_stride: Int, output stride at which the features were extracted. + crop_size: A list [crop_height, crop_width], determining the input + features resolution. + image_pooling_crop_size: A list of two integers, [crop_height, crop_width] + specifying the crop size for image pooling operations. Note that we + decouple whole patch crop_size and image_pooling_crop_size as one could + perform the image_pooling with different crop sizes. + weight_decay: Float, the weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Boolean, is training or not. + fine_tune_batch_norm: Boolean, fine-tuning batch norm parameters or not. + scope: Optional string, specifying the variable scope. + + Returns: + Features after passing through the constructed dense prediction cell with + shape = [batch, height, width, channels] where channels are determined + by `reduction_size` returned by dense_prediction_cell_hparams(). + + Raises: + ValueError: Use Convolution with kernel size not equal to 1x1 or 3x3 or + the operation is not recognized. 
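+
+    Example:
+      A minimal sketch, illustrative only; it assumes `config` has been loaded
+      from a file such as dense_prediction_cell_branch5_top1_cityscapes.json:
+
+        cell = DensePredictionCell(config)
+        features = cell.build_cell(
+            features, output_stride=16, crop_size=[513, 513], is_training=True)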
+ """ + batch_norm_params = { + 'is_training': is_training and fine_tune_batch_norm, + 'decay': 0.9997, + 'epsilon': 1e-5, + 'scale': True, + } + hparams = self.hparams + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + padding='SAME', + stride=1, + reuse=reuse): + with slim.arg_scope([slim.batch_norm], **batch_norm_params): + with tf.variable_scope(scope, _META_ARCHITECTURE_SCOPE, [features]): + depth = hparams['reduction_size'] + branch_logits = [] + for i, current_config in enumerate(self.config): + scope = 'branch%d' % i + current_config = self._parse_operation( + config=current_config, + crop_size=crop_size, + output_stride=output_stride, + image_pooling_crop_size=image_pooling_crop_size) + tf.logging.info(current_config) + if current_config[_INPUT] < 0: + operation_input = features + else: + operation_input = branch_logits[current_config[_INPUT]] + if current_config[_OP] == _CONV: + if current_config[_KERNEL] == [1, 1] or current_config[ + _KERNEL] == 1: + branch_logits.append( + slim.conv2d(operation_input, depth, 1, scope=scope)) + else: + conv_rate = [r * hparams['conv_rate_multiplier'] + for r in current_config[_RATE]] + branch_logits.append( + utils.split_separable_conv2d( + operation_input, + filters=depth, + kernel_size=current_config[_KERNEL], + rate=conv_rate, + weight_decay=weight_decay, + scope=scope)) + elif current_config[_OP] == _PYRAMID_POOLING: + pooled_features = slim.avg_pool2d( + operation_input, + kernel_size=current_config[_KERNEL], + stride=[1, 1], + padding='VALID') + pooled_features = slim.conv2d( + pooled_features, + depth, + 1, + scope=scope) + pooled_features = tf.image.resize_bilinear( + pooled_features, + current_config[_TARGET_SIZE], + align_corners=True) + # Set shape for resize_height/resize_width if they are not Tensor. + resize_height = current_config[_TARGET_SIZE][0] + resize_width = current_config[_TARGET_SIZE][1] + if isinstance(resize_height, tf.Tensor): + resize_height = None + if isinstance(resize_width, tf.Tensor): + resize_width = None + pooled_features.set_shape( + [None, resize_height, resize_width, depth]) + branch_logits.append(pooled_features) + else: + raise ValueError('Unrecognized operation.') + # Merge branch logits. 
+ concat_logits = tf.concat(branch_logits, 3) + if self.hparams['dropout_on_concat_features']: + concat_logits = slim.dropout( + concat_logits, + keep_prob=self.hparams['dropout_keep_prob'], + is_training=is_training, + scope=_CONCAT_PROJECTION_SCOPE + '_dropout') + concat_logits = slim.conv2d(concat_logits, + self.hparams['concat_channels'], + 1, + scope=_CONCAT_PROJECTION_SCOPE) + if self.hparams['dropout_on_projection_features']: + concat_logits = slim.dropout( + concat_logits, + keep_prob=self.hparams['dropout_keep_prob'], + is_training=is_training, + scope=_CONCAT_PROJECTION_SCOPE + '_dropout') + return concat_logits diff --git a/deeplab/models/research/deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json b/deeplab/models/research/deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json new file mode 100644 index 0000000..12b093d --- /dev/null +++ b/deeplab/models/research/deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json @@ -0,0 +1 @@ +[{"kernel": 3, "rate": [1, 6], "op": "conv", "input": -1}, {"kernel": 3, "rate": [18, 15], "op": "conv", "input": 0}, {"kernel": 3, "rate": [6, 3], "op": "conv", "input": 1}, {"kernel": 3, "rate": [1, 1], "op": "conv", "input": 0}, {"kernel": 3, "rate": [6, 21], "op": "conv", "input": 0}] \ No newline at end of file diff --git a/deeplab/models/research/deeplab/core/dense_prediction_cell_test.py b/deeplab/models/research/deeplab/core/dense_prediction_cell_test.py new file mode 100644 index 0000000..1396a73 --- /dev/null +++ b/deeplab/models/research/deeplab/core/dense_prediction_cell_test.py @@ -0,0 +1,136 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for dense_prediction_cell.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from deeplab.core import dense_prediction_cell + + +class DensePredictionCellTest(tf.test.TestCase): + + def setUp(self): + self.segmentation_layer = dense_prediction_cell.DensePredictionCell( + config=[ + { + dense_prediction_cell._INPUT: -1, + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: 1, + }, + { + dense_prediction_cell._INPUT: 0, + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: 3, + dense_prediction_cell._RATE: [1, 3], + }, + { + dense_prediction_cell._INPUT: 1, + dense_prediction_cell._OP: ( + dense_prediction_cell._PYRAMID_POOLING), + dense_prediction_cell._GRID_SIZE: [1, 2], + }, + ], + hparams={'conv_rate_multiplier': 2}) + + def testPyramidPoolingArguments(self): + features_size, pooled_kernel = ( + self.segmentation_layer._get_pyramid_pooling_arguments( + crop_size=[513, 513], + output_stride=16, + image_grid=[4, 4])) + self.assertListEqual(features_size, [33, 33]) + self.assertListEqual(pooled_kernel, [9, 9]) + + def testPyramidPoolingArgumentsWithImageGrid1x1(self): + features_size, pooled_kernel = ( + self.segmentation_layer._get_pyramid_pooling_arguments( + crop_size=[257, 257], + output_stride=16, + image_grid=[1, 1])) + self.assertListEqual(features_size, [17, 17]) + self.assertListEqual(pooled_kernel, [17, 17]) + + def testParseOperationStringWithConv1x1(self): + operation = self.segmentation_layer._parse_operation( + config={ + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: [1, 1], + }, + crop_size=[513, 513], output_stride=16) + self.assertEqual(operation[dense_prediction_cell._OP], + dense_prediction_cell._CONV) + self.assertListEqual(operation[dense_prediction_cell._KERNEL], [1, 1]) + + def testParseOperationStringWithConv3x3(self): + operation = self.segmentation_layer._parse_operation( + config={ + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: [3, 3], + dense_prediction_cell._RATE: [9, 6], + }, + crop_size=[513, 513], output_stride=16) + self.assertEqual(operation[dense_prediction_cell._OP], + dense_prediction_cell._CONV) + self.assertListEqual(operation[dense_prediction_cell._KERNEL], [3, 3]) + self.assertEqual(operation[dense_prediction_cell._RATE], [9, 6]) + + def testParseOperationStringWithPyramidPooling2x2(self): + operation = self.segmentation_layer._parse_operation( + config={ + dense_prediction_cell._OP: dense_prediction_cell._PYRAMID_POOLING, + dense_prediction_cell._GRID_SIZE: [2, 2], + }, + crop_size=[513, 513], + output_stride=16) + self.assertEqual(operation[dense_prediction_cell._OP], + dense_prediction_cell._PYRAMID_POOLING) + # The feature maps of size [33, 33] should be covered by 2x2 kernels with + # size [17, 17]. 
+ self.assertListEqual( + operation[dense_prediction_cell._TARGET_SIZE], [33, 33]) + self.assertListEqual(operation[dense_prediction_cell._KERNEL], [17, 17]) + + def testBuildCell(self): + with self.test_session(graph=tf.Graph()) as sess: + features = tf.random_normal([2, 33, 33, 5]) + concat_logits = self.segmentation_layer.build_cell( + features, + output_stride=8, + crop_size=[257, 257]) + sess.run(tf.global_variables_initializer()) + concat_logits = sess.run(concat_logits) + self.assertTrue(concat_logits.any()) + + def testBuildCellWithImagePoolingCropSize(self): + with self.test_session(graph=tf.Graph()) as sess: + features = tf.random_normal([2, 33, 33, 5]) + concat_logits = self.segmentation_layer.build_cell( + features, + output_stride=8, + crop_size=[257, 257], + image_pooling_crop_size=[129, 129]) + sess.run(tf.global_variables_initializer()) + concat_logits = sess.run(concat_logits) + self.assertTrue(concat_logits.any()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/core/feature_extractor.py b/deeplab/models/research/deeplab/core/feature_extractor.py new file mode 100644 index 0000000..553bd9b --- /dev/null +++ b/deeplab/models/research/deeplab/core/feature_extractor.py @@ -0,0 +1,711 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Extracts features for different models.""" +import copy +import functools + +import tensorflow.compat.v1 as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import nas_network +from deeplab.core import resnet_v1_beta +from deeplab.core import xception +from nets.mobilenet import conv_blocks +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 +from nets.mobilenet import mobilenet_v3 + +slim = contrib_slim + +# Default end point for MobileNetv2 (one-based indexing). +_MOBILENET_V2_FINAL_ENDPOINT = 'layer_18' +# Default end point for MobileNetv3. +_MOBILENET_V3_LARGE_FINAL_ENDPOINT = 'layer_17' +_MOBILENET_V3_SMALL_FINAL_ENDPOINT = 'layer_13' +# Default end point for EdgeTPU Mobilenet. +_MOBILENET_EDGETPU = 'layer_24' + + +def _mobilenet_v2(net, + depth_multiplier, + output_stride, + conv_defs=None, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Auxiliary function to add support for 'reuse' to mobilenet_v2. + + Args: + net: Input tensor of shape [batch_size, height, width, channels]. + depth_multiplier: Float multiplier for the depth (number of channels) + for all convolution ops. The value must be greater than zero. Typical + usage will be to set this value in (0, 1) to reduce the number of + parameters or computation cost of the model. + output_stride: An integer that specifies the requested ratio of input to + output spatial resolution. 
If not None, then we invoke atrous convolution + if necessary to prevent the network from reducing the spatial resolution + of the activation maps. Allowed values are 8 (accurate fully convolutional + mode), 16 (fast fully convolutional mode), 32 (classification mode). + conv_defs: MobileNet con def. + divisible_by: None (use default setting) or an integer that ensures all + layers # channels will be divisible by this number. Used in MobileNet. + reuse: Reuse model variables. + scope: Optional variable scope. + final_endpoint: The endpoint to construct the network up to. + + Returns: + Features extracted by MobileNetv2. + """ + if divisible_by is None: + divisible_by = 8 if depth_multiplier == 1.0 else 1 + if conv_defs is None: + conv_defs = mobilenet_v2.V2_DEF + with tf.variable_scope( + scope, 'MobilenetV2', [net], reuse=reuse) as scope: + return mobilenet_v2.mobilenet_base( + net, + conv_defs=conv_defs, + depth_multiplier=depth_multiplier, + min_depth=8 if depth_multiplier == 1.0 else 1, + divisible_by=divisible_by, + final_endpoint=final_endpoint or _MOBILENET_V2_FINAL_ENDPOINT, + output_stride=output_stride, + scope=scope) + + +def _mobilenet_v3(net, + depth_multiplier, + output_stride, + conv_defs=None, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Auxiliary function to build mobilenet v3. + + Args: + net: Input tensor of shape [batch_size, height, width, channels]. + depth_multiplier: Float multiplier for the depth (number of channels) + for all convolution ops. The value must be greater than zero. Typical + usage will be to set this value in (0, 1) to reduce the number of + parameters or computation cost of the model. + output_stride: An integer that specifies the requested ratio of input to + output spatial resolution. If not None, then we invoke atrous convolution + if necessary to prevent the network from reducing the spatial resolution + of the activation maps. Allowed values are 8 (accurate fully convolutional + mode), 16 (fast fully convolutional mode), 32 (classification mode). + conv_defs: A list of ConvDef namedtuples specifying the net architecture. + divisible_by: None (use default setting) or an integer that ensures all + layers # channels will be divisible by this number. Used in MobileNet. + reuse: Reuse model variables. + scope: Optional variable scope. + final_endpoint: The endpoint to construct the network up to. + + Returns: + net: The output tensor. + end_points: A set of activations for external use. + + Raises: + ValueError: If conv_defs or final_endpoint is not specified. + """ + del divisible_by + with tf.variable_scope( + scope, 'MobilenetV3', [net], reuse=reuse) as scope: + if conv_defs is None: + raise ValueError('conv_defs must be specified for mobilenet v3.') + if final_endpoint is None: + raise ValueError('Final endpoint must be specified for mobilenet v3.') + net, end_points = mobilenet_v3.mobilenet_base( + net, + depth_multiplier=depth_multiplier, + conv_defs=conv_defs, + output_stride=output_stride, + final_endpoint=final_endpoint, + scope=scope) + + return net, end_points + + +def mobilenet_v3_large_seg(net, + depth_multiplier, + output_stride, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Final mobilenet v3 large model for segmentation task.""" + del divisible_by + del final_endpoint + conv_defs = copy.deepcopy(mobilenet_v3.V3_LARGE) + + # Reduce the filters by a factor of 2 in the last block. 
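+  # For each layer, the paired expansion value is converted into a multiple of
+  # the previous layer's output depth via mobilenet_v3.expand_input.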
+ for layer, expansion in [(13, 336), (14, 480), (15, 480), (16, None)]: + conv_defs['spec'][layer].params['num_outputs'] /= 2 + # Update expansion size + if expansion is not None: + factor = expansion / conv_defs['spec'][layer - 1].params['num_outputs'] + conv_defs['spec'][layer].params[ + 'expansion_size'] = mobilenet_v3.expand_input(factor) + + return _mobilenet_v3( + net, + depth_multiplier=depth_multiplier, + output_stride=output_stride, + divisible_by=8, + conv_defs=conv_defs, + reuse=reuse, + scope=scope, + final_endpoint=_MOBILENET_V3_LARGE_FINAL_ENDPOINT) + + +def mobilenet_edgetpu(net, + depth_multiplier, + output_stride, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """EdgeTPU version of mobilenet model for segmentation task.""" + del divisible_by + del final_endpoint + conv_defs = copy.deepcopy(mobilenet_v3.V3_EDGETPU) + + return _mobilenet_v3( + net, + depth_multiplier=depth_multiplier, + output_stride=output_stride, + divisible_by=8, + conv_defs=conv_defs, + reuse=reuse, + scope=scope, # the scope is 'MobilenetEdgeTPU' + final_endpoint=_MOBILENET_EDGETPU) + + +def mobilenet_v3_small_seg(net, + depth_multiplier, + output_stride, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Final mobilenet v3 small model for segmentation task.""" + del divisible_by + del final_endpoint + conv_defs = copy.deepcopy(mobilenet_v3.V3_SMALL) + + # Reduce the filters by a factor of 2 in the last block. + for layer, expansion in [(9, 144), (10, 288), (11, 288), (12, None)]: + conv_defs['spec'][layer].params['num_outputs'] /= 2 + # Update expansion size + if expansion is not None: + factor = expansion / conv_defs['spec'][layer - 1].params['num_outputs'] + conv_defs['spec'][layer].params[ + 'expansion_size'] = mobilenet_v3.expand_input(factor) + + return _mobilenet_v3( + net, + depth_multiplier=depth_multiplier, + output_stride=output_stride, + divisible_by=8, + conv_defs=conv_defs, + reuse=reuse, + scope=scope, + final_endpoint=_MOBILENET_V3_SMALL_FINAL_ENDPOINT) + + +# A map from network name to network function. +networks_map = { + 'mobilenet_v2': _mobilenet_v2, + 'mobilenet_edgetpu': mobilenet_edgetpu, + 'mobilenet_v3_large_seg': mobilenet_v3_large_seg, + 'mobilenet_v3_small_seg': mobilenet_v3_small_seg, + 'resnet_v1_18': resnet_v1_beta.resnet_v1_18, + 'resnet_v1_18_beta': resnet_v1_beta.resnet_v1_18_beta, + 'resnet_v1_50': resnet_v1_beta.resnet_v1_50, + 'resnet_v1_50_beta': resnet_v1_beta.resnet_v1_50_beta, + 'resnet_v1_101': resnet_v1_beta.resnet_v1_101, + 'resnet_v1_101_beta': resnet_v1_beta.resnet_v1_101_beta, + 'xception_41': xception.xception_41, + 'xception_65': xception.xception_65, + 'xception_71': xception.xception_71, + 'nas_pnasnet': nas_network.pnasnet, + 'nas_hnasnet': nas_network.hnasnet, +} + + +def mobilenet_v2_arg_scope(is_training=True, + weight_decay=0.00004, + stddev=0.09, + activation=tf.nn.relu6, + bn_decay=0.997, + bn_epsilon=None, + bn_renorm=None): + """Defines the default MobilenetV2 arg scope. + + Args: + is_training: Whether or not we're training the model. If this is set to None + is_training parameter in batch_norm is not set. Please note that this also + sets the is_training parameter in dropout to None. + weight_decay: The weight decay to use for regularizing the model. + stddev: Standard deviation for initialization, if negative uses xavier. + activation: If True, a modified activation is used (initialized ~ReLU6). + bn_decay: decay for the batch norm moving averages. 
+ bn_epsilon: batch normalization epsilon. + bn_renorm: whether to use batchnorm renormalization + + Returns: + An `arg_scope` to use for the mobilenet v1 model. + """ + batch_norm_params = { + 'center': True, + 'scale': True, + 'decay': bn_decay, + } + if bn_epsilon is not None: + batch_norm_params['epsilon'] = bn_epsilon + if is_training is not None: + batch_norm_params['is_training'] = is_training + if bn_renorm is not None: + batch_norm_params['renorm'] = bn_renorm + dropout_params = {} + if is_training is not None: + dropout_params['is_training'] = is_training + + instance_norm_params = { + 'center': True, + 'scale': True, + 'epsilon': 0.001, + } + + if stddev < 0: + weight_intitializer = slim.initializers.xavier_initializer() + else: + weight_intitializer = tf.truncated_normal_initializer(stddev=stddev) + + # Set weight_decay for weights in Conv and FC layers. + with slim.arg_scope( + [slim.conv2d, slim.fully_connected, slim.separable_conv2d], + weights_initializer=weight_intitializer, + activation_fn=activation, + normalizer_fn=slim.batch_norm), \ + slim.arg_scope( + [conv_blocks.expanded_conv], normalizer_fn=slim.batch_norm), \ + slim.arg_scope([mobilenet.apply_activation], activation_fn=activation),\ + slim.arg_scope([slim.batch_norm], **batch_norm_params), \ + slim.arg_scope([mobilenet.mobilenet_base, mobilenet.mobilenet], + is_training=is_training),\ + slim.arg_scope([slim.dropout], **dropout_params), \ + slim.arg_scope([slim.instance_norm], **instance_norm_params), \ + slim.arg_scope([slim.conv2d], \ + weights_regularizer=slim.l2_regularizer(weight_decay)), \ + slim.arg_scope([slim.separable_conv2d], weights_regularizer=None), \ + slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME') as s: + return s + + +# A map from network name to network arg scope. +arg_scopes_map = { + 'mobilenet_v2': mobilenet_v2.training_scope, + 'mobilenet_edgetpu': mobilenet_v2_arg_scope, + 'mobilenet_v3_large_seg': mobilenet_v2_arg_scope, + 'mobilenet_v3_small_seg': mobilenet_v2_arg_scope, + 'resnet_v1_18': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_18_beta': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_50': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_50_beta': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_101': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_101_beta': resnet_v1_beta.resnet_arg_scope, + 'xception_41': xception.xception_arg_scope, + 'xception_65': xception.xception_arg_scope, + 'xception_71': xception.xception_arg_scope, + 'nas_pnasnet': nas_network.nas_arg_scope, + 'nas_hnasnet': nas_network.nas_arg_scope, +} + +# Names for end point features. +DECODER_END_POINTS = 'decoder_end_points' + +# A dictionary from network name to a map of end point features. 
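+# The inner maps are keyed by decoder output stride (4, 8 or 16) and list the
+# low-level end point names whose activations are reused by the decoder.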
+networks_to_feature_maps = { + 'mobilenet_v2': { + DECODER_END_POINTS: { + 4: ['layer_4/depthwise_output'], + 8: ['layer_7/depthwise_output'], + 16: ['layer_14/depthwise_output'], + }, + }, + 'mobilenet_v3_large_seg': { + DECODER_END_POINTS: { + 4: ['layer_4/depthwise_output'], + 8: ['layer_7/depthwise_output'], + 16: ['layer_13/depthwise_output'], + }, + }, + 'mobilenet_v3_small_seg': { + DECODER_END_POINTS: { + 4: ['layer_2/depthwise_output'], + 8: ['layer_4/depthwise_output'], + 16: ['layer_9/depthwise_output'], + }, + }, + 'resnet_v1_18': { + DECODER_END_POINTS: { + 4: ['block1/unit_1/lite_bottleneck_v1/conv2'], + 8: ['block2/unit_1/lite_bottleneck_v1/conv2'], + 16: ['block3/unit_1/lite_bottleneck_v1/conv2'], + }, + }, + 'resnet_v1_18_beta': { + DECODER_END_POINTS: { + 4: ['block1/unit_1/lite_bottleneck_v1/conv2'], + 8: ['block2/unit_1/lite_bottleneck_v1/conv2'], + 16: ['block3/unit_1/lite_bottleneck_v1/conv2'], + }, + }, + 'resnet_v1_50': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_5/bottleneck_v1/conv3'], + }, + }, + 'resnet_v1_50_beta': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_5/bottleneck_v1/conv3'], + }, + }, + 'resnet_v1_101': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_22/bottleneck_v1/conv3'], + }, + }, + 'resnet_v1_101_beta': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_22/bottleneck_v1/conv3'], + }, + }, + 'xception_41': { + DECODER_END_POINTS: { + 4: ['entry_flow/block2/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 8: ['entry_flow/block3/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 16: ['exit_flow/block1/unit_1/xception_module/' + 'separable_conv2_pointwise'], + }, + }, + 'xception_65': { + DECODER_END_POINTS: { + 4: ['entry_flow/block2/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 8: ['entry_flow/block3/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 16: ['exit_flow/block1/unit_1/xception_module/' + 'separable_conv2_pointwise'], + }, + }, + 'xception_71': { + DECODER_END_POINTS: { + 4: ['entry_flow/block3/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 8: ['entry_flow/block5/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 16: ['exit_flow/block1/unit_1/xception_module/' + 'separable_conv2_pointwise'], + }, + }, + 'nas_pnasnet': { + DECODER_END_POINTS: { + 4: ['Stem'], + 8: ['Cell_3'], + 16: ['Cell_7'], + }, + }, + 'nas_hnasnet': { + DECODER_END_POINTS: { + 4: ['Cell_2'], + 8: ['Cell_5'], + 16: ['Cell_7'], + }, + }, +} + +# A map from feature extractor name to the network name scope used in the +# ImageNet pretrained versions of these models. +name_scope = { + 'mobilenet_v2': 'MobilenetV2', + 'mobilenet_edgetpu': 'MobilenetEdgeTPU', + 'mobilenet_v3_large_seg': 'MobilenetV3', + 'mobilenet_v3_small_seg': 'MobilenetV3', + 'resnet_v1_18': 'resnet_v1_18', + 'resnet_v1_18_beta': 'resnet_v1_18', + 'resnet_v1_50': 'resnet_v1_50', + 'resnet_v1_50_beta': 'resnet_v1_50', + 'resnet_v1_101': 'resnet_v1_101', + 'resnet_v1_101_beta': 'resnet_v1_101', + 'xception_41': 'xception_41', + 'xception_65': 'xception_65', + 'xception_71': 'xception_71', + 'nas_pnasnet': 'pnasnet', + 'nas_hnasnet': 'hnasnet', +} + +# Mean pixel value. 
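+# (Approximate ImageNet RGB means, used by _preprocess_subtract_imagenet_mean.)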
+_MEAN_RGB = [123.15, 115.90, 103.06] + + +def _preprocess_subtract_imagenet_mean(inputs, dtype=tf.float32): + """Subtract Imagenet mean RGB value.""" + mean_rgb = tf.reshape(_MEAN_RGB, [1, 1, 1, 3]) + num_channels = tf.shape(inputs)[-1] + # We set mean pixel as 0 for the non-RGB channels. + mean_rgb_extended = tf.concat( + [mean_rgb, tf.zeros([1, 1, 1, num_channels - 3])], axis=3) + return tf.cast(inputs - mean_rgb_extended, dtype=dtype) + + +def _preprocess_zero_mean_unit_range(inputs, dtype=tf.float32): + """Map image values from [0, 255] to [-1, 1].""" + preprocessed_inputs = (2.0 / 255.0) * tf.to_float(inputs) - 1.0 + return tf.cast(preprocessed_inputs, dtype=dtype) + + +_PREPROCESS_FN = { + 'mobilenet_v2': _preprocess_zero_mean_unit_range, + 'mobilenet_edgetpu': _preprocess_zero_mean_unit_range, + 'mobilenet_v3_large_seg': _preprocess_zero_mean_unit_range, + 'mobilenet_v3_small_seg': _preprocess_zero_mean_unit_range, + 'resnet_v1_18': _preprocess_subtract_imagenet_mean, + 'resnet_v1_18_beta': _preprocess_zero_mean_unit_range, + 'resnet_v1_50': _preprocess_subtract_imagenet_mean, + 'resnet_v1_50_beta': _preprocess_zero_mean_unit_range, + 'resnet_v1_101': _preprocess_subtract_imagenet_mean, + 'resnet_v1_101_beta': _preprocess_zero_mean_unit_range, + 'xception_41': _preprocess_zero_mean_unit_range, + 'xception_65': _preprocess_zero_mean_unit_range, + 'xception_71': _preprocess_zero_mean_unit_range, + 'nas_pnasnet': _preprocess_zero_mean_unit_range, + 'nas_hnasnet': _preprocess_zero_mean_unit_range, +} + + +def mean_pixel(model_variant=None): + """Gets mean pixel value. + + This function returns different mean pixel value, depending on the input + model_variant which adopts different preprocessing functions. We currently + handle the following preprocessing functions: + (1) _preprocess_subtract_imagenet_mean. We simply return mean pixel value. + (2) _preprocess_zero_mean_unit_range. We return [127.5, 127.5, 127.5]. + The return values are used in a way that the padded regions after + pre-processing will contain value 0. + + Args: + model_variant: Model variant (string) for feature extraction. For + backwards compatibility, model_variant=None returns _MEAN_RGB. + + Returns: + Mean pixel value. + """ + if model_variant in ['resnet_v1_50', + 'resnet_v1_101'] or model_variant is None: + return _MEAN_RGB + else: + return [127.5, 127.5, 127.5] + + +def extract_features(images, + output_stride=8, + multi_grid=None, + depth_multiplier=1.0, + divisible_by=None, + final_endpoint=None, + model_variant=None, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + regularize_depthwise=False, + preprocess_images=True, + preprocessed_images_dtype=tf.float32, + num_classes=None, + global_pool=False, + nas_architecture_options=None, + nas_training_hyper_parameters=None, + use_bounded_activation=False): + """Extracts features by the particular model_variant. + + Args: + images: A tensor of size [batch, height, width, channels]. + output_stride: The ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + depth_multiplier: Float multiplier for the depth (number of channels) + for all convolution ops used in MobileNet. + divisible_by: None (use default setting) or an integer that ensures all + layers # channels will be divisible by this number. Used in MobileNet. + final_endpoint: The MobileNet endpoint to construct the network up to. + model_variant: Model variant for feature extraction. 
+ weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + regularize_depthwise: Whether or not to apply L2-norm regularization on the + depthwise convolution weights. + preprocess_images: Performs preprocessing on images or not. Defaults to + True. Set to False if preprocessing will be done by other functions. We + support two types of preprocessing: (1) mean pixel subtraction and (2) + pixel value normalization to [-1, 1]. + preprocessed_images_dtype: The type after the preprocessing function. + num_classes: Number of classes for image classification task. Defaults + to None for dense prediction tasks. + global_pool: Global pooling for image classification task. Defaults to + False, since dense prediction tasks do not use this. + nas_architecture_options: A dictionary storing NAS architecture options. + It is either None or its keys are: + - `nas_stem_output_num_conv_filters`: Number of filters of the NAS stem + output tensor. + - `nas_use_classification_head`: Boolean, use image classification head. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training NAS models. It is either None or its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. Currently, + bounded activation is only used in the Xception model. + + Returns: + features: A tensor of size [batch, feature_height, feature_width, + feature_channels], where feature_height/feature_width are determined + by the images' height/width and output_stride. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: Unrecognized model variant. 
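+
+  Example:
+    An illustrative call only, not a complete training setup; it assumes
+    `images` is a [batch, height, width, 3] tensor with values in [0, 255]:
+
+      features, end_points = extract_features(
+          images,
+          output_stride=16,
+          model_variant='xception_65',
+          is_training=True,
+          fine_tune_batch_norm=True)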
+ """ + if 'resnet' in model_variant: + arg_scope = arg_scopes_map[model_variant]( + weight_decay=weight_decay, + batch_norm_decay=0.95, + batch_norm_epsilon=1e-5, + batch_norm_scale=True) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + num_classes=num_classes, + is_training=(is_training and fine_tune_batch_norm), + global_pool=global_pool, + output_stride=output_stride, + multi_grid=multi_grid, + reuse=reuse, + scope=name_scope[model_variant]) + elif 'xception' in model_variant: + arg_scope = arg_scopes_map[model_variant]( + weight_decay=weight_decay, + batch_norm_decay=0.9997, + batch_norm_epsilon=1e-3, + batch_norm_scale=True, + regularize_depthwise=regularize_depthwise, + use_bounded_activation=use_bounded_activation) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + num_classes=num_classes, + is_training=(is_training and fine_tune_batch_norm), + global_pool=global_pool, + output_stride=output_stride, + regularize_depthwise=regularize_depthwise, + multi_grid=multi_grid, + reuse=reuse, + scope=name_scope[model_variant]) + elif 'mobilenet' in model_variant or model_variant.startswith('mnas'): + arg_scope = arg_scopes_map[model_variant]( + is_training=(is_training and fine_tune_batch_norm), + weight_decay=weight_decay) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + depth_multiplier=depth_multiplier, + divisible_by=divisible_by, + output_stride=output_stride, + reuse=reuse, + scope=name_scope[model_variant], + final_endpoint=final_endpoint) + elif model_variant.startswith('nas'): + arg_scope = arg_scopes_map[model_variant]( + weight_decay=weight_decay, + batch_norm_decay=0.9997, + batch_norm_epsilon=1e-3) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + num_classes=num_classes, + is_training=(is_training and fine_tune_batch_norm), + global_pool=global_pool, + output_stride=output_stride, + nas_architecture_options=nas_architecture_options, + nas_training_hyper_parameters=nas_training_hyper_parameters, + reuse=reuse, + scope=name_scope[model_variant]) + else: + raise ValueError('Unknown model variant %s.' % model_variant) + + return features, end_points + + +def get_network(network_name, preprocess_images, + preprocessed_images_dtype=tf.float32, arg_scope=None): + """Gets the network. + + Args: + network_name: Network name. + preprocess_images: Preprocesses the images or not. + preprocessed_images_dtype: The type after the preprocessing function. + arg_scope: Optional, arg_scope to build the network. If not provided the + default arg_scope of the network would be used. + + Returns: + A network function that is used to extract features. + + Raises: + ValueError: network is not supported. + """ + if network_name not in networks_map: + raise ValueError('Unsupported network %s.' 
% network_name) + arg_scope = arg_scope or arg_scopes_map[network_name]() + def _identity_function(inputs, dtype=preprocessed_images_dtype): + return tf.cast(inputs, dtype=dtype) + if preprocess_images: + preprocess_function = _PREPROCESS_FN[network_name] + else: + preprocess_function = _identity_function + func = networks_map[network_name] + @functools.wraps(func) + def network_fn(inputs, *args, **kwargs): + with slim.arg_scope(arg_scope): + return func(preprocess_function(inputs, preprocessed_images_dtype), + *args, **kwargs) + return network_fn diff --git a/deeplab/models/research/deeplab/core/nas_cell.py b/deeplab/models/research/deeplab/core/nas_cell.py new file mode 100644 index 0000000..d179082 --- /dev/null +++ b/deeplab/models/research/deeplab/core/nas_cell.py @@ -0,0 +1,221 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Cell structure used by NAS.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +from six.moves import range +from six.moves import zip +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import xception as xception_utils +from deeplab.core.utils import resize_bilinear +from deeplab.core.utils import scale_dimension +from tensorflow.contrib.slim.nets import resnet_utils + +arg_scope = contrib_framework.arg_scope +slim = contrib_slim + +separable_conv2d_same = functools.partial(xception_utils.separable_conv2d_same, + regularize_depthwise=True) + + +class NASBaseCell(object): + """NASNet Cell class that is used as a 'layer' in image architectures.""" + + def __init__(self, num_conv_filters, operations, used_hiddenstates, + hiddenstate_indices, drop_path_keep_prob, total_num_cells, + total_training_steps, batch_norm_fn=slim.batch_norm): + """Init function. + + For more details about NAS cell, see + https://arxiv.org/abs/1707.07012 and https://arxiv.org/abs/1712.00559. + + Args: + num_conv_filters: The number of filters for each convolution operation. + operations: List of operations that are performed in the NASNet Cell in + order. + used_hiddenstates: Binary array that signals if the hiddenstate was used + within the cell. This is used to determine what outputs of the cell + should be concatenated together. + hiddenstate_indices: Determines what hiddenstates should be combined + together with the specified operations to create the NASNet cell. + drop_path_keep_prob: Float, drop path keep probability. + total_num_cells: Integer, total number of cells. + total_training_steps: Integer, total training steps. + batch_norm_fn: Function, batch norm function. Defaults to + slim.batch_norm. 
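For illustration, a framework-free sketch of how a genotype like the one documented above is consumed: operations are taken in pairs, each paired operation reads the hidden state selected by `hiddenstate_indices`, and the two results are summed into a new hidden state. The genotype values and the helper below are made up for this example only.

```python
# Toy genotype: four operations consumed as two (left, right) pairs.
operations = ['separable_3x3_2', 'max_pool_3x3', 'atrous_5x5', 'none']
hiddenstate_indices = [0, 1, 2, 0]

def apply_op(name, state):
  # Stand-in for the real conv/pool ops: just record what would be applied.
  return '{}({})'.format(name, state)

hidden_states = ['prev_layer', 'net']  # the two cell inputs
for i in range(len(operations) // 2):
  left = apply_op(operations[i * 2], hidden_states[hiddenstate_indices[i * 2]])
  right = apply_op(operations[i * 2 + 1],
                   hidden_states[hiddenstate_indices[i * 2 + 1]])
  hidden_states.append('({} + {})'.format(left, right))

# The states a real cell would then consider concatenating in its output.
print(hidden_states[2:])
```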
+ """ + if len(hiddenstate_indices) != len(operations): + raise ValueError( + 'Number of hiddenstate_indices and operations should be the same.') + if len(operations) % 2: + raise ValueError('Number of operations should be even.') + self._num_conv_filters = num_conv_filters + self._operations = operations + self._used_hiddenstates = used_hiddenstates + self._hiddenstate_indices = hiddenstate_indices + self._drop_path_keep_prob = drop_path_keep_prob + self._total_num_cells = total_num_cells + self._total_training_steps = total_training_steps + self._batch_norm_fn = batch_norm_fn + + def __call__(self, net, scope, filter_scaling, stride, prev_layer, cell_num): + """Runs the conv cell.""" + self._cell_num = cell_num + self._filter_scaling = filter_scaling + self._filter_size = int(self._num_conv_filters * filter_scaling) + + with tf.variable_scope(scope): + net = self._cell_base(net, prev_layer) + for i in range(len(self._operations) // 2): + with tf.variable_scope('comb_iter_{}'.format(i)): + h1 = net[self._hiddenstate_indices[i * 2]] + h2 = net[self._hiddenstate_indices[i * 2 + 1]] + with tf.variable_scope('left'): + h1 = self._apply_conv_operation( + h1, self._operations[i * 2], stride, + self._hiddenstate_indices[i * 2] < 2) + with tf.variable_scope('right'): + h2 = self._apply_conv_operation( + h2, self._operations[i * 2 + 1], stride, + self._hiddenstate_indices[i * 2 + 1] < 2) + with tf.variable_scope('combine'): + h = h1 + h2 + net.append(h) + + with tf.variable_scope('cell_output'): + net = self._combine_unused_states(net) + + return net + + def _cell_base(self, net, prev_layer): + """Runs the beginning of the conv cell before the chosen ops are run.""" + filter_size = self._filter_size + + if prev_layer is None: + prev_layer = net + else: + if net.shape[2] != prev_layer.shape[2]: + prev_layer = resize_bilinear( + prev_layer, tf.shape(net)[1:3], prev_layer.dtype) + if filter_size != prev_layer.shape[3]: + prev_layer = tf.nn.relu(prev_layer) + prev_layer = slim.conv2d(prev_layer, filter_size, 1, scope='prev_1x1') + prev_layer = self._batch_norm_fn(prev_layer, scope='prev_bn') + + net = tf.nn.relu(net) + net = slim.conv2d(net, filter_size, 1, scope='1x1') + net = self._batch_norm_fn(net, scope='beginning_bn') + net = tf.split(axis=3, num_or_size_splits=1, value=net) + net.append(prev_layer) + return net + + def _apply_conv_operation(self, net, operation, stride, + is_from_original_input): + """Applies the predicted conv operation to net.""" + if stride > 1 and not is_from_original_input: + stride = 1 + input_filters = net.shape[3] + filter_size = self._filter_size + if 'separable' in operation: + num_layers = int(operation.split('_')[-1]) + kernel_size = int(operation.split('x')[0][-1]) + for layer_num in range(num_layers): + net = tf.nn.relu(net) + net = separable_conv2d_same( + net, + filter_size, + kernel_size, + depth_multiplier=1, + scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1), + stride=stride) + net = self._batch_norm_fn( + net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1)) + stride = 1 + elif 'atrous' in operation: + kernel_size = int(operation.split('x')[0][-1]) + net = tf.nn.relu(net) + if stride == 2: + scaled_height = scale_dimension(tf.shape(net)[1], 0.5) + scaled_width = scale_dimension(tf.shape(net)[2], 0.5) + net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype) + net = resnet_utils.conv2d_same( + net, filter_size, kernel_size, rate=1, stride=1, + scope='atrous_{0}x{0}'.format(kernel_size)) + else: + net = 
resnet_utils.conv2d_same( + net, filter_size, kernel_size, rate=2, stride=1, + scope='atrous_{0}x{0}'.format(kernel_size)) + net = self._batch_norm_fn(net, scope='bn_atr_{0}x{0}'.format(kernel_size)) + elif operation in ['none']: + if stride > 1 or (input_filters != filter_size): + net = tf.nn.relu(net) + net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1') + net = self._batch_norm_fn(net, scope='bn_1') + elif 'pool' in operation: + pooling_type = operation.split('_')[0] + pooling_shape = int(operation.split('_')[-1].split('x')[0]) + if pooling_type == 'avg': + net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding='SAME') + elif pooling_type == 'max': + net = slim.max_pool2d(net, pooling_shape, stride=stride, padding='SAME') + else: + raise ValueError('Unimplemented pooling type: ', pooling_type) + if input_filters != filter_size: + net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1') + net = self._batch_norm_fn(net, scope='bn_1') + else: + raise ValueError('Unimplemented operation', operation) + + if operation != 'none': + net = self._apply_drop_path(net) + return net + + def _combine_unused_states(self, net): + """Concatenates the unused hidden states of the cell.""" + used_hiddenstates = self._used_hiddenstates + states_to_combine = ([ + h for h, is_used in zip(net, used_hiddenstates) if not is_used]) + net = tf.concat(values=states_to_combine, axis=3) + return net + + @contrib_framework.add_arg_scope + def _apply_drop_path(self, net): + """Apply drop_path regularization.""" + drop_path_keep_prob = self._drop_path_keep_prob + if drop_path_keep_prob < 1.0: + # Scale keep prob by layer number. + assert self._cell_num != -1 + layer_ratio = (self._cell_num + 1) / float(self._total_num_cells) + drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob) + # Decrease keep prob over time. + current_step = tf.cast(tf.train.get_or_create_global_step(), tf.float32) + current_ratio = tf.minimum(1.0, current_step / self._total_training_steps) + drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob)) + # Drop path. + noise_shape = [tf.shape(net)[0], 1, 1, 1] + random_tensor = drop_path_keep_prob + random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32) + binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype) + keep_prob_inv = tf.cast(1.0 / drop_path_keep_prob, net.dtype) + net = net * keep_prob_inv * binary_tensor + return net diff --git a/deeplab/models/research/deeplab/core/nas_genotypes.py b/deeplab/models/research/deeplab/core/nas_genotypes.py new file mode 100644 index 0000000..a2e6dd5 --- /dev/null +++ b/deeplab/models/research/deeplab/core/nas_genotypes.py @@ -0,0 +1,45 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
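The scheduled drop-path logic in `_apply_drop_path` above can be summarized with a small NumPy sketch. This is an illustration of the same arithmetic under simplified assumptions, not the TensorFlow implementation: the keep probability is first scaled by cell depth, then decayed over training time, and a per-example binary mask drops whole paths while rescaling the survivors.

```python
import numpy as np

def drop_path(net, drop_path_keep_prob, cell_num, total_num_cells,
              current_step, total_training_steps):
  if drop_path_keep_prob >= 1.0:
    return net
  # Scale keep prob by layer number (deeper cells are dropped more often).
  layer_ratio = (cell_num + 1) / float(total_num_cells)
  keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
  # Decrease keep prob linearly over training time.
  current_ratio = min(1.0, current_step / float(total_training_steps))
  keep_prob = 1 - current_ratio * (1 - keep_prob)
  # Per-example binary mask, broadcast over height, width, channels.
  noise = keep_prob + np.random.uniform(size=(net.shape[0], 1, 1, 1))
  binary_mask = np.floor(noise)
  return net / keep_prob * binary_mask

features = np.ones((4, 8, 8, 16), dtype=np.float32)
out = drop_path(features, drop_path_keep_prob=0.9, cell_num=5,
                total_num_cells=12, current_step=100000,
                total_training_steps=500000)
print(out.shape)
```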
+# ============================================================================== + +"""Genotypes used by NAS.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import nas_cell + +slim = contrib_slim + + +class PNASCell(nas_cell.NASBaseCell): + """Configuration and construction of the PNASNet-5 Cell.""" + + def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells, + total_training_steps, batch_norm_fn=slim.batch_norm): + # Name of operations: op_kernel-size_num-layers. + operations = [ + 'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3', + 'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3', + 'separable_3x3_2', 'none' + ] + used_hiddenstates = [1, 1, 0, 0, 0, 0, 0] + hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0] + + super(PNASCell, self).__init__( + num_conv_filters, operations, used_hiddenstates, hiddenstate_indices, + drop_path_keep_prob, total_num_cells, total_training_steps, + batch_norm_fn) diff --git a/deeplab/models/research/deeplab/core/nas_network.py b/deeplab/models/research/deeplab/core/nas_network.py new file mode 100644 index 0000000..1da2e04 --- /dev/null +++ b/deeplab/models/research/deeplab/core/nas_network.py @@ -0,0 +1,368 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Network structure used by NAS. + +Here we provide a few NAS backbones for semantic segmentation. +Currently, we have + +1. pnasnet +"Progressive Neural Architecture Search", Chenxi Liu, Barret Zoph, +Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei, +Alan Yuille, Jonathan Huang, Kevin Murphy. In ECCV, 2018. + +2. hnasnet (also called Auto-DeepLab) +"Auto-DeepLab: Hierarchical Neural Architecture Search for Semantic +Image Segmentation", Chenxi Liu, Liang-Chieh Chen, Florian Schroff, +Hartwig Adam, Wei Hua, Alan Yuille, Li Fei-Fei. In CVPR, 2019. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import layers as contrib_layers +from tensorflow.contrib import slim as contrib_slim +from tensorflow.contrib import training as contrib_training + +from deeplab.core import nas_genotypes +from deeplab.core import utils +from deeplab.core.nas_cell import NASBaseCell +from tensorflow.contrib.slim.nets import resnet_utils + +arg_scope = contrib_framework.arg_scope +slim = contrib_slim +resize_bilinear = utils.resize_bilinear +scale_dimension = utils.scale_dimension + + +def config(num_conv_filters=20, + total_training_steps=500000, + drop_path_keep_prob=1.0): + return contrib_training.HParams( + # Multiplier when spatial size is reduced by 2. + filter_scaling_rate=2.0, + # Number of filters of the stem output tensor. + num_conv_filters=num_conv_filters, + # Probability to keep each path in the cell when training. + drop_path_keep_prob=drop_path_keep_prob, + # Total training steps to help drop path probability calculation. + total_training_steps=total_training_steps, + ) + + +def nas_arg_scope(weight_decay=4e-5, + batch_norm_decay=0.9997, + batch_norm_epsilon=0.001, + sync_batch_norm_method='None'): + """Default arg scope for the NAS models.""" + batch_norm_params = { + # Decay for the moving averages. + 'decay': batch_norm_decay, + # epsilon to prevent 0s in variance. + 'epsilon': batch_norm_epsilon, + 'scale': True, + } + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + weights_regularizer = contrib_layers.l2_regularizer(weight_decay) + weights_initializer = contrib_layers.variance_scaling_initializer( + factor=1 / 3.0, mode='FAN_IN', uniform=True) + with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d], + weights_regularizer=weights_regularizer, + weights_initializer=weights_initializer): + with arg_scope([slim.fully_connected], + activation_fn=None, scope='FC'): + with arg_scope([slim.conv2d, slim.separable_conv2d], + activation_fn=None, biases_initializer=None): + with arg_scope([batch_norm], **batch_norm_params) as sc: + return sc + + +def _nas_stem(inputs, + batch_norm_fn=slim.batch_norm): + """Stem used for NAS models.""" + net = resnet_utils.conv2d_same(inputs, 64, 3, stride=2, scope='conv0') + net = batch_norm_fn(net, scope='conv0_bn') + net = tf.nn.relu(net) + net = resnet_utils.conv2d_same(net, 64, 3, stride=1, scope='conv1') + net = batch_norm_fn(net, scope='conv1_bn') + cell_outputs = [net] + net = tf.nn.relu(net) + net = resnet_utils.conv2d_same(net, 128, 3, stride=2, scope='conv2') + net = batch_norm_fn(net, scope='conv2_bn') + cell_outputs.append(net) + return net, cell_outputs + + +def _build_nas_base(images, + cell, + backbone, + num_classes, + hparams, + global_pool=False, + output_stride=16, + nas_use_classification_head=False, + reuse=None, + scope=None, + final_endpoint=None, + batch_norm_fn=slim.batch_norm, + nas_remove_os32_stride=False): + """Constructs a NAS model. + + Args: + images: A tensor of size [batch, height, width, channels]. + cell: Cell structure used in the network. + backbone: Backbone structure used in the network. A list of integers in + which value 0 means "output_stride=4", value 1 means "output_stride=8", + value 2 means "output_stride=16", and value 3 means "output_stride=32". + num_classes: Number of classes to predict. 
+ hparams: Hyperparameters needed to construct the network. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: Integer, the stride of output feature maps. + nas_use_classification_head: Boolean, use image classification head. + reuse: Whether or not the network and its variables should be reused. To be + able to reuse, 'scope' must be given. + scope: Optional variable_scope. + final_endpoint: The endpoint to construct the network up to. + batch_norm_fn: Batch norm function. + nas_remove_os32_stride: Boolean, remove stride in output_stride 32 branch. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: If output_stride is not a multiple of backbone output stride. + """ + with tf.variable_scope(scope, 'nas', [images], reuse=reuse): + end_points = {} + def add_and_check_endpoint(endpoint_name, net): + end_points[endpoint_name] = net + return final_endpoint and (endpoint_name == final_endpoint) + + net, cell_outputs = _nas_stem(images, + batch_norm_fn=batch_norm_fn) + if add_and_check_endpoint('Stem', net): + return net, end_points + + # Run the cells + filter_scaling = 1.0 + for cell_num in range(len(backbone)): + stride = 1 + if cell_num == 0: + if backbone[0] == 1: + stride = 2 + filter_scaling *= hparams.filter_scaling_rate + else: + if backbone[cell_num] == backbone[cell_num - 1] + 1: + stride = 2 + if backbone[cell_num] == 3 and nas_remove_os32_stride: + stride = 1 + filter_scaling *= hparams.filter_scaling_rate + elif backbone[cell_num] == backbone[cell_num - 1] - 1: + if backbone[cell_num - 1] == 3 and nas_remove_os32_stride: + # No need to rescale features. + pass + else: + # Scale features by a factor of 2. + scaled_height = scale_dimension(net.shape[1].value, 2) + scaled_width = scale_dimension(net.shape[2].value, 2) + net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype) + filter_scaling /= hparams.filter_scaling_rate + net = cell( + net, + scope='cell_{}'.format(cell_num), + filter_scaling=filter_scaling, + stride=stride, + prev_layer=cell_outputs[-2], + cell_num=cell_num) + if add_and_check_endpoint('Cell_{}'.format(cell_num), net): + return net, end_points + cell_outputs.append(net) + net = tf.nn.relu(net) + + if nas_use_classification_head: + # Add image classification head. + # We will expand the filters for different output_strides. + output_stride_to_expanded_filters = {8: 256, 16: 512, 32: 1024} + current_output_scale = 2 + backbone[-1] + current_output_stride = 2 ** current_output_scale + if output_stride % current_output_stride != 0: + raise ValueError( + 'output_stride must be a multiple of backbone output stride.') + output_stride //= current_output_stride + rate = 1 + if current_output_stride != 32: + num_downsampling = 5 - current_output_scale + for i in range(num_downsampling): + # Gradually downsample feature maps to output stride = 32.
+ target_output_stride = 2 ** (current_output_scale + 1 + i) + target_filters = output_stride_to_expanded_filters[ + target_output_stride] + scope = 'downsample_os{}'.format(target_output_stride) + if output_stride != 1: + stride = 2 + output_stride //= 2 + else: + stride = 1 + rate *= 2 + net = resnet_utils.conv2d_same( + net, target_filters, 3, stride=stride, rate=rate, + scope=scope + '_conv') + net = batch_norm_fn(net, scope=scope + '_bn') + add_and_check_endpoint(scope, net) + net = tf.nn.relu(net) + # Apply 1x1 convolution to expand dimension to 2048. + scope = 'classification_head' + net = slim.conv2d(net, 2048, 1, scope=scope + '_conv') + net = batch_norm_fn(net, scope=scope + '_bn') + add_and_check_endpoint(scope, net) + net = tf.nn.relu(net) + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='global_pool', keepdims=True) + if num_classes is not None: + net = slim.conv2d(net, num_classes, 1, activation_fn=None, + normalizer_fn=None, scope='logits') + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points + + +def pnasnet(images, + num_classes, + is_training=True, + global_pool=False, + output_stride=16, + nas_architecture_options=None, + nas_training_hyper_parameters=None, + reuse=None, + scope='pnasnet', + final_endpoint=None, + sync_batch_norm_method='None'): + """Builds PNASNet model.""" + if nas_architecture_options is None: + raise ValueError( + 'Using NAS model variants. nas_architecture_options cannot be None.') + hparams = config(num_conv_filters=nas_architecture_options[ + 'nas_stem_output_num_conv_filters']) + if nas_training_hyper_parameters: + hparams.set_hparam('drop_path_keep_prob', + nas_training_hyper_parameters['drop_path_keep_prob']) + hparams.set_hparam('total_training_steps', + nas_training_hyper_parameters['total_training_steps']) + if not is_training: + tf.logging.info('During inference, setting drop_path_keep_prob = 1.0.') + hparams.set_hparam('drop_path_keep_prob', 1.0) + tf.logging.info(hparams) + if output_stride == 8: + backbone = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + elif output_stride == 16: + backbone = [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2] + elif output_stride == 32: + backbone = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3] + else: + raise ValueError('Unsupported output_stride ', output_stride) + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + cell = nas_genotypes.PNASCell(hparams.num_conv_filters, + hparams.drop_path_keep_prob, + len(backbone), + hparams.total_training_steps, + batch_norm_fn=batch_norm) + with arg_scope([slim.dropout, batch_norm], is_training=is_training): + return _build_nas_base( + images, + cell=cell, + backbone=backbone, + num_classes=num_classes, + hparams=hparams, + global_pool=global_pool, + output_stride=output_stride, + nas_use_classification_head=nas_architecture_options[ + 'nas_use_classification_head'], + reuse=reuse, + scope=scope, + final_endpoint=final_endpoint, + batch_norm_fn=batch_norm, + nas_remove_os32_stride=nas_architecture_options[ + 'nas_remove_os32_stride']) + + +# pylint: disable=unused-argument +def hnasnet(images, + num_classes, + is_training=True, + global_pool=False, + output_stride=8, + nas_architecture_options=None, + nas_training_hyper_parameters=None, + reuse=None, + scope='hnasnet', + final_endpoint=None, + sync_batch_norm_method='None'): + """Builds hierarchical model.""" + if nas_architecture_options is None: + raise ValueError( + 'Using NAS model variants. 
nas_architecture_options cannot be None.') + hparams = config(num_conv_filters=nas_architecture_options[ + 'nas_stem_output_num_conv_filters']) + if nas_training_hyper_parameters: + hparams.set_hparam('drop_path_keep_prob', + nas_training_hyper_parameters['drop_path_keep_prob']) + hparams.set_hparam('total_training_steps', + nas_training_hyper_parameters['total_training_steps']) + if not is_training: + tf.logging.info('During inference, setting drop_path_keep_prob = 1.0.') + hparams.set_hparam('drop_path_keep_prob', 1.0) + tf.logging.info(hparams) + operations = [ + 'atrous_5x5', 'separable_3x3_2', 'separable_3x3_2', 'atrous_3x3', + 'separable_3x3_2', 'separable_3x3_2', 'separable_5x5_2', + 'separable_5x5_2', 'separable_5x5_2', 'atrous_5x5' + ] + used_hiddenstates = [1, 1, 0, 0, 0, 0, 0] + hiddenstate_indices = [1, 0, 1, 0, 3, 1, 4, 2, 3, 5] + backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1] + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + cell = NASBaseCell(hparams.num_conv_filters, + operations, + used_hiddenstates, + hiddenstate_indices, + hparams.drop_path_keep_prob, + len(backbone), + hparams.total_training_steps, + batch_norm_fn=batch_norm) + with arg_scope([slim.dropout, batch_norm], is_training=is_training): + return _build_nas_base( + images, + cell=cell, + backbone=backbone, + num_classes=num_classes, + hparams=hparams, + global_pool=global_pool, + output_stride=output_stride, + nas_use_classification_head=nas_architecture_options[ + 'nas_use_classification_head'], + reuse=reuse, + scope=scope, + final_endpoint=final_endpoint, + batch_norm_fn=batch_norm, + nas_remove_os32_stride=nas_architecture_options[ + 'nas_remove_os32_stride']) diff --git a/deeplab/models/research/deeplab/core/nas_network_test.py b/deeplab/models/research/deeplab/core/nas_network_test.py new file mode 100644 index 0000000..18621b2 --- /dev/null +++ b/deeplab/models/research/deeplab/core/nas_network_test.py @@ -0,0 +1,111 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
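As a quick aside, the backbone lists used by pnasnet and hnasnet above encode feature strides: a value b places a cell at output_stride 2**(b + 2) (0 -> 4, 1 -> 8, 2 -> 16, 3 -> 32), as documented in `_build_nas_base`. A tiny sketch of that mapping, using the hnasnet backbone shown above:

```python
hnasnet_backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1]

def backbone_to_strides(backbone):
  # Each entry b corresponds to output_stride 2**(b + 2); consecutive entries
  # describe where the network downsamples (b increases) or upsamples
  # (b decreases).
  return [2 ** (b + 2) for b in backbone]

print(backbone_to_strides(hnasnet_backbone))
# [4, 4, 4, 8, 16, 8, 16, 16, 32, 32, 16, 8]
```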
+# ============================================================================== + +"""Tests for resnet_v1_beta module.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import slim as contrib_slim +from tensorflow.contrib import training as contrib_training + +from deeplab.core import nas_genotypes +from deeplab.core import nas_network + +arg_scope = contrib_framework.arg_scope +slim = contrib_slim + + +def create_test_input(batch, height, width, channels): + """Creates test input tensor.""" + if None in [batch, height, width, channels]: + return tf.placeholder(tf.float32, (batch, height, width, channels)) + else: + return tf.to_float( + np.tile( + np.reshape( + np.reshape(np.arange(height), [height, 1]) + + np.reshape(np.arange(width), [1, width]), + [1, height, width, 1]), + [batch, 1, 1, channels])) + + +class NASNetworkTest(tf.test.TestCase): + """Tests with complete small NAS networks.""" + + def _pnasnet(self, + images, + backbone, + num_classes, + is_training=True, + output_stride=16, + final_endpoint=None): + """Build PNASNet model backbone.""" + hparams = contrib_training.HParams( + filter_scaling_rate=2.0, + num_conv_filters=10, + drop_path_keep_prob=1.0, + total_training_steps=200000, + ) + if not is_training: + hparams.set_hparam('drop_path_keep_prob', 1.0) + + cell = nas_genotypes.PNASCell(hparams.num_conv_filters, + hparams.drop_path_keep_prob, + len(backbone), + hparams.total_training_steps) + with arg_scope([slim.dropout, slim.batch_norm], is_training=is_training): + return nas_network._build_nas_base( + images, + cell=cell, + backbone=backbone, + num_classes=num_classes, + hparams=hparams, + reuse=tf.AUTO_REUSE, + scope='pnasnet_small', + final_endpoint=final_endpoint) + + def testFullyConvolutionalEndpointShapes(self): + num_classes = 10 + backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1] + inputs = create_test_input(None, 321, 321, 3) + with slim.arg_scope(nas_network.nas_arg_scope()): + _, end_points = self._pnasnet(inputs, backbone, num_classes) + endpoint_to_shape = { + 'Stem': [None, 81, 81, 128], + 'Cell_0': [None, 81, 81, 50], + 'Cell_1': [None, 81, 81, 50], + 'Cell_2': [None, 81, 81, 50], + 'Cell_3': [None, 41, 41, 100], + 'Cell_4': [None, 21, 21, 200], + 'Cell_5': [None, 41, 41, 100], + 'Cell_6': [None, 21, 21, 200], + 'Cell_7': [None, 21, 21, 200], + 'Cell_8': [None, 11, 11, 400], + 'Cell_9': [None, 11, 11, 400], + 'Cell_10': [None, 21, 21, 200], + 'Cell_11': [None, 41, 41, 100] + } + for endpoint, shape in endpoint_to_shape.items(): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/core/preprocess_utils.py b/deeplab/models/research/deeplab/core/preprocess_utils.py new file mode 100644 index 0000000..440717e --- /dev/null +++ b/deeplab/models/research/deeplab/core/preprocess_utils.py @@ -0,0 +1,533 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility functions related to preprocessing inputs.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import range +from six.moves import zip +import tensorflow as tf + + +def flip_dim(tensor_list, prob=0.5, dim=1): + """Randomly flips a dimension of the given tensor. + + The decision to randomly flip the `Tensors` is made together. In other words, + all or none of the images passed in are flipped. + + Note that tf.random_flip_left_right and tf.random_flip_up_down aren't used so + that we can control for the probability as well as ensure the same decision + is applied across the images. + + Args: + tensor_list: A list of `Tensors` with the same number of dimensions. + prob: The probability of a left-right flip. + dim: The dimension to flip, 0, 1, .. + + Returns: + outputs: A list of the possibly flipped `Tensors` as well as an indicator + `Tensor` at the end whose value is `True` if the inputs were flipped and + `False` otherwise. + + Raises: + ValueError: If dim is negative or greater than the dimension of a `Tensor`. + """ + random_value = tf.random_uniform([]) + + def flip(): + flipped = [] + for tensor in tensor_list: + if dim < 0 or dim >= len(tensor.get_shape().as_list()): + raise ValueError('dim must represent a valid dimension.') + flipped.append(tf.reverse_v2(tensor, [dim])) + return flipped + + is_flipped = tf.less_equal(random_value, prob) + outputs = tf.cond(is_flipped, flip, lambda: tensor_list) + if not isinstance(outputs, (list, tuple)): + outputs = [outputs] + outputs.append(is_flipped) + + return outputs + + +def _image_dimensions(image, rank): + """Returns the dimensions of an image tensor. + + Args: + image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`. + rank: The expected rank of the image. + + Returns: + A list corresponding to the dimensions of the input image. Dimensions + that are statically known are python integers, otherwise they are integer + scalar tensors. + """ + if image.get_shape().is_fully_defined(): + return image.get_shape().as_list() + else: + static_shape = image.get_shape().with_rank(rank).as_list() + dynamic_shape = tf.unstack(tf.shape(image), rank) + return [ + s if s is not None else d for s, d in zip(static_shape, dynamic_shape) + ] + + +def get_label_resize_method(label): + """Returns the resize method of labels depending on label dtype. + + Args: + label: Groundtruth label tensor. + + Returns: + tf.image.ResizeMethod.BILINEAR, if label dtype is floating. + tf.image.ResizeMethod.NEAREST_NEIGHBOR, if label dtype is integer. + + Raises: + ValueError: If label is neither floating nor integer.
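A brief usage sketch for `flip_dim` above, assuming a TF 1.x graph-mode session and the same import path the tests below use. The same random decision is applied to every tensor in the list, and the `is_flipped` boolean is appended to the returned outputs.

```python
import numpy as np
import tensorflow as tf  # TF 1.x, as assumed throughout this file

from deeplab.core import preprocess_utils

image = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3, 1))
label = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3, 1))

# Both tensors are flipped (or not) together; is_flipped reports the decision.
flipped_image, flipped_label, is_flipped = preprocess_utils.flip_dim(
    [image, label], prob=0.5, dim=1)

with tf.Session() as sess:
  img_out, lbl_out, flipped = sess.run(
      [flipped_image, flipped_label, is_flipped])
  print(flipped, np.array_equal(img_out, lbl_out))  # e.g. True True
```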
+ """ + if label.dtype.is_floating: + return tf.image.ResizeMethod.BILINEAR + elif label.dtype.is_integer: + return tf.image.ResizeMethod.NEAREST_NEIGHBOR + else: + raise ValueError('Label type must be either floating or integer.') + + +def pad_to_bounding_box(image, offset_height, offset_width, target_height, + target_width, pad_value): + """Pads the given image with the given pad_value. + + Works like tf.image.pad_to_bounding_box, except it can pad the image + with any given arbitrary pad value and also handle images whose sizes are not + known during graph construction. + + Args: + image: 3-D tensor with shape [height, width, channels] + offset_height: Number of rows of zeros to add on top. + offset_width: Number of columns of zeros to add on the left. + target_height: Height of output image. + target_width: Width of output image. + pad_value: Value to pad the image tensor with. + + Returns: + 3-D tensor of shape [target_height, target_width, channels]. + + Raises: + ValueError: If the shape of image is incompatible with the offset_* or + target_* arguments. + """ + with tf.name_scope(None, 'pad_to_bounding_box', [image]): + image = tf.convert_to_tensor(image, name='image') + original_dtype = image.dtype + if original_dtype != tf.float32 and original_dtype != tf.float64: + # If image dtype is not float, we convert it to int32 to avoid overflow. + image = tf.cast(image, tf.int32) + image_rank_assert = tf.Assert( + tf.logical_or( + tf.equal(tf.rank(image), 3), + tf.equal(tf.rank(image), 4)), + ['Wrong image tensor rank.']) + with tf.control_dependencies([image_rank_assert]): + image -= pad_value + image_shape = image.get_shape() + is_batch = True + if image_shape.ndims == 3: + is_batch = False + image = tf.expand_dims(image, 0) + elif image_shape.ndims is None: + is_batch = False + image = tf.expand_dims(image, 0) + image.set_shape([None] * 4) + elif image.get_shape().ndims != 4: + raise ValueError('Input image must have either 3 or 4 dimensions.') + _, height, width, _ = _image_dimensions(image, rank=4) + target_width_assert = tf.Assert( + tf.greater_equal( + target_width, width), + ['target_width must be >= width']) + target_height_assert = tf.Assert( + tf.greater_equal(target_height, height), + ['target_height must be >= height']) + with tf.control_dependencies([target_width_assert]): + after_padding_width = target_width - offset_width - width + with tf.control_dependencies([target_height_assert]): + after_padding_height = target_height - offset_height - height + offset_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(after_padding_width, 0), + tf.greater_equal(after_padding_height, 0)), + ['target size not possible with the given target offsets']) + batch_params = tf.stack([0, 0]) + height_params = tf.stack([offset_height, after_padding_height]) + width_params = tf.stack([offset_width, after_padding_width]) + channel_params = tf.stack([0, 0]) + with tf.control_dependencies([offset_assert]): + paddings = tf.stack([batch_params, height_params, width_params, + channel_params]) + padded = tf.pad(image, paddings) + if not is_batch: + padded = tf.squeeze(padded, axis=[0]) + outputs = padded + pad_value + if outputs.dtype != original_dtype: + outputs = tf.cast(outputs, original_dtype) + return outputs + + +def _crop(image, offset_height, offset_width, crop_height, crop_width): + """Crops the given image using the provided offsets and sizes. + + Note that the method doesn't assume we know the input image size but it does + assume we know the input image rank. 
+ + Args: + image: an image of shape [height, width, channels]. + offset_height: a scalar tensor indicating the height offset. + offset_width: a scalar tensor indicating the width offset. + crop_height: the height of the cropped image. + crop_width: the width of the cropped image. + + Returns: + The cropped (and resized) image. + + Raises: + ValueError: if `image` doesn't have rank of 3. + InvalidArgumentError: if the rank is not 3 or if the image dimensions are + less than the crop size. + """ + original_shape = tf.shape(image) + + if len(image.get_shape().as_list()) != 3: + raise ValueError('input must have rank of 3') + original_channels = image.get_shape().as_list()[2] + + rank_assertion = tf.Assert( + tf.equal(tf.rank(image), 3), + ['Rank of image must be equal to 3.']) + with tf.control_dependencies([rank_assertion]): + cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) + + size_assertion = tf.Assert( + tf.logical_and( + tf.greater_equal(original_shape[0], crop_height), + tf.greater_equal(original_shape[1], crop_width)), + ['Crop size greater than the image size.']) + + offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32) + + # Use tf.slice instead of crop_to_bounding box as it accepts tensors to + # define the crop size. + with tf.control_dependencies([size_assertion]): + image = tf.slice(image, offsets, cropped_shape) + image = tf.reshape(image, cropped_shape) + image.set_shape([crop_height, crop_width, original_channels]) + return image + + +def random_crop(image_list, crop_height, crop_width): + """Crops the given list of images. + + The function applies the same crop to each image in the list. This can be + effectively applied when there are multiple image inputs of the same + dimension such as: + + image, depths, normals = random_crop([image, depths, normals], 120, 150) + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the new height. + crop_width: the new width. + + Returns: + the image_list with cropped images. + + Raises: + ValueError: if there are multiple image inputs provided with different size + or the images are smaller than the crop dimensions. + """ + if not image_list: + raise ValueError('Empty image_list.') + + # Compute the rank assertions. 
+ rank_assertions = [] + for i in range(len(image_list)): + image_rank = tf.rank(image_list[i]) + rank_assert = tf.Assert( + tf.equal(image_rank, 3), + ['Wrong rank for tensor %s [expected] [actual]', + image_list[i].name, 3, image_rank]) + rank_assertions.append(rank_assert) + + with tf.control_dependencies([rank_assertions[0]]): + image_shape = tf.shape(image_list[0]) + image_height = image_shape[0] + image_width = image_shape[1] + crop_size_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(image_height, crop_height), + tf.greater_equal(image_width, crop_width)), + ['Crop size greater than the image size.']) + + asserts = [rank_assertions[0], crop_size_assert] + + for i in range(1, len(image_list)): + image = image_list[i] + asserts.append(rank_assertions[i]) + with tf.control_dependencies([rank_assertions[i]]): + shape = tf.shape(image) + height = shape[0] + width = shape[1] + + height_assert = tf.Assert( + tf.equal(height, image_height), + ['Wrong height for tensor %s [expected][actual]', + image.name, height, image_height]) + width_assert = tf.Assert( + tf.equal(width, image_width), + ['Wrong width for tensor %s [expected][actual]', + image.name, width, image_width]) + asserts.extend([height_assert, width_assert]) + + # Create a random bounding box. + # + # Use tf.random_uniform and not numpy.random.rand as doing the former would + # generate random numbers at graph eval time, unlike the latter which + # generates random numbers at graph definition time. + with tf.control_dependencies(asserts): + max_offset_height = tf.reshape(image_height - crop_height + 1, []) + max_offset_width = tf.reshape(image_width - crop_width + 1, []) + offset_height = tf.random_uniform( + [], maxval=max_offset_height, dtype=tf.int32) + offset_width = tf.random_uniform( + [], maxval=max_offset_width, dtype=tf.int32) + + return [_crop(image, offset_height, offset_width, + crop_height, crop_width) for image in image_list] + + +def get_random_scale(min_scale_factor, max_scale_factor, step_size): + """Gets a random scale value. + + Args: + min_scale_factor: Minimum scale value. + max_scale_factor: Maximum scale value. + step_size: The step size from minimum to maximum value. + + Returns: + A random scale value selected between minimum and maximum value. + + Raises: + ValueError: min_scale_factor has unexpected value. + """ + if min_scale_factor < 0 or min_scale_factor > max_scale_factor: + raise ValueError('Unexpected value of min_scale_factor.') + + if min_scale_factor == max_scale_factor: + return tf.cast(min_scale_factor, tf.float32) + + # When step_size = 0, we sample the value uniformly from [min, max). + if step_size == 0: + return tf.random_uniform([1], + minval=min_scale_factor, + maxval=max_scale_factor) + + # When step_size != 0, we randomly select one discrete value from [min, max]. + num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1) + scale_factors = tf.lin_space(min_scale_factor, max_scale_factor, num_steps) + shuffled_scale_factors = tf.random_shuffle(scale_factors) + return shuffled_scale_factors[0] + + +def randomly_scale_image_and_label(image, label=None, scale=1.0): + """Randomly scales image and label. + + Args: + image: Image with shape [height, width, 3]. + label: Label with shape [height, width, 1]. + scale: The value to scale image and label. + + Returns: + Scaled image and label. + """ + # No random scaling if scale == 1. 
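The two sampling modes of `get_random_scale` above (uniform when step_size == 0, a discrete grid otherwise) can be sketched without TensorFlow. This NumPy version is an illustration of the same logic, not the graph-mode implementation.

```python
import numpy as np

def random_scale(min_scale_factor, max_scale_factor, step_size):
  if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
    raise ValueError('Unexpected value of min_scale_factor.')
  if min_scale_factor == max_scale_factor:
    return float(min_scale_factor)
  if step_size == 0:
    # Sample uniformly from [min, max).
    return np.random.uniform(min_scale_factor, max_scale_factor)
  # Otherwise pick one value from the grid min, min + step, ..., max.
  num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
  scale_factors = np.linspace(min_scale_factor, max_scale_factor, num_steps)
  return np.random.choice(scale_factors)

print(random_scale(0.5, 2.0, 0.25))  # one of 0.5, 0.75, ..., 2.0
```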
+ if scale == 1.0: + return image, label + image_shape = tf.shape(image) + new_dim = tf.cast( + tf.cast([image_shape[0], image_shape[1]], tf.float32) * scale, + tf.int32) + + # Need squeeze and expand_dims because image interpolation takes + # 4D tensors as input. + image = tf.squeeze(tf.image.resize_bilinear( + tf.expand_dims(image, 0), + new_dim, + align_corners=True), [0]) + if label is not None: + label = tf.image.resize( + label, + new_dim, + method=get_label_resize_method(label), + align_corners=True) + + return image, label + + +def resolve_shape(tensor, rank=None, scope=None): + """Fully resolves the shape of a Tensor. + + Use as much as possible the shape components already known during graph + creation and resolve the remaining ones during runtime. + + Args: + tensor: Input tensor whose shape we query. + rank: The rank of the tensor, provided that we know it. + scope: Optional name scope. + + Returns: + shape: The full shape of the tensor. + """ + with tf.name_scope(scope, 'resolve_shape', [tensor]): + if rank is not None: + shape = tensor.get_shape().with_rank(rank).as_list() + else: + shape = tensor.get_shape().as_list() + + if None in shape: + shape_dynamic = tf.shape(tensor) + for i in range(len(shape)): + if shape[i] is None: + shape[i] = shape_dynamic[i] + + return shape + + +def resize_to_range(image, + label=None, + min_size=None, + max_size=None, + factor=None, + keep_aspect_ratio=True, + align_corners=True, + label_layout_is_chw=False, + scope=None, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes image or label so their sides are within the provided range. + + The output size can be described by two cases: + 1. If the image can be rescaled so its minimum size is equal to min_size + without the other side exceeding max_size, then do so. + 2. Otherwise, resize so the largest side is equal to max_size. + + An integer in `range(factor)` is added to the computed sides so that the + final dimensions are multiples of `factor` plus one. + + Args: + image: A 3D tensor of shape [height, width, channels]. + label: (optional) A 3D tensor of shape [height, width, channels] (default) + or [channels, height, width] when label_layout_is_chw = True. + min_size: (scalar) desired size of the smaller image side. + max_size: (scalar) maximum allowed size of the larger image side. Note + that the output dimension is no larger than max_size and may be slightly + smaller than max_size when factor is not None. + factor: Make output size multiple of factor plus one. + keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input + will be resized while keeping the original aspect ratio. If False, the + input will be resized to [max_resize_value, max_resize_value] without + keeping the original aspect ratio. + align_corners: If True, exactly align all 4 corners of input and output. + label_layout_is_chw: If true, the label has shape [channel, height, width]. + We support this case because for some instance segmentation dataset, the + instance segmentation is saved as [num_instances, height, width]. + scope: Optional name scope. + method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR. + + Returns: + A 3-D tensor of shape [new_height, new_width, channels], where the image + has been resized (with the specified method) so that + min(new_height, new_width) == ceil(min_size) or + max(new_height, new_width) == ceil(max_size). + + Raises: + ValueError: If the image is not a 3D tensor. 
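A plain-Python sketch of the size selection described in this docstring, ignoring the `factor` adjustment: scale so the smaller side reaches min_size, and fall back to capping the larger side at max_size when that would overshoot. The sample shapes and results correspond to testResizeTensorsToRange further below.

```python
def target_size(height, width, min_size, max_size):
  # Case 1: scale so the smaller side equals min_size.
  large_scale = min_size / min(height, width)
  large = (int(height * large_scale), int(width * large_scale))
  if max(large) > max_size:
    # Case 2: the larger side would exceed max_size, so cap it instead.
    small_scale = max_size / max(height, width)
    return (int(height * small_scale), int(width * small_scale))
  return large

print(target_size(60, 40, 50, 100))  # (75, 50)
print(target_size(15, 30, 50, 100))  # (50, 100)
print(target_size(15, 50, 50, 100))  # (30, 100)
```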
+ """ + with tf.name_scope(scope, 'resize_to_range', [image]): + new_tensor_list = [] + min_size = tf.cast(min_size, tf.float32) + if max_size is not None: + max_size = tf.cast(max_size, tf.float32) + # Modify the max_size to be a multiple of factor plus 1 and make sure the + # max dimension after resizing is no larger than max_size. + if factor is not None: + max_size = (max_size - (max_size - 1) % factor) + + [orig_height, orig_width, _] = resolve_shape(image, rank=3) + orig_height = tf.cast(orig_height, tf.float32) + orig_width = tf.cast(orig_width, tf.float32) + orig_min_size = tf.minimum(orig_height, orig_width) + + # Calculate the larger of the possible sizes + large_scale_factor = min_size / orig_min_size + large_height = tf.cast(tf.floor(orig_height * large_scale_factor), tf.int32) + large_width = tf.cast(tf.floor(orig_width * large_scale_factor), tf.int32) + large_size = tf.stack([large_height, large_width]) + + new_size = large_size + if max_size is not None: + # Calculate the smaller of the possible sizes, use that if the larger + # is too big. + orig_max_size = tf.maximum(orig_height, orig_width) + small_scale_factor = max_size / orig_max_size + small_height = tf.cast( + tf.floor(orig_height * small_scale_factor), tf.int32) + small_width = tf.cast(tf.floor(orig_width * small_scale_factor), tf.int32) + small_size = tf.stack([small_height, small_width]) + new_size = tf.cond( + tf.cast(tf.reduce_max(large_size), tf.float32) > max_size, + lambda: small_size, + lambda: large_size) + # Ensure that both output sides are multiples of factor plus one. + if factor is not None: + new_size += (factor - (new_size - 1) % factor) % factor + if not keep_aspect_ratio: + # If not keep the aspect ratio, we resize everything to max_size, allowing + # us to do pre-processing without extra padding. + new_size = [tf.reduce_max(new_size), tf.reduce_max(new_size)] + new_tensor_list.append(tf.image.resize( + image, new_size, method=method, align_corners=align_corners)) + if label is not None: + if label_layout_is_chw: + # Input label has shape [channel, height, width]. + resized_label = tf.expand_dims(label, 3) + resized_label = tf.image.resize( + resized_label, + new_size, + method=get_label_resize_method(label), + align_corners=align_corners) + resized_label = tf.squeeze(resized_label, 3) + else: + # Input label has shape [height, width, channel]. + resized_label = tf.image.resize( + label, + new_size, + method=get_label_resize_method(label), + align_corners=align_corners) + new_tensor_list.append(resized_label) + else: + new_tensor_list.append(None) + return new_tensor_list diff --git a/deeplab/models/research/deeplab/core/preprocess_utils_test.py b/deeplab/models/research/deeplab/core/preprocess_utils_test.py new file mode 100644 index 0000000..606fe46 --- /dev/null +++ b/deeplab/models/research/deeplab/core/preprocess_utils_test.py @@ -0,0 +1,515 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for preprocess_utils.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import range +import tensorflow as tf + +from deeplab.core import preprocess_utils + + +class PreprocessUtilsTest(tf.test.TestCase): + + def testNoFlipWhenProbIsZero(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + image = tf.convert_to_tensor(numpy_image) + + with self.test_session(): + actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=0) + self.assertAllEqual(numpy_image, actual.eval()) + self.assertAllEqual(False, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=1) + self.assertAllEqual(numpy_image, actual.eval()) + self.assertAllEqual(False, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=2) + self.assertAllEqual(numpy_image, actual.eval()) + self.assertAllEqual(False, is_flipped.eval()) + + def testFlipWhenProbIsOne(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + dim0_flipped = np.dstack([[[9., 0.], + [5., 6.]], + [[3., 5.], + [4., 3.]]]) + dim1_flipped = np.dstack([[[6., 5.], + [0., 9.]], + [[3., 4.], + [5., 3.]]]) + dim2_flipped = np.dstack([[[4., 3.], + [3., 5.]], + [[5., 6.], + [9., 0.]]]) + image = tf.convert_to_tensor(numpy_image) + + with self.test_session(): + actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=0) + self.assertAllEqual(dim0_flipped, actual.eval()) + self.assertAllEqual(True, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=1) + self.assertAllEqual(dim1_flipped, actual.eval()) + self.assertAllEqual(True, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=2) + self.assertAllEqual(dim2_flipped, actual.eval()) + self.assertAllEqual(True, is_flipped.eval()) + + def testFlipMultipleImagesConsistentlyWhenProbIsOne(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + numpy_label = np.dstack([[[0., 1.], + [2., 3.]]]) + image_dim1_flipped = np.dstack([[[6., 5.], + [0., 9.]], + [[3., 4.], + [5., 3.]]]) + label_dim1_flipped = np.dstack([[[1., 0.], + [3., 2.]]]) + image = tf.convert_to_tensor(numpy_image) + label = tf.convert_to_tensor(numpy_label) + + with self.test_session() as sess: + image, label, is_flipped = preprocess_utils.flip_dim( + [image, label], prob=1, dim=1) + actual_image, actual_label = sess.run([image, label]) + self.assertAllEqual(image_dim1_flipped, actual_image) + self.assertAllEqual(label_dim1_flipped, actual_label) + self.assertEqual(True, is_flipped.eval()) + + def testReturnRandomFlipsOnMultipleEvals(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + dim1_flipped = np.dstack([[[6., 5.], + [0., 9.]], + [[3., 4.], + [5., 3.]]]) + image = tf.convert_to_tensor(numpy_image) + tf.compat.v1.set_random_seed(53) + + with self.test_session() as sess: + actual, is_flipped = preprocess_utils.flip_dim( + [image], prob=0.5, dim=1) + actual_image, actual_is_flipped = sess.run([actual, is_flipped]) + self.assertAllEqual(numpy_image, actual_image) + self.assertEqual(False, actual_is_flipped) + actual_image, actual_is_flipped = sess.run([actual, is_flipped]) + self.assertAllEqual(dim1_flipped, actual_image) + self.assertEqual(True, actual_is_flipped) + + def 
testReturnCorrectCropOfSingleImage(self): + np.random.seed(0) + + height, width = 10, 20 + image = np.random.randint(0, 256, size=(height, width, 3)) + + crop_height, crop_width = 2, 4 + + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + [cropped] = preprocess_utils.random_crop([image_placeholder], + crop_height, + crop_width) + + with self.test_session(): + cropped_image = cropped.eval(feed_dict={image_placeholder: image}) + + # Ensure we can find the cropped image in the original: + is_found = False + for x in range(0, width - crop_width + 1): + for y in range(0, height - crop_height + 1): + if np.isclose(image[y:y+crop_height, x:x+crop_width, :], + cropped_image).all(): + is_found = True + break + + self.assertTrue(is_found) + + def testRandomCropMaintainsNumberOfChannels(self): + np.random.seed(0) + + crop_height, crop_width = 10, 20 + image = np.random.randint(0, 256, size=(100, 200, 3)) + + tf.compat.v1.set_random_seed(37) + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + [cropped] = preprocess_utils.random_crop( + [image_placeholder], crop_height, crop_width) + + with self.test_session(): + cropped_image = cropped.eval(feed_dict={image_placeholder: image}) + self.assertTupleEqual(cropped_image.shape, (crop_height, crop_width, 3)) + + def testReturnDifferentCropAreasOnTwoEvals(self): + tf.compat.v1.set_random_seed(0) + + crop_height, crop_width = 2, 3 + image = np.random.randint(0, 256, size=(100, 200, 3)) + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + [cropped] = preprocess_utils.random_crop( + [image_placeholder], crop_height, crop_width) + + with self.test_session(): + crop0 = cropped.eval(feed_dict={image_placeholder: image}) + crop1 = cropped.eval(feed_dict={image_placeholder: image}) + self.assertFalse(np.isclose(crop0, crop1).all()) + + def testReturnConsistenCropsOfImagesInTheList(self): + tf.compat.v1.set_random_seed(0) + + height, width = 10, 20 + crop_height, crop_width = 2, 3 + labels = np.linspace(0, height * width-1, height * width) + labels = labels.reshape((height, width, 1)) + image = np.tile(labels, (1, 1, 3)) + + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + label_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1)) + [cropped_image, cropped_label] = preprocess_utils.random_crop( + [image_placeholder, label_placeholder], crop_height, crop_width) + + with self.test_session() as sess: + cropped_image, cropped_labels = sess.run([cropped_image, cropped_label], + feed_dict={ + image_placeholder: image, + label_placeholder: labels}) + for i in range(3): + self.assertAllEqual(cropped_image[:, :, i], cropped_labels.squeeze()) + + def testDieOnRandomCropWhenImagesWithDifferentWidth(self): + crop_height, crop_width = 2, 3 + image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) + image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) + cropped = preprocess_utils.random_crop( + [image1, image2], crop_height, crop_width) + + with self.test_session() as sess: + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), + image2: np.random.rand(4, 6, 1)}) + + def testDieOnRandomCropWhenImagesWithDifferentHeight(self): + crop_height, crop_width = 2, 3 + image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) + image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) + cropped = preprocess_utils.random_crop( + [image1, image2], crop_height, 
crop_width) + + with self.test_session() as sess: + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'Wrong height for tensor'): + sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), + image2: np.random.rand(3, 5, 1)}) + + def testDieOnRandomCropWhenCropSizeIsGreaterThanImage(self): + crop_height, crop_width = 5, 9 + image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) + image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) + cropped = preprocess_utils.random_crop( + [image1, image2], crop_height, crop_width) + + with self.test_session() as sess: + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'Crop size greater than the image size.'): + sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), + image2: np.random.rand(4, 5, 1)}) + + def testReturnPaddedImageWithNonZeroPadValue(self): + for dtype in [np.int32, np.int64, np.float32, np.float64]: + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]).astype(dtype) + expected_image = np.dstack([[[255, 255, 255, 255, 255], + [255, 255, 255, 255, 255], + [255, 5, 6, 255, 255], + [255, 9, 0, 255, 255], + [255, 255, 255, 255, 255]], + [[255, 255, 255, 255, 255], + [255, 255, 255, 255, 255], + [255, 4, 3, 255, 255], + [255, 3, 5, 255, 255], + [255, 255, 255, 255, 255]]]).astype(dtype) + + with self.session() as sess: + padded_image = preprocess_utils.pad_to_bounding_box( + image, 2, 1, 5, 5, 255) + padded_image = sess.run(padded_image) + self.assertAllClose(padded_image, expected_image) + # Add batch size = 1 to image. + padded_image = preprocess_utils.pad_to_bounding_box( + np.expand_dims(image, 0), 2, 1, 5, 5, 255) + padded_image = sess.run(padded_image) + self.assertAllClose(padded_image, np.expand_dims(expected_image, 0)) + + def testReturnOriginalImageWhenTargetSizeIsEqualToImageSize(self): + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]) + with self.session() as sess: + padded_image = preprocess_utils.pad_to_bounding_box( + image, 0, 0, 2, 2, 255) + padded_image = sess.run(padded_image) + self.assertAllClose(padded_image, image) + + def testDieOnTargetSizeGreaterThanImageSize(self): + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]) + with self.test_session(): + image_placeholder = tf.placeholder(tf.float32) + padded_image = preprocess_utils.pad_to_bounding_box( + image_placeholder, 0, 0, 2, 1, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'target_width must be >= width'): + padded_image.eval(feed_dict={image_placeholder: image}) + padded_image = preprocess_utils.pad_to_bounding_box( + image_placeholder, 0, 0, 1, 2, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'target_height must be >= height'): + padded_image.eval(feed_dict={image_placeholder: image}) + + def testDieIfTargetSizeNotPossibleWithGivenOffset(self): + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]) + with self.test_session(): + image_placeholder = tf.placeholder(tf.float32) + padded_image = preprocess_utils.pad_to_bounding_box( + image_placeholder, 3, 0, 4, 4, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'target size not possible with the given target offsets'): + padded_image.eval(feed_dict={image_placeholder: image}) + + def testDieIfImageTensorRankIsTwo(self): + image = np.vstack([[5, 6], + [9, 0]]) + with self.test_session(): + image_placeholder = tf.placeholder(tf.float32) + padded_image = 
preprocess_utils.pad_to_bounding_box( + image_placeholder, 0, 0, 2, 2, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'Wrong image tensor rank'): + padded_image.eval(feed_dict={image_placeholder: image}) + + def testResizeTensorsToRange(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + min_size = 50 + max_size = 100 + factor = None + expected_shape_list = [(75, 50, 3), + (50, 100, 3), + (30, 100, 3)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=None, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + resized_image = session.run(new_tensor_list[0]) + self.assertEqual(resized_image.shape, expected_shape_list[i]) + + def testResizeTensorsToRangeWithFactor(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + min_size = 50 + max_size = 98 + factor = 8 + expected_image_shape_list = [(81, 57, 3), + (49, 97, 3), + (33, 97, 3)] + expected_label_shape_list = [(81, 57, 1), + (49, 97, 1), + (33, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithFactorAndLabelShapeCHW(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + min_size = 50 + max_size = 98 + factor = 8 + expected_image_shape_list = [(81, 57, 3), + (49, 97, 3), + (33, 97, 3)] + expected_label_shape_list = [(5, 81, 57), + (5, 49, 97), + (5, 33, 97)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([5, test_shape[0], test_shape[1]]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True, + label_layout_is_chw=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithSimilarMinMaxSizes(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + # Values set so that one of the side = 97. 
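+      # With factor = 8 the resized dimensions below are expected to have the
+      # form 8 * k + 1 (e.g. 97 and 65), per the expected shape lists.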
+ min_size = 96 + max_size = 98 + factor = 8 + expected_image_shape_list = [(97, 65, 3), + (49, 97, 3), + (33, 97, 3)] + expected_label_shape_list = [(97, 65, 1), + (49, 97, 1), + (33, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithEqualMaxSize(self): + test_shapes = [[97, 38], + [96, 97]] + # Make max_size equal to the larger value of test_shapes. + min_size = 97 + max_size = 97 + factor = 8 + expected_image_shape_list = [(97, 41, 3), + (97, 97, 3)] + expected_label_shape_list = [(97, 41, 1), + (97, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithPotentialErrorInTFCeil(self): + test_shape = [3936, 5248] + # Make max_size equal to the larger value of test_shapes. + min_size = 1441 + max_size = 1441 + factor = 16 + expected_image_shape = (1089, 1441, 3) + expected_label_shape = (1089, 1441, 1) + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape) + + def testResizeTensorsToRangeWithEqualMaxSizeWithoutAspectRatio(self): + test_shapes = [[97, 38], + [96, 97]] + # Make max_size equal to the larger value of test_shapes. 
+ min_size = 97 + max_size = 97 + factor = 8 + keep_aspect_ratio = False + expected_image_shape_list = [(97, 97, 3), + (97, 97, 3)] + expected_label_shape_list = [(97, 97, 1), + (97, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + keep_aspect_ratio=keep_aspect_ratio, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/core/resnet_v1_beta.py b/deeplab/models/research/deeplab/core/resnet_v1_beta.py new file mode 100644 index 0000000..0d5f1f1 --- /dev/null +++ b/deeplab/models/research/deeplab/core/resnet_v1_beta.py @@ -0,0 +1,827 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Resnet v1 model variants. + +Code branched out from slim/nets/resnet_v1.py, and please refer to it for +more details. + +The original versions of ResNet-v1 were proposed by: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 + +The pre-activation study cited as [2] in the unit docstrings below is: +[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Identity Mappings in Deep Residual Networks. arXiv:1603.05027 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import conv2d_ws +from deeplab.core import utils +from tensorflow.contrib.slim.nets import resnet_utils + +slim = contrib_slim + +_DEFAULT_MULTI_GRID = [1, 1, 1] +_DEFAULT_MULTI_GRID_RESNET_18 = [1, 1] + + +@slim.add_arg_scope +def bottleneck(inputs, + depth, + depth_bottleneck, + stride, + unit_rate=1, + rate=1, + outputs_collections=None, + scope=None): + """Bottleneck residual unit variant with BN after convolutions. + + This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for + its definition. Note that we use here the bottleneck variant which has an + extra bottleneck layer. + + When putting together two consecutive ResNet blocks that use this unit, one + should use stride = 2 in the last unit of the first block. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth: The depth of the ResNet unit output. + depth_bottleneck: The depth of the bottleneck layers. + stride: The ResNet unit's stride. Determines the amount of downsampling of + the units output compared to its input. + unit_rate: An integer, unit rate for atrous convolution. + rate: An integer, rate for atrous convolution.
+ outputs_collections: Collection to add the ResNet unit output. + scope: Optional variable_scope. + + Returns: + The ResNet unit's output. + """ + with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc: + depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) + if depth == depth_in: + shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') + else: + shortcut = conv2d_ws.conv2d( + inputs, + depth, + [1, 1], + stride=stride, + activation_fn=None, + scope='shortcut') + + residual = conv2d_ws.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, + scope='conv1') + residual = conv2d_ws.conv2d_same(residual, depth_bottleneck, 3, stride, + rate=rate*unit_rate, scope='conv2') + residual = conv2d_ws.conv2d(residual, depth, [1, 1], stride=1, + activation_fn=None, scope='conv3') + output = tf.nn.relu(shortcut + residual) + + return slim.utils.collect_named_outputs(outputs_collections, sc.name, + output) + + +@slim.add_arg_scope +def lite_bottleneck(inputs, + depth, + stride, + unit_rate=1, + rate=1, + outputs_collections=None, + scope=None): + """Lite residual unit variant with BN after convolutions. + + This is the basic (non-bottleneck) residual unit used by the shallower + ResNets such as ResNet-18: two 3x3 convolutions with no extra 1x1 + bottleneck layer. See [1] for its definition. + + When putting together two consecutive ResNet blocks that use this unit, one + should use stride = 2 in the last unit of the first block. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth: The depth of the ResNet unit output. + stride: The ResNet unit's stride. Determines the amount of downsampling of + the units output compared to its input. + unit_rate: An integer, unit rate for atrous convolution. + rate: An integer, rate for atrous convolution. + outputs_collections: Collection to add the ResNet unit output. + scope: Optional variable_scope. + + Returns: + The ResNet unit's output. + """ + with tf.variable_scope(scope, 'lite_bottleneck_v1', [inputs]) as sc: + depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) + if depth == depth_in: + shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') + else: + shortcut = conv2d_ws.conv2d( + inputs, + depth, [1, 1], + stride=stride, + activation_fn=None, + scope='shortcut') + + residual = conv2d_ws.conv2d_same( + inputs, depth, 3, 1, rate=rate * unit_rate, scope='conv1') + with slim.arg_scope([conv2d_ws.conv2d], activation_fn=None): + residual = conv2d_ws.conv2d_same( + residual, depth, 3, stride, rate=rate * unit_rate, scope='conv2') + output = tf.nn.relu(shortcut + residual) + + return slim.utils.collect_named_outputs(outputs_collections, sc.name, + output) + + +def root_block_fn_for_beta_variant(net, depth_multiplier=1.0): + """Gets root_block_fn for beta variant. + + ResNet-v1 beta variant modifies the first original 7x7 convolution to three + 3x3 convolutions. + + Args: + net: A tensor of size [batch, height, width, channels], input to the model. + depth_multiplier: Controls the number of convolution output channels for + each input channel. The total number of depthwise convolution output + channels will be equal to `num_filters_out * depth_multiplier`. + + Returns: + A tensor after three 3x3 convolutions.
+ """ + net = conv2d_ws.conv2d_same( + net, int(64 * depth_multiplier), 3, stride=2, scope='conv1_1') + net = conv2d_ws.conv2d_same( + net, int(64 * depth_multiplier), 3, stride=1, scope='conv1_2') + net = conv2d_ws.conv2d_same( + net, int(128 * depth_multiplier), 3, stride=1, scope='conv1_3') + + return net + + +def resnet_v1_beta(inputs, + blocks, + num_classes=None, + is_training=None, + global_pool=True, + output_stride=None, + root_block_fn=None, + reuse=None, + scope=None, + sync_batch_norm_method='None'): + """Generator for v1 ResNet models (beta variant). + + This function generates a family of modified ResNet v1 models. In particular, + the first original 7x7 convolution is replaced with three 3x3 convolutions. + See the resnet_v1_*() methods for specific model instantiations, obtained by + selecting different block instantiations that produce ResNets of various + depths. + + The code is modified from slim/nets/resnet_v1.py, and please refer to it for + more details. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + blocks: A list of length equal to the number of ResNet blocks. Each element + is a resnet_utils.Block object describing the units in the block. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + root_block_fn: The function consisting of convolution operations applied to + the root input. If root_block_fn is None, use the original setting of + RseNet-v1, which is simply one convolution with 7x7 kernel and stride=2. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: If the target output_stride is not valid. 
+ """ + if root_block_fn is None: + root_block_fn = functools.partial(conv2d_ws.conv2d_same, + num_outputs=64, + kernel_size=7, + stride=2, + scope='conv1') + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc: + end_points_collection = sc.original_name_scope + '_end_points' + with slim.arg_scope([ + conv2d_ws.conv2d, bottleneck, lite_bottleneck, + resnet_utils.stack_blocks_dense + ], + outputs_collections=end_points_collection): + if is_training is not None: + arg_scope = slim.arg_scope([batch_norm], is_training=is_training) + else: + arg_scope = slim.arg_scope([]) + with arg_scope: + net = inputs + if output_stride is not None: + if output_stride % 4 != 0: + raise ValueError('The output_stride needs to be a multiple of 4.') + output_stride //= 4 + net = root_block_fn(net) + net = slim.max_pool2d(net, 3, stride=2, padding='SAME', scope='pool1') + net = resnet_utils.stack_blocks_dense(net, blocks, output_stride) + + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='pool5', keepdims=True) + if num_classes is not None: + net = conv2d_ws.conv2d(net, num_classes, [1, 1], activation_fn=None, + normalizer_fn=None, scope='logits', + use_weight_standardization=False) + # Convert end_points_collection into a dictionary of end_points. + end_points = slim.utils.convert_collection_to_dict( + end_points_collection) + if num_classes is not None: + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points + + +def resnet_v1_beta_block(scope, base_depth, num_units, stride): + """Helper function for creating a resnet_v1 beta variant bottleneck block. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. + All other units have stride=1. + + Returns: + A resnet_v1 bottleneck block. + """ + return resnet_utils.Block(scope, bottleneck, [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': 1, + 'unit_rate': 1 + }] * (num_units - 1) + [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': stride, + 'unit_rate': 1 + }]) + + +def resnet_v1_small_beta_block(scope, base_depth, num_units, stride): + """Helper function for creating a resnet_18 beta variant bottleneck block. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. + All other units have stride=1. + + Returns: + A resnet_18 bottleneck block. + """ + block_args = [] + for _ in range(num_units - 1): + block_args.append({'depth': base_depth, 'stride': 1, 'unit_rate': 1}) + block_args.append({'depth': base_depth, 'stride': stride, 'unit_rate': 1}) + return resnet_utils.Block(scope, lite_bottleneck, block_args) + + +def resnet_v1_18(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_18', + sync_batch_norm_method='None'): + """Resnet v1 18. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. 
+ global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 2. + """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID_RESNET_18 + else: + if len(multi_grid) != 2: + raise ValueError('Expect multi_grid to have length 2.') + + block4_args = [] + for rate in multi_grid: + block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate}) + + blocks = [ + resnet_v1_small_beta_block( + 'block1', base_depth=64, num_units=2, stride=2), + resnet_v1_small_beta_block( + 'block2', base_depth=128, num_units=2, stride=2), + resnet_v1_small_beta_block( + 'block3', base_depth=256, num_units=2, stride=2), + resnet_utils.Block('block4', lite_bottleneck, block4_args), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_18_beta(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + root_depth_multiplier=0.25, + reuse=None, + scope='resnet_v1_18', + sync_batch_norm_method='None'): + """Resnet v1 18 beta variant. + + This variant modifies the first convolution layer of ResNet-v1-18. In + particular, it changes the original one 7x7 convolution to three 3x3 + convolutions. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + root_depth_multiplier: Float, depth multiplier used for the first three + convolution layers that replace the 7x7 convolution. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope.
+ sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 2. + """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID_RESNET_18 + else: + if len(multi_grid) != 2: + raise ValueError('Expect multi_grid to have length 2.') + + block4_args = [] + for rate in multi_grid: + block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate}) + + blocks = [ + resnet_v1_small_beta_block( + 'block1', base_depth=64, num_units=2, stride=2), + resnet_v1_small_beta_block( + 'block2', base_depth=128, num_units=2, stride=2), + resnet_v1_small_beta_block( + 'block3', base_depth=256, num_units=2, stride=2), + resnet_utils.Block('block4', lite_bottleneck, block4_args), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial(root_block_fn_for_beta_variant, + depth_multiplier=root_depth_multiplier), + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_50(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_50', + sync_batch_norm_method='None'): + """Resnet v1 50. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 3.
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=6, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_50_beta(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_50', + sync_batch_norm_method='None'): + """Resnet v1 50 beta variant. + + This variant modifies the first convolution layer of ResNet-v1-50. In + particular, it changes the original one 7x7 convolution to three 3x3 + convolutions. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 3. 
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=6, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial(root_block_fn_for_beta_variant), + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_101(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_101', + sync_batch_norm_method='None'): + """Resnet v1 101. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 3. 
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=23, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_101_beta(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_101', + sync_batch_norm_method='None'): + """Resnet v1 101 beta variant. + + This variant modifies the first convolution layer of ResNet-v1-101. In + particular, it changes the original one 7x7 convolution to three 3x3 + convolutions. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 3. 
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=23, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial(root_block_fn_for_beta_variant), + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_arg_scope(weight_decay=0.0001, + batch_norm_decay=0.997, + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + activation_fn=tf.nn.relu, + use_batch_norm=True, + sync_batch_norm_method='None', + normalization_method='unspecified', + use_weight_standardization=False): + """Defines the default ResNet arg scope. + + Args: + weight_decay: The weight decay to use for regularizing the model. + batch_norm_decay: The moving average decay when estimating layer activation + statistics in batch normalization. + batch_norm_epsilon: Small constant to prevent division by zero when + normalizing activations by their variance in batch normalization. + batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + activation_fn: The activation function which is used in ResNet. + use_batch_norm: Deprecated in favor of normalization_method. + sync_batch_norm_method: String, sync batchnorm method. + normalization_method: String, one of `batch`, `none`, or `group`, to use + batch normalization, no normalization, or group normalization. + use_weight_standardization: Boolean, whether to use weight standardization. + + Returns: + An `arg_scope` to use for the resnet models. + """ + batch_norm_params = { + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon, + 'scale': batch_norm_scale, + } + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + if normalization_method == 'batch': + normalizer_fn = batch_norm + elif normalization_method == 'none': + normalizer_fn = None + elif normalization_method == 'group': + normalizer_fn = slim.group_norm + elif normalization_method == 'unspecified': + normalizer_fn = batch_norm if use_batch_norm else None + else: + raise ValueError('Unrecognized normalization_method %s' % + normalization_method) + + with slim.arg_scope([conv2d_ws.conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + weights_initializer=slim.variance_scaling_initializer(), + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + use_weight_standardization=use_weight_standardization): + with slim.arg_scope([batch_norm], **batch_norm_params): + # The following implies padding='SAME' for pool1, which makes feature + # alignment easier for dense prediction tasks. This is also used in + # https://github.com/facebook/fb.resnet.torch. However the accompanying + # code of 'Deep Residual Learning for Image Recognition' uses + # padding='VALID' for pool1. You can switch to that choice by setting + # slim.arg_scope([slim.max_pool2d], padding='VALID'). 
+ with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc: + return arg_sc diff --git a/deeplab/models/research/deeplab/core/resnet_v1_beta_test.py b/deeplab/models/research/deeplab/core/resnet_v1_beta_test.py new file mode 100644 index 0000000..8b61edc --- /dev/null +++ b/deeplab/models/research/deeplab/core/resnet_v1_beta_test.py @@ -0,0 +1,564 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for resnet_v1_beta module.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import numpy as np +import six +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import resnet_v1_beta +from tensorflow.contrib.slim.nets import resnet_utils + +slim = contrib_slim + + +def create_test_input(batch, height, width, channels): + """Create test input tensor.""" + if None in [batch, height, width, channels]: + return tf.placeholder(tf.float32, (batch, height, width, channels)) + else: + return tf.to_float( + np.tile( + np.reshape( + np.reshape(np.arange(height), [height, 1]) + + np.reshape(np.arange(width), [1, width]), + [1, height, width, 1]), + [batch, 1, 1, channels])) + + +class ResnetCompleteNetworkTest(tf.test.TestCase): + """Tests with complete small ResNet v1 networks.""" + + def _resnet_small_lite_bottleneck(self, + inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_small'): + """A shallow and thin ResNet v1 with lite_bottleneck.""" + if multi_grid is None: + multi_grid = [1, 1] + else: + if len(multi_grid) != 2: + raise ValueError('Expect multi_grid to have length 2.') + block = resnet_v1_beta.resnet_v1_small_beta_block + blocks = [ + block('block1', base_depth=1, num_units=1, stride=2), + block('block2', base_depth=2, num_units=1, stride=2), + block('block3', base_depth=4, num_units=1, stride=2), + resnet_utils.Block('block4', resnet_v1_beta.lite_bottleneck, [ + {'depth': 8, + 'stride': 1, + 'unit_rate': rate} for rate in multi_grid])] + return resnet_v1_beta.resnet_v1_beta( + inputs, + blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial( + resnet_v1_beta.root_block_fn_for_beta_variant, + depth_multiplier=0.25), + reuse=reuse, + scope=scope) + + def _resnet_small(self, + inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_small'): + """A shallow and thin ResNet v1 for faster tests.""" + if multi_grid is None: + multi_grid = [1, 1, 1] + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + block = resnet_v1_beta.resnet_v1_beta_block + blocks = [ + 
block('block1', base_depth=1, num_units=1, stride=2), + block('block2', base_depth=2, num_units=1, stride=2), + block('block3', base_depth=4, num_units=1, stride=2), + resnet_utils.Block('block4', resnet_v1_beta.bottleneck, [ + {'depth': 32, 'depth_bottleneck': 8, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid])] + + return resnet_v1_beta.resnet_v1_beta( + inputs, + blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial( + resnet_v1_beta.root_block_fn_for_beta_variant), + reuse=reuse, + scope=scope) + + def testClassificationEndPointsWithLiteBottleneck(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithMultigridAndLiteBottleneck(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + multi_grid = [1, 2] + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + multi_grid=multi_grid, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationShapesWithLiteBottleneck(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 112, 112, 16], + 'resnet/conv1_2': [2, 112, 112, 16], + 'resnet/conv1_3': [2, 112, 112, 32], + 'resnet/block1': [2, 28, 28, 1], + 'resnet/block2': [2, 14, 14, 2], + 'resnet/block3': [2, 7, 7, 4], + 'resnet/block4': [2, 7, 7, 8]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testFullyConvolutionalEndpointShapesWithLiteBottleneck(self): + global_pool = False + num_classes = 10 + inputs = create_test_input(2, 321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 16], + 'resnet/conv1_2': [2, 161, 161, 16], + 'resnet/conv1_3': [2, 161, 161, 32], + 'resnet/block1': [2, 41, 41, 1], + 'resnet/block2': [2, 21, 21, 2], + 'resnet/block3': [2, 11, 11, 4], + 'resnet/block4': [2, 11, 11, 8]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalEndpointShapesWithLiteBottleneck(self): + global_pool = False + num_classes = 10 + output_stride = 8 + inputs = create_test_input(2, 
321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + output_stride=output_stride, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 16], + 'resnet/conv1_2': [2, 161, 161, 16], + 'resnet/conv1_3': [2, 161, 161, 32], + 'resnet/block1': [2, 41, 41, 1], + 'resnet/block2': [2, 41, 41, 2], + 'resnet/block3': [2, 41, 41, 4], + 'resnet/block4': [2, 41, 41, 8]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalValuesWithLiteBottleneck(self): + """Verify dense feature extraction with atrous convolution.""" + nominal_stride = 32 + for output_stride in [4, 8, 16, 32, None]: + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + with tf.Graph().as_default(): + with self.test_session() as sess: + tf.set_random_seed(0) + inputs = create_test_input(2, 81, 81, 3) + # Dense feature extraction followed by subsampling. + output, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + is_training=False, + global_pool=False, + output_stride=output_stride) + if output_stride is None: + factor = 1 + else: + factor = nominal_stride // output_stride + output = resnet_utils.subsample(output, factor) + # Make the two networks use the same weights. + tf.get_variable_scope().reuse_variables() + # Feature extraction at the nominal network rate. + expected, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + is_training=False, + global_pool=False) + sess.run(tf.global_variables_initializer()) + self.assertAllClose(output.eval(), expected.eval(), + atol=1e-4, rtol=1e-4) + + def testUnknownBatchSizeWithLiteBottleneck(self): + batch = 2 + height, width = 65, 65 + global_pool = True + num_classes = 10 + inputs = create_test_input(None, height, width, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, _ = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, 1, 1, num_classes]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 1, 1, num_classes)) + + def testFullyConvolutionalUnknownHeightWidthWithLiteBottleneck(self): + batch = 2 + height, width = 65, 65 + global_pool = False + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + global_pool=global_pool) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 8]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 3, 3, 8)) + + def testAtrousFullyConvolutionalUnknownHeightWidthWithLiteBottleneck(self): + batch = 2 + height, width = 65, 65 + global_pool = False + output_stride = 8 + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + global_pool=global_pool, + output_stride=output_stride) + 
self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 8]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 9, 9, 8)) + + def testClassificationEndPoints(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithWS(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope(use_weight_standardization=True)): + logits, end_points = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithGN(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope(normalization_method='group')): + with slim.arg_scope([slim.group_norm], groups=1): + logits, end_points = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testInvalidGroupsWithGN(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with self.assertRaisesRegexp(ValueError, 'Invalid groups'): + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope(normalization_method='group')): + with slim.arg_scope([slim.group_norm], groups=32): + _, _ = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + def testClassificationEndPointsWithGNWS(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope( + normalization_method='group', use_weight_standardization=True)): + with slim.arg_scope([slim.group_norm], groups=1): + logits, end_points = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithMultigrid(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + multi_grid = [1, 2, 4] + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = 
self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + multi_grid=multi_grid, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationShapes(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 112, 112, 64], + 'resnet/conv1_2': [2, 112, 112, 64], + 'resnet/conv1_3': [2, 112, 112, 128], + 'resnet/block1': [2, 28, 28, 4], + 'resnet/block2': [2, 14, 14, 8], + 'resnet/block3': [2, 7, 7, 16], + 'resnet/block4': [2, 7, 7, 32]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 10 + inputs = create_test_input(2, 321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 64], + 'resnet/conv1_2': [2, 161, 161, 64], + 'resnet/conv1_3': [2, 161, 161, 128], + 'resnet/block1': [2, 41, 41, 4], + 'resnet/block2': [2, 21, 21, 8], + 'resnet/block3': [2, 11, 11, 16], + 'resnet/block4': [2, 11, 11, 32]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 10 + output_stride = 8 + inputs = create_test_input(2, 321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + output_stride=output_stride, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 64], + 'resnet/conv1_2': [2, 161, 161, 64], + 'resnet/conv1_3': [2, 161, 161, 128], + 'resnet/block1': [2, 41, 41, 4], + 'resnet/block2': [2, 41, 41, 8], + 'resnet/block3': [2, 41, 41, 16], + 'resnet/block4': [2, 41, 41, 32]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalValues(self): + """Verify dense feature extraction with atrous convolution.""" + nominal_stride = 32 + for output_stride in [4, 8, 16, 32, None]: + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + with tf.Graph().as_default(): + with self.test_session() as sess: + tf.set_random_seed(0) + inputs = create_test_input(2, 81, 81, 3) + # Dense feature extraction followed by subsampling. + output, _ = self._resnet_small(inputs, + None, + is_training=False, + global_pool=False, + output_stride=output_stride) + if output_stride is None: + factor = 1 + else: + factor = nominal_stride // output_stride + output = resnet_utils.subsample(output, factor) + # Make the two networks use the same weights. + tf.get_variable_scope().reuse_variables() + # Feature extraction at the nominal network rate. 
+ expected, _ = self._resnet_small(inputs, + None, + is_training=False, + global_pool=False) + sess.run(tf.global_variables_initializer()) + self.assertAllClose(output.eval(), expected.eval(), + atol=1e-4, rtol=1e-4) + + def testUnknownBatchSize(self): + batch = 2 + height, width = 65, 65 + global_pool = True + num_classes = 10 + inputs = create_test_input(None, height, width, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, _ = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, 1, 1, num_classes]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 1, 1, num_classes)) + + def testFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small(inputs, + None, + global_pool=global_pool) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 32]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 3, 3, 32)) + + def testAtrousFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + output_stride = 8 + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small(inputs, + None, + global_pool=global_pool, + output_stride=output_stride) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 32]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 9, 9, 32)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/core/utils.py b/deeplab/models/research/deeplab/core/utils.py new file mode 100644 index 0000000..4bf3d09 --- /dev/null +++ b/deeplab/models/research/deeplab/core/utils.py @@ -0,0 +1,214 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""This script contains utility functions.""" +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import slim as contrib_slim + +slim = contrib_slim + + +# Quantized version of sigmoid function. 
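+# This is the 'hard' sigmoid approximation relu6(x + 3) / 6, written with the
+# multiplicative constant 0.16667 as an approximation of 1 / 6.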
+q_sigmoid = lambda x: tf.nn.relu6(x + 3) * 0.16667 + + +def resize_bilinear(images, size, output_dtype=tf.float32): + """Returns resized images as output_type. + + Args: + images: A tensor of size [batch, height_in, width_in, channels]. + size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size + for the images. + output_dtype: The destination type. + Returns: + A tensor of size [batch, height_out, width_out, channels] as a dtype of + output_dtype. + """ + images = tf.image.resize_bilinear(images, size, align_corners=True) + return tf.cast(images, dtype=output_dtype) + + +def scale_dimension(dim, scale): + """Scales the input dimension. + + Args: + dim: Input dimension (a scalar or a scalar Tensor). + scale: The amount of scaling applied to the input. + + Returns: + Scaled dimension. + """ + if isinstance(dim, tf.Tensor): + return tf.cast((tf.to_float(dim) - 1.0) * scale + 1.0, dtype=tf.int32) + else: + return int((float(dim) - 1.0) * scale + 1.0) + + +def split_separable_conv2d(inputs, + filters, + kernel_size=3, + rate=1, + weight_decay=0.00004, + depthwise_weights_initializer_stddev=0.33, + pointwise_weights_initializer_stddev=0.06, + scope=None): + """Splits a separable conv2d into depthwise and pointwise conv2d. + + This operation differs from `tf.layers.separable_conv2d` as this operation + applies activation function between depthwise and pointwise conv2d. + + Args: + inputs: Input tensor with shape [batch, height, width, channels]. + filters: Number of filters in the 1x1 pointwise convolution. + kernel_size: A list of length 2: [kernel_height, kernel_width] of + of the filters. Can be an int if both values are the same. + rate: Atrous convolution rate for the depthwise convolution. + weight_decay: The weight decay to use for regularizing the model. + depthwise_weights_initializer_stddev: The standard deviation of the + truncated normal weight initializer for depthwise convolution. + pointwise_weights_initializer_stddev: The standard deviation of the + truncated normal weight initializer for pointwise convolution. + scope: Optional scope for the operation. + + Returns: + Computed features after split separable conv2d. + """ + outputs = slim.separable_conv2d( + inputs, + None, + kernel_size=kernel_size, + depth_multiplier=1, + rate=rate, + weights_initializer=tf.truncated_normal_initializer( + stddev=depthwise_weights_initializer_stddev), + weights_regularizer=None, + scope=scope + '_depthwise') + return slim.conv2d( + outputs, + filters, + 1, + weights_initializer=tf.truncated_normal_initializer( + stddev=pointwise_weights_initializer_stddev), + weights_regularizer=slim.l2_regularizer(weight_decay), + scope=scope + '_pointwise') + + +def get_label_weight_mask(labels, ignore_label, num_classes, label_weights=1.0): + """Gets the label weight mask. + + Args: + labels: A Tensor of labels with the shape of [-1]. + ignore_label: Integer, label to ignore. + num_classes: Integer, the number of semantic classes. + label_weights: A float or a list of weights. If it is a float, it means all + the labels have the same weight. If it is a list of weights, then each + element in the list represents the weight for the label of its index, for + example, label_weights = [0.1, 0.5] means the weight for label 0 is 0.1 + and the weight for label 1 is 0.5. + + Returns: + A Tensor of label weights with the same shape of labels, each element is the + weight for the label with the same index in labels and the element is 0.0 + if the label is to ignore. 
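+    For example, labels = [0, 2, 1] with ignore_label = 2, num_classes = 2 and
+    label_weights = [0.3, 0.7] yields the weight mask [0.3, 0.0, 0.7].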
+ + Raises: + ValueError: If label_weights is neither a float nor a list, or if + label_weights is a list and its length is not equal to num_classes. + """ + if not isinstance(label_weights, (float, list)): + raise ValueError( + 'The type of label_weights is invalid, it must be a float or a list.') + + if isinstance(label_weights, list) and len(label_weights) != num_classes: + raise ValueError( + 'Length of label_weights must be equal to num_classes if it is a list, ' + 'label_weights: %s, num_classes: %d.' % (label_weights, num_classes)) + + not_ignore_mask = tf.not_equal(labels, ignore_label) + not_ignore_mask = tf.cast(not_ignore_mask, tf.float32) + if isinstance(label_weights, float): + return not_ignore_mask * label_weights + + label_weights = tf.constant(label_weights, tf.float32) + weight_mask = tf.einsum('...y,y->...', + tf.one_hot(labels, num_classes, dtype=tf.float32), + label_weights) + return tf.multiply(not_ignore_mask, weight_mask) + + +def get_batch_norm_fn(sync_batch_norm_method): + """Gets batch norm function. + + Currently we only support the following methods: + - `None` (no sync batch norm). We use slim.batch_norm in this case. + + Args: + sync_batch_norm_method: String, method used to sync batch norm. + + Returns: + Batchnorm function. + + Raises: + ValueError: If sync_batch_norm_method is not supported. + """ + if sync_batch_norm_method == 'None': + return slim.batch_norm + else: + raise ValueError('Unsupported sync_batch_norm_method.') + + +def get_batch_norm_params(decay=0.9997, + epsilon=1e-5, + center=True, + scale=True, + is_training=True, + sync_batch_norm_method='None', + initialize_gamma_as_zeros=False): + """Gets batch norm parameters. + + Args: + decay: Float, decay for the moving average. + epsilon: Float, value added to variance to avoid dividing by zero. + center: Boolean. If True, add offset of `beta` to normalized tensor. If + False,`beta` is ignored. + scale: Boolean. If True, multiply by `gamma`. If False, `gamma` is not used. + is_training: Boolean, whether or not the layer is in training mode. + sync_batch_norm_method: String, method used to sync batch norm. + initialize_gamma_as_zeros: Boolean, initializing `gamma` as zeros or not. + + Returns: + A dictionary for batchnorm parameters. + + Raises: + ValueError: If sync_batch_norm_method is not supported. + """ + batch_norm_params = { + 'is_training': is_training, + 'decay': decay, + 'epsilon': epsilon, + 'scale': scale, + 'center': center, + } + if initialize_gamma_as_zeros: + if sync_batch_norm_method == 'None': + # Slim-type gamma_initialier. + batch_norm_params['param_initializers'] = { + 'gamma': tf.zeros_initializer(), + } + else: + raise ValueError('Unsupported sync_batch_norm_method.') + return batch_norm_params diff --git a/deeplab/models/research/deeplab/core/utils_test.py b/deeplab/models/research/deeplab/core/utils_test.py new file mode 100644 index 0000000..cfdb63e --- /dev/null +++ b/deeplab/models/research/deeplab/core/utils_test.py @@ -0,0 +1,90 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for utils.py.""" + +import numpy as np +import tensorflow as tf + +from deeplab.core import utils + + +class UtilsTest(tf.test.TestCase): + + def testScaleDimensionOutput(self): + self.assertEqual(161, utils.scale_dimension(321, 0.5)) + self.assertEqual(193, utils.scale_dimension(321, 0.6)) + self.assertEqual(241, utils.scale_dimension(321, 0.75)) + + def testGetLabelWeightMask_withFloatLabelWeights(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + label_weights = 0.5 + expected_label_weight_mask = np.array([0.5, 0.0, 0.5, 0.5, 0.5], + dtype=np.float32) + + with self.test_session() as sess: + label_weight_mask = utils.get_label_weight_mask( + labels, ignore_label, num_classes, label_weights=label_weights) + label_weight_mask = sess.run(label_weight_mask) + self.assertAllEqual(label_weight_mask, expected_label_weight_mask) + + def testGetLabelWeightMask_withListLabelWeights(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + label_weights = [0.0, 0.1, 0.2, 0.3, 0.4] + expected_label_weight_mask = np.array([0.0, 0.0, 0.1, 0.3, 0.2], + dtype=np.float32) + + with self.test_session() as sess: + label_weight_mask = utils.get_label_weight_mask( + labels, ignore_label, num_classes, label_weights=label_weights) + label_weight_mask = sess.run(label_weight_mask) + self.assertAllEqual(label_weight_mask, expected_label_weight_mask) + + def testGetLabelWeightMask_withInvalidLabelWeightsType(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + + self.assertRaisesWithRegexpMatch( + ValueError, + '^The type of label_weights is invalid, it must be a float or a list', + utils.get_label_weight_mask, + labels=labels, + ignore_label=ignore_label, + num_classes=num_classes, + label_weights=None) + + def testGetLabelWeightMask_withInvalidLabelWeightsLength(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + label_weights = [0.0, 0.1, 0.2] + + self.assertRaisesWithRegexpMatch( + ValueError, + '^Length of label_weights must be equal to num_classes if it is a list', + utils.get_label_weight_mask, + labels=labels, + ignore_label=ignore_label, + num_classes=num_classes, + label_weights=label_weights) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/core/xception.py b/deeplab/models/research/deeplab/core/xception.py new file mode 100644 index 0000000..f992571 --- /dev/null +++ b/deeplab/models/research/deeplab/core/xception.py @@ -0,0 +1,945 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Xception model. 
+ +"Xception: Deep Learning with Depthwise Separable Convolutions" +Fran{\c{c}}ois Chollet +https://arxiv.org/abs/1610.02357 + +We implement the modified version by Jifeng Dai et al. for their COCO 2017 +detection challenge submission, where the model is made deeper and has aligned +features for dense prediction tasks. See their slides for details: + +"Deformable Convolutional Networks -- COCO Detection and Segmentation Challenge +2017 Entry" +Haozhi Qi, Zheng Zhang, Bin Xiao, Han Hu, Bowen Cheng, Yichen Wei and Jifeng Dai +ICCV 2017 COCO Challenge workshop +http://presentations.cocodataset.org/COCO17-Detect-MSRA.pdf + +We made a few more changes on top of MSRA's modifications: +1. Fully convolutional: All the max-pooling layers are replaced with separable + conv2d with stride = 2. This allows us to use atrous convolution to extract + feature maps at any resolution. + +2. We support adding ReLU and BatchNorm after depthwise convolution, motivated + by the design of MobileNetv1. + +"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision +Applications" +Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, +Tobias Weyand, Marco Andreetto, Hartwig Adam +https://arxiv.org/abs/1704.04861 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import utils +from tensorflow.contrib.slim.nets import resnet_utils +from nets.mobilenet import conv_blocks as mobilenet_v3_ops + +slim = contrib_slim + + +_DEFAULT_MULTI_GRID = [1, 1, 1] +# The cap for tf.clip_by_value. +_CLIP_CAP = 6 + + +class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])): + """A named tuple describing an Xception block. + + Its parts are: + scope: The scope of the block. + unit_fn: The Xception unit function which takes as input a tensor and + returns another tensor with the output of the Xception unit. + args: A list of length equal to the number of units in the block. The list + contains one dictionary for each unit in the block to serve as argument to + unit_fn. + """ + + +def fixed_padding(inputs, kernel_size, rate=1): + """Pads the input along the spatial dimensions independently of input size. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + Should be a positive integer. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). + """ + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], + [pad_beg, pad_end], [0, 0]]) + return padded_inputs + + +@slim.add_arg_scope +def separable_conv2d_same(inputs, + num_outputs, + kernel_size, + depth_multiplier, + stride, + rate=1, + use_explicit_padding=True, + regularize_depthwise=False, + scope=None, + **kwargs): + """Strided 2-D separable convolution with 'SAME' padding. + + If stride > 1 and use_explicit_padding is True, then we do explicit zero- + padding, followed by conv2d with 'VALID' padding. 
+ + Note that + + net = separable_conv2d_same(inputs, num_outputs, 3, + depth_multiplier=1, stride=stride) + + is equivalent to + + net = slim.separable_conv2d(inputs, num_outputs, 3, + depth_multiplier=1, stride=1, padding='SAME') + net = resnet_utils.subsample(net, factor=stride) + + whereas + + net = slim.separable_conv2d(inputs, num_outputs, 3, stride=stride, + depth_multiplier=1, padding='SAME') + + is different when the input's height or width is even, which is why we add the + current function. + + Consequently, if the input feature map has even height or width, setting + `use_explicit_padding=False` will result in feature misalignment by one pixel + along the corresponding dimension. + + Args: + inputs: A 4-D tensor of size [batch, height_in, width_in, channels]. + num_outputs: An integer, the number of output filters. + kernel_size: An int with the kernel_size of the filters. + depth_multiplier: The number of depthwise convolution output channels for + each input channel. The total number of depthwise convolution output + channels will be equal to `num_filters_in * depth_multiplier`. + stride: An integer, the output stride. + rate: An integer, rate for atrous convolution. + use_explicit_padding: If True, use explicit padding to make the model fully + compatible with the open source version, otherwise use the native + Tensorflow 'SAME' padding. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. + scope: Scope. + **kwargs: additional keyword arguments to pass to slim.conv2d + + Returns: + output: A 4-D tensor of size [batch, height_out, width_out, channels] with + the convolution output. + """ + def _separable_conv2d(padding): + """Wrapper for separable conv2d.""" + return slim.separable_conv2d(inputs, + num_outputs, + kernel_size, + depth_multiplier=depth_multiplier, + stride=stride, + rate=rate, + padding=padding, + scope=scope, + **kwargs) + def _split_separable_conv2d(padding): + """Splits separable conv2d into depthwise and pointwise conv2d.""" + outputs = slim.separable_conv2d(inputs, + None, + kernel_size, + depth_multiplier=depth_multiplier, + stride=stride, + rate=rate, + padding=padding, + scope=scope + '_depthwise', + **kwargs) + return slim.conv2d(outputs, + num_outputs, + 1, + scope=scope + '_pointwise', + **kwargs) + if stride == 1 or not use_explicit_padding: + if regularize_depthwise: + outputs = _separable_conv2d(padding='SAME') + else: + outputs = _split_separable_conv2d(padding='SAME') + else: + inputs = fixed_padding(inputs, kernel_size, rate) + if regularize_depthwise: + outputs = _separable_conv2d(padding='VALID') + else: + outputs = _split_separable_conv2d(padding='VALID') + return outputs + + +@slim.add_arg_scope +def xception_module(inputs, + depth_list, + skip_connection_type, + stride, + kernel_size=3, + unit_rate_list=None, + rate=1, + activation_fn_in_separable_conv=False, + regularize_depthwise=False, + outputs_collections=None, + scope=None, + use_bounded_activation=False, + use_explicit_padding=True, + use_squeeze_excite=False, + se_pool_size=None): + """An Xception module. + + The output of one Xception module is equal to the sum of `residual` and + `shortcut`, where `residual` is the feature computed by three separable + convolution. The `shortcut` is the feature computed by 1x1 convolution with + or without striding. In some cases, the `shortcut` path could be a simple + identity function or none (i.e, no shortcut). 
+ + Note that we replace the max pooling operations in the Xception module with + another separable convolution with striding, since atrous rate is not properly + supported in current TensorFlow max pooling implementation. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth_list: A list of three integers specifying the depth values of one + Xception module. + skip_connection_type: Skip connection type for the residual path. Only + supports 'conv', 'sum', or 'none'. + stride: The block unit's stride. Determines the amount of downsampling of + the units output compared to its input. + kernel_size: Integer, convolution kernel size. + unit_rate_list: A list of three integers, determining the unit rate for + each separable convolution in the xception module. + rate: An integer, rate for atrous convolution. + activation_fn_in_separable_conv: Includes activation function in the + separable convolution or not. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. + outputs_collections: Collection to add the Xception unit output. + scope: Optional variable_scope. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + use_explicit_padding: If True, use explicit padding to make the model fully + compatible with the open source version, otherwise use the native + Tensorflow 'SAME' padding. + use_squeeze_excite: Boolean, use squeeze-and-excitation or not. + se_pool_size: None or integer specifying the pooling size used in SE module. + + Returns: + The Xception module's output. + + Raises: + ValueError: If depth_list and unit_rate_list do not contain three elements, + or if stride != 1 for the third separable convolution operation in the + residual path, or unsupported skip connection type. + """ + if len(depth_list) != 3: + raise ValueError('Expect three elements in depth_list.') + if unit_rate_list: + if len(unit_rate_list) != 3: + raise ValueError('Expect three elements in unit_rate_list.') + + with tf.variable_scope(scope, 'xception_module', [inputs]) as sc: + residual = inputs + + def _separable_conv(features, depth, kernel_size, depth_multiplier, + regularize_depthwise, rate, stride, scope): + """Separable conv block.""" + if activation_fn_in_separable_conv: + activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu + else: + if use_bounded_activation: + # When use_bounded_activation is True, we clip the feature values and + # apply relu6 for activation. + activation_fn = lambda x: tf.clip_by_value(x, -_CLIP_CAP, _CLIP_CAP) + features = tf.nn.relu6(features) + else: + # Original network design. 
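+          # (The input features are passed through ReLU below and no
+          # activation function is applied inside the separable convolution.)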
+ activation_fn = None + features = tf.nn.relu(features) + return separable_conv2d_same(features, + depth, + kernel_size, + depth_multiplier=depth_multiplier, + stride=stride, + rate=rate, + activation_fn=activation_fn, + use_explicit_padding=use_explicit_padding, + regularize_depthwise=regularize_depthwise, + scope=scope) + for i in range(3): + residual = _separable_conv(residual, + depth_list[i], + kernel_size=kernel_size, + depth_multiplier=1, + regularize_depthwise=regularize_depthwise, + rate=rate*unit_rate_list[i], + stride=stride if i == 2 else 1, + scope='separable_conv' + str(i+1)) + if use_squeeze_excite: + residual = mobilenet_v3_ops.squeeze_excite( + input_tensor=residual, + squeeze_factor=16, + inner_activation_fn=tf.nn.relu, + gating_fn=lambda x: tf.nn.relu6(x+3)*0.16667, + pool=se_pool_size) + + if skip_connection_type == 'conv': + shortcut = slim.conv2d(inputs, + depth_list[-1], + [1, 1], + stride=stride, + activation_fn=None, + scope='shortcut') + if use_bounded_activation: + residual = tf.clip_by_value(residual, -_CLIP_CAP, _CLIP_CAP) + shortcut = tf.clip_by_value(shortcut, -_CLIP_CAP, _CLIP_CAP) + outputs = residual + shortcut + if use_bounded_activation: + outputs = tf.nn.relu6(outputs) + elif skip_connection_type == 'sum': + if use_bounded_activation: + residual = tf.clip_by_value(residual, -_CLIP_CAP, _CLIP_CAP) + inputs = tf.clip_by_value(inputs, -_CLIP_CAP, _CLIP_CAP) + outputs = residual + inputs + if use_bounded_activation: + outputs = tf.nn.relu6(outputs) + elif skip_connection_type == 'none': + outputs = residual + else: + raise ValueError('Unsupported skip connection type.') + + return slim.utils.collect_named_outputs(outputs_collections, + sc.name, + outputs) + + +@slim.add_arg_scope +def stack_blocks_dense(net, + blocks, + output_stride=None, + outputs_collections=None): + """Stacks Xception blocks and controls output feature density. + + First, this function creates scopes for the Xception in the form of + 'block_name/unit_1', 'block_name/unit_2', etc. + + Second, this function allows the user to explicitly control the output + stride, which is the ratio of the input to output spatial resolution. This + is useful for dense prediction tasks such as semantic segmentation or + object detection. + + Control of the output feature density is implemented by atrous convolution. + + Args: + net: A tensor of size [batch, height, width, channels]. + blocks: A list of length equal to the number of Xception blocks. Each + element is an Xception Block object describing the units in the block. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution, which needs to be equal to + the product of unit strides from the start up to some level of Xception. + For example, if the Xception employs units with strides 1, 2, 1, 3, 4, 1, + then valid values for the output_stride are 1, 2, 6, 24 or None (which + is equivalent to output_stride=24). + outputs_collections: Collection to add the Xception block outputs. + + Returns: + net: Output tensor with stride equal to the specified output_stride. + + Raises: + ValueError: If the target output_stride is not valid. + """ + # The current_stride variable keeps track of the effective stride of the + # activations. This allows us to invoke atrous convolution whenever applying + # the next residual unit would result in the activations having stride larger + # than the target output_stride. 
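+  # For example, with output_stride=8, stride-2 units are applied normally
+  # until current_stride reaches 8; any later unit with a nominal stride of 2
+  # is then run with stride=1 while `rate` is doubled, so the spatial
+  # resolution stays fixed while the receptive field keeps growing.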
+ current_stride = 1 + + # The atrous convolution rate parameter. + rate = 1 + + for block in blocks: + with tf.variable_scope(block.scope, 'block', [net]) as sc: + for i, unit in enumerate(block.args): + if output_stride is not None and current_stride > output_stride: + raise ValueError('The target output_stride cannot be reached.') + with tf.variable_scope('unit_%d' % (i + 1), values=[net]): + # If we have reached the target output_stride, then we need to employ + # atrous convolution with stride=1 and multiply the atrous rate by the + # current unit's stride for use in subsequent layers. + if output_stride is not None and current_stride == output_stride: + net = block.unit_fn(net, rate=rate, **dict(unit, stride=1)) + rate *= unit.get('stride', 1) + else: + net = block.unit_fn(net, rate=1, **unit) + current_stride *= unit.get('stride', 1) + + # Collect activations at the block's end before performing subsampling. + net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) + + if output_stride is not None and current_stride != output_stride: + raise ValueError('The target output_stride cannot be reached.') + + return net + + +def xception(inputs, + blocks, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + reuse=None, + scope=None, + sync_batch_norm_method='None'): + """Generator for Xception models. + + This function generates a family of Xception models. See the xception_*() + methods for specific model instantiations, obtained by selecting different + block instantiations that produce Xception of various depths. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. Must be + floating point. If a pretrained checkpoint is used, pixel values should be + the same as during training (see go/slim-classification-models for + specifics). + blocks: A list of length equal to the number of Xception blocks. Each + element is an Xception Block object describing the units in the block. + num_classes: Number of predicted classes for classification tasks. + If 0 or None, we return the features before the logit layer. + is_training: whether batch_norm layers are in training mode. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + keep_prob: Keep probability used in the pre-logits dropout layer. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. Currently only + support `None`. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is 0 or None, + then net is the output of the last Xception block, potentially after + global average pooling. If num_classes is a non-zero integer, net contains + the pre-softmax activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: If the target output_stride is not valid. 
+ """ + with tf.variable_scope( + scope, 'xception', [inputs], reuse=reuse) as sc: + end_points_collection = sc.original_name_scope + 'end_points' + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + with slim.arg_scope([slim.conv2d, + slim.separable_conv2d, + xception_module, + stack_blocks_dense], + outputs_collections=end_points_collection): + with slim.arg_scope([batch_norm], is_training=is_training): + net = inputs + if output_stride is not None: + if output_stride % 2 != 0: + raise ValueError('The output_stride needs to be a multiple of 2.') + output_stride //= 2 + # Root block function operated on inputs. + net = resnet_utils.conv2d_same(net, 32, 3, stride=2, + scope='entry_flow/conv1_1') + net = resnet_utils.conv2d_same(net, 64, 3, stride=1, + scope='entry_flow/conv1_2') + + # Extract features for entry_flow, middle_flow, and exit_flow. + net = stack_blocks_dense(net, blocks, output_stride) + + # Convert end_points_collection into a dictionary of end_points. + end_points = slim.utils.convert_collection_to_dict( + end_points_collection, clear_collection=True) + + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='global_pool', keepdims=True) + end_points['global_pool'] = net + if num_classes: + net = slim.dropout(net, keep_prob=keep_prob, is_training=is_training, + scope='prelogits_dropout') + net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, + normalizer_fn=None, scope='logits') + end_points[sc.name + '/logits'] = net + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points + + +def xception_block(scope, + depth_list, + skip_connection_type, + activation_fn_in_separable_conv, + regularize_depthwise, + num_units, + stride, + kernel_size=3, + unit_rate_list=None, + use_squeeze_excite=False, + se_pool_size=None): + """Helper function for creating a Xception block. + + Args: + scope: The scope of the block. + depth_list: The depth of the bottleneck layer for each unit. + skip_connection_type: Skip connection type for the residual path. Only + supports 'conv', 'sum', or 'none'. + activation_fn_in_separable_conv: Includes activation function in the + separable convolution or not. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. + All other units have stride=1. + kernel_size: Integer, convolution kernel size. + unit_rate_list: A list of three integers, determining the unit rate in the + corresponding xception block. + use_squeeze_excite: Boolean, use squeeze-and-excitation or not. + se_pool_size: None or integer specifying the pooling size used in SE module. + + Returns: + An Xception block. 
+ """ + if unit_rate_list is None: + unit_rate_list = _DEFAULT_MULTI_GRID + return Block(scope, xception_module, [{ + 'depth_list': depth_list, + 'skip_connection_type': skip_connection_type, + 'activation_fn_in_separable_conv': activation_fn_in_separable_conv, + 'regularize_depthwise': regularize_depthwise, + 'stride': stride, + 'kernel_size': kernel_size, + 'unit_rate_list': unit_rate_list, + 'use_squeeze_excite': use_squeeze_excite, + 'se_pool_size': se_pool_size, + }] * num_units) + + +def xception_41(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + multi_grid=None, + reuse=None, + scope='xception_41', + sync_batch_norm_method='None'): + """Xception-41 model.""" + blocks = [ + xception_block('entry_flow/block1', + depth_list=[128, 128, 128], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('entry_flow/block2', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('entry_flow/block3', + depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('middle_flow/block1', + depth_list=[728, 728, 728], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=8, + stride=1), + xception_block('exit_flow/block1', + depth_list=[728, 1024, 1024], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('exit_flow/block2', + depth_list=[1536, 1536, 2048], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + unit_rate_list=multi_grid), + ] + return xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_65_factory(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + kernel_size=3, + multi_grid=None, + reuse=None, + use_squeeze_excite=False, + se_pool_size=None, + scope='xception_65', + sync_batch_norm_method='None'): + """Xception-65 model factory.""" + blocks = [ + xception_block('entry_flow/block1', + depth_list=[128, 128, 128], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block2', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block3', + depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + 
use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('middle_flow/block1', + depth_list=[728, 728, 728], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=16, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block1', + depth_list=[728, 1024, 1024], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block2', + depth_list=[1536, 1536, 2048], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + unit_rate_list=multi_grid, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + ] + return xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_65(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + multi_grid=None, + reuse=None, + scope='xception_65', + sync_batch_norm_method='None'): + """Xception-65 model.""" + return xception_65_factory( + inputs=inputs, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + regularize_depthwise=regularize_depthwise, + multi_grid=multi_grid, + reuse=reuse, + scope=scope, + use_squeeze_excite=False, + se_pool_size=None, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_71_factory(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + kernel_size=3, + multi_grid=None, + reuse=None, + scope='xception_71', + use_squeeze_excite=False, + se_pool_size=None, + sync_batch_norm_method='None'): + """Xception-71 model factory.""" + blocks = [ + xception_block('entry_flow/block1', + depth_list=[128, 128, 128], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block2', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block3', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block4', + depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('entry_flow/block5', + 
depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('middle_flow/block1', + depth_list=[728, 728, 728], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=16, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block1', + depth_list=[728, 1024, 1024], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block2', + depth_list=[1536, 1536, 2048], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + unit_rate_list=multi_grid, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + ] + return xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_71(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + multi_grid=None, + reuse=None, + scope='xception_71', + sync_batch_norm_method='None'): + """Xception-71 model.""" + return xception_71_factory( + inputs=inputs, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + regularize_depthwise=regularize_depthwise, + multi_grid=multi_grid, + reuse=reuse, + scope=scope, + use_squeeze_excite=False, + se_pool_size=None, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_arg_scope(weight_decay=0.00004, + batch_norm_decay=0.9997, + batch_norm_epsilon=0.001, + batch_norm_scale=True, + weights_initializer_stddev=0.09, + regularize_depthwise=False, + use_batch_norm=True, + use_bounded_activation=False, + sync_batch_norm_method='None'): + """Defines the default Xception arg scope. + + Args: + weight_decay: The weight decay to use for regularizing the model. + batch_norm_decay: The moving average decay when estimating layer activation + statistics in batch normalization. + batch_norm_epsilon: Small constant to prevent division by zero when + normalizing activations by their variance in batch normalization. + batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + weights_initializer_stddev: The standard deviation of the trunctated normal + weight initializer. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. + use_batch_norm: Whether or not to use batch normalization. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + sync_batch_norm_method: String, sync batchnorm method. Currently only + support `None`. Also, it is only effective for Xception. + + Returns: + An `arg_scope` to use for the Xception models. 
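+
+  Example (the usage pattern exercised in xception_test.py):
+
+    with slim.arg_scope(xception_arg_scope()):
+      net, end_points = xception_65(inputs, num_classes, global_pool=False)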
+ """ + batch_norm_params = { + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon, + 'scale': batch_norm_scale, + } + if regularize_depthwise: + depthwise_regularizer = slim.l2_regularizer(weight_decay) + else: + depthwise_regularizer = None + activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_initializer=tf.truncated_normal_initializer( + stddev=weights_initializer_stddev), + activation_fn=activation_fn, + normalizer_fn=batch_norm if use_batch_norm else None): + with slim.arg_scope([batch_norm], **batch_norm_params): + with slim.arg_scope( + [slim.conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay)): + with slim.arg_scope( + [slim.separable_conv2d], + weights_regularizer=depthwise_regularizer): + with slim.arg_scope( + [xception_module], + use_bounded_activation=use_bounded_activation, + use_explicit_padding=not use_bounded_activation) as arg_sc: + return arg_sc diff --git a/deeplab/models/research/deeplab/core/xception_test.py b/deeplab/models/research/deeplab/core/xception_test.py new file mode 100644 index 0000000..fc338da --- /dev/null +++ b/deeplab/models/research/deeplab/core/xception_test.py @@ -0,0 +1,488 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for xception.py.""" +import numpy as np +import six +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import xception +from tensorflow.contrib.slim.nets import resnet_utils + +slim = contrib_slim + + +def create_test_input(batch, height, width, channels): + """Create test input tensor.""" + if None in [batch, height, width, channels]: + return tf.placeholder(tf.float32, (batch, height, width, channels)) + else: + return tf.cast( + np.tile( + np.reshape( + np.reshape(np.arange(height), [height, 1]) + + np.reshape(np.arange(width), [1, width]), + [1, height, width, 1]), + [batch, 1, 1, channels]), + tf.float32) + + +class UtilityFunctionTest(tf.test.TestCase): + + def testSeparableConv2DSameWithInputEvenSize(self): + n, n2 = 4, 2 + + # Input image. + x = create_test_input(1, n, n, 1) + + # Convolution kernel. 
+ dw = create_test_input(1, 3, 3, 1) + dw = tf.reshape(dw, [3, 3, 1, 1]) + + tf.get_variable('Conv/depthwise_weights', initializer=dw) + tf.get_variable('Conv/pointwise_weights', + initializer=tf.ones([1, 1, 1, 1])) + tf.get_variable('Conv/biases', initializer=tf.zeros([1])) + tf.get_variable_scope().reuse_variables() + + y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=1, scope='Conv') + y1_expected = tf.cast([[14, 28, 43, 26], + [28, 48, 66, 37], + [43, 66, 84, 46], + [26, 37, 46, 22]], tf.float32) + y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) + + y2 = resnet_utils.subsample(y1, 2) + y2_expected = tf.cast([[14, 43], + [43, 84]], tf.float32) + y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) + + y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1, + regularize_depthwise=True, + stride=2, scope='Conv') + y3_expected = y2_expected + + y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=2, scope='Conv') + y4_expected = tf.cast([[48, 37], + [37, 22]], tf.float32) + y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1]) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertAllClose(y1.eval(), y1_expected.eval()) + self.assertAllClose(y2.eval(), y2_expected.eval()) + self.assertAllClose(y3.eval(), y3_expected.eval()) + self.assertAllClose(y4.eval(), y4_expected.eval()) + + def testSeparableConv2DSameWithInputOddSize(self): + n, n2 = 5, 3 + + # Input image. + x = create_test_input(1, n, n, 1) + + # Convolution kernel. + dw = create_test_input(1, 3, 3, 1) + dw = tf.reshape(dw, [3, 3, 1, 1]) + + tf.get_variable('Conv/depthwise_weights', initializer=dw) + tf.get_variable('Conv/pointwise_weights', + initializer=tf.ones([1, 1, 1, 1])) + tf.get_variable('Conv/biases', initializer=tf.zeros([1])) + tf.get_variable_scope().reuse_variables() + + y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=1, scope='Conv') + y1_expected = tf.cast([[14, 28, 43, 58, 34], + [28, 48, 66, 84, 46], + [43, 66, 84, 102, 55], + [58, 84, 102, 120, 64], + [34, 46, 55, 64, 30]], tf.float32) + y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) + + y2 = resnet_utils.subsample(y1, 2) + y2_expected = tf.cast([[14, 43, 34], + [43, 84, 55], + [34, 55, 30]], tf.float32) + y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) + + y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1, + regularize_depthwise=True, + stride=2, scope='Conv') + y3_expected = y2_expected + + y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=2, scope='Conv') + y4_expected = y2_expected + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertAllClose(y1.eval(), y1_expected.eval()) + self.assertAllClose(y2.eval(), y2_expected.eval()) + self.assertAllClose(y3.eval(), y3_expected.eval()) + self.assertAllClose(y4.eval(), y4_expected.eval()) + + +class XceptionNetworkTest(tf.test.TestCase): + """Tests with small Xception network.""" + + def _xception_small(self, + inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + regularize_depthwise=True, + reuse=None, + scope='xception_small'): + """A shallow and thin Xception for faster tests.""" + block = xception.xception_block + blocks = [ + block('entry_flow/block1', + depth_list=[1, 1, 1], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('entry_flow/block2', + depth_list=[2, 2, 2], + 
skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('entry_flow/block3', + depth_list=[4, 4, 4], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1), + block('entry_flow/block4', + depth_list=[4, 4, 4], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('middle_flow/block1', + depth_list=[4, 4, 4], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=2, + stride=1), + block('exit_flow/block1', + depth_list=[8, 8, 8], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('exit_flow/block2', + depth_list=[16, 16, 16], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1), + ] + return xception.xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + reuse=reuse, + scope=scope) + + def testClassificationEndPoints(self): + global_pool = True + num_classes = 3 + inputs = create_test_input(2, 32, 32, 3) + with slim.arg_scope(xception.xception_arg_scope()): + logits, end_points = self._xception_small( + inputs, + num_classes=num_classes, + global_pool=global_pool, + scope='xception') + self.assertTrue( + logits.op.name.startswith('xception/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertTrue('predictions' in end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + self.assertTrue('global_pool' in end_points) + self.assertListEqual(end_points['global_pool'].get_shape().as_list(), + [2, 1, 1, 16]) + + def testEndpointNames(self): + global_pool = True + num_classes = 3 + inputs = create_test_input(2, 32, 32, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes=num_classes, + global_pool=global_pool, + scope='xception') + expected = [ + 'xception/entry_flow/conv1_1', + 'xception/entry_flow/conv1_2', + 'xception/entry_flow/block1/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block1/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block1/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block1/unit_1/xception_module/shortcut', + 'xception/entry_flow/block1/unit_1/xception_module', + 'xception/entry_flow/block1', + 'xception/entry_flow/block2/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block2/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block2/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block2/unit_1/xception_module/shortcut', + 'xception/entry_flow/block2/unit_1/xception_module', + 'xception/entry_flow/block2', + 'xception/entry_flow/block3/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block3/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block3/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block3/unit_1/xception_module/shortcut', + 'xception/entry_flow/block3/unit_1/xception_module', + 'xception/entry_flow/block3', + 
'xception/entry_flow/block4/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block4/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block4/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block4/unit_1/xception_module/shortcut', + 'xception/entry_flow/block4/unit_1/xception_module', + 'xception/entry_flow/block4', + 'xception/middle_flow/block1/unit_1/xception_module/separable_conv1', + 'xception/middle_flow/block1/unit_1/xception_module/separable_conv2', + 'xception/middle_flow/block1/unit_1/xception_module/separable_conv3', + 'xception/middle_flow/block1/unit_1/xception_module', + 'xception/middle_flow/block1/unit_2/xception_module/separable_conv1', + 'xception/middle_flow/block1/unit_2/xception_module/separable_conv2', + 'xception/middle_flow/block1/unit_2/xception_module/separable_conv3', + 'xception/middle_flow/block1/unit_2/xception_module', + 'xception/middle_flow/block1', + 'xception/exit_flow/block1/unit_1/xception_module/separable_conv1', + 'xception/exit_flow/block1/unit_1/xception_module/separable_conv2', + 'xception/exit_flow/block1/unit_1/xception_module/separable_conv3', + 'xception/exit_flow/block1/unit_1/xception_module/shortcut', + 'xception/exit_flow/block1/unit_1/xception_module', + 'xception/exit_flow/block1', + 'xception/exit_flow/block2/unit_1/xception_module/separable_conv1', + 'xception/exit_flow/block2/unit_1/xception_module/separable_conv2', + 'xception/exit_flow/block2/unit_1/xception_module/separable_conv3', + 'xception/exit_flow/block2/unit_1/xception_module', + 'xception/exit_flow/block2', + 'global_pool', + 'xception/logits', + 'predictions', + ] + self.assertItemsEqual(list(end_points.keys()), expected) + + def testClassificationShapes(self): + global_pool = True + num_classes = 3 + inputs = create_test_input(2, 64, 64, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + scope='xception') + endpoint_to_shape = { + 'xception/entry_flow/conv1_1': [2, 32, 32, 32], + 'xception/entry_flow/block1': [2, 16, 16, 1], + 'xception/entry_flow/block2': [2, 8, 8, 2], + 'xception/entry_flow/block4': [2, 4, 4, 4], + 'xception/middle_flow/block1': [2, 4, 4, 4], + 'xception/exit_flow/block1': [2, 2, 2, 8], + 'xception/exit_flow/block2': [2, 2, 2, 16]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 3 + inputs = create_test_input(2, 65, 65, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + scope='xception') + endpoint_to_shape = { + 'xception/entry_flow/conv1_1': [2, 33, 33, 32], + 'xception/entry_flow/block1': [2, 17, 17, 1], + 'xception/entry_flow/block2': [2, 9, 9, 2], + 'xception/entry_flow/block4': [2, 5, 5, 4], + 'xception/middle_flow/block1': [2, 5, 5, 4], + 'xception/exit_flow/block1': [2, 3, 3, 8], + 'xception/exit_flow/block2': [2, 3, 3, 16]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 3 + output_stride = 8 + inputs = create_test_input(2, 65, 65, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes, + 
global_pool=global_pool, + output_stride=output_stride, + scope='xception') + endpoint_to_shape = { + 'xception/entry_flow/block1': [2, 17, 17, 1], + 'xception/entry_flow/block2': [2, 9, 9, 2], + 'xception/entry_flow/block4': [2, 9, 9, 4], + 'xception/middle_flow/block1': [2, 9, 9, 4], + 'xception/exit_flow/block1': [2, 9, 9, 8], + 'xception/exit_flow/block2': [2, 9, 9, 16]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalValues(self): + """Verify dense feature extraction with atrous convolution.""" + nominal_stride = 32 + for output_stride in [4, 8, 16, 32, None]: + with slim.arg_scope(xception.xception_arg_scope()): + with tf.Graph().as_default(): + with self.test_session() as sess: + tf.set_random_seed(0) + inputs = create_test_input(2, 96, 97, 3) + # Dense feature extraction followed by subsampling. + output, _ = self._xception_small( + inputs, + None, + is_training=False, + global_pool=False, + output_stride=output_stride) + if output_stride is None: + factor = 1 + else: + factor = nominal_stride // output_stride + output = resnet_utils.subsample(output, factor) + # Make the two networks use the same weights. + tf.get_variable_scope().reuse_variables() + # Feature extraction at the nominal network rate. + expected, _ = self._xception_small( + inputs, + None, + is_training=False, + global_pool=False) + sess.run(tf.global_variables_initializer()) + self.assertAllClose(output.eval(), expected.eval(), + atol=1e-5, rtol=1e-5) + + def testUnknownBatchSize(self): + batch = 2 + height, width = 65, 65 + global_pool = True + num_classes = 10 + inputs = create_test_input(None, height, width, 3) + with slim.arg_scope(xception.xception_arg_scope()): + logits, _ = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + scope='xception') + self.assertTrue(logits.op.name.startswith('xception/logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, 1, 1, num_classes]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEquals(output.shape, (batch, 1, 1, num_classes)) + + def testFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(xception.xception_arg_scope()): + output, _ = self._xception_small( + inputs, + None, + global_pool=global_pool) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 16]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEquals(output.shape, (batch, 3, 3, 16)) + + def testAtrousFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + output_stride = 8 + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(xception.xception_arg_scope()): + output, _ = self._xception_small( + inputs, + None, + global_pool=global_pool, + output_stride=output_stride) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 16]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + 
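+      # With output_stride=8, the 65x65 test images reduce to 9x9 feature
+      # maps, which is the shape asserted below.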
self.assertEquals(output.shape, (batch, 9, 9, 16)) + + def testEndpointsReuse(self): + inputs = create_test_input(2, 32, 32, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points0 = xception.xception_65( + inputs, + num_classes=10, + reuse=False) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points1 = xception.xception_65( + inputs, + num_classes=10, + reuse=True) + self.assertItemsEqual(list(end_points0.keys()), list(end_points1.keys())) + + def testUseBoundedAcitvation(self): + global_pool = False + num_classes = 3 + output_stride = 16 + for use_bounded_activation in (True, False): + tf.reset_default_graph() + inputs = create_test_input(2, 65, 65, 3) + with slim.arg_scope(xception.xception_arg_scope( + use_bounded_activation=use_bounded_activation)): + _, _ = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + output_stride=output_stride, + scope='xception') + for node in tf.get_default_graph().as_graph_def().node: + if node.op.startswith('Relu'): + self.assertEqual(node.op == 'Relu6', use_bounded_activation) + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/train.txt b/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/train.txt new file mode 100644 index 0000000..4cb3d73 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/train.txt @@ -0,0 +1,3 @@ +Heart_O_img_0 +Heart_O_img_1 +Heart_O_img_2 \ No newline at end of file diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/trainval.txt b/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/trainval.txt new file mode 100644 index 0000000..8c158fb --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/trainval.txt @@ -0,0 +1,5 @@ +Heart_O_img_0 +Heart_O_img_1 +Heart_O_img_2 +Heart_O_img_3 +Heart_O_img_4 \ No newline at end of file diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/val.txt b/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/val.txt new file mode 100644 index 0000000..c154e99 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/PQR/dataset/ImageSets/val.txt @@ -0,0 +1,2 @@ +Heart_O_img_3 +Heart_O_img_4 \ No newline at end of file diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_0.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_0.jpg new file mode 100644 index 0000000..fcbc9ab Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_0.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_1.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_1.jpg new file mode 100644 index 0000000..b35cac0 Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_1.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_2.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_2.jpg new file mode 100644 index 0000000..5febf5e Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_2.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_3.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_3.jpg new file mode 100644 index 
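A quick note on the endpoint shapes asserted in the xception tests above: for 65x65 inputs the expected spatial sizes follow directly from the padding convention used by these slim networks. A minimal stand-alone sketch of that arithmetic (plain Python, added here only for illustration):

def expected_feature_size(input_size, output_stride):
    # With the 'SAME'/fixed padding used by these networks, a spatial
    # dimension of input_size maps to (input_size - 1) // output_stride + 1
    # feature locations.
    return (input_size - 1) // output_stride + 1

# 65x65 inputs: stride 8 -> 9, stride 16 -> 5, stride 32 -> 3, matching the
# [.., 9, 9, ..], [.., 5, 5, ..] and [.., 3, 3, ..] shapes asserted above.
print([expected_feature_size(65, s) for s in (8, 16, 32)])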
0000000..8cbabc2 Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_3.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_4.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_4.jpg new file mode 100644 index 0000000..1212a0f Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_4.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_0.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_0.jpg new file mode 100644 index 0000000..ca48633 Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_0.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_1.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_1.jpg new file mode 100644 index 0000000..0d0f771 Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_1.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_2.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_2.jpg new file mode 100644 index 0000000..56d3ed3 Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_2.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_3.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_3.jpg new file mode 100644 index 0000000..18dd87a Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_3.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_4.jpg b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_4.jpg new file mode 100644 index 0000000..e517088 Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClass/Heart_O_img_4.jpg differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_0.png b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_0.png new file mode 100644 index 0000000..fb1b47a Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_0.png differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_1.png b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_1.png new file mode 100644 index 0000000..fb1b47a Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_1.png differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_2.png b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_2.png new file mode 100644 index 0000000..fb1b47a Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_2.png differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_3.png 
b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_3.png new file mode 100644 index 0000000..fb1b47a Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_3.png differ diff --git a/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_4.png b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_4.png new file mode 100644 index 0000000..fb1b47a Binary files /dev/null and b/deeplab/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw/Heart_O_img_4.png differ diff --git a/deeplab/models/research/deeplab/datasets/__init__.py b/deeplab/models/research/deeplab/datasets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deeplab/models/research/deeplab/datasets/build_ade20k_data.py b/deeplab/models/research/deeplab/datasets/build_ade20k_data.py new file mode 100644 index 0000000..fc04ed0 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/build_ade20k_data.py @@ -0,0 +1,123 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts ADE20K data to TFRecord file format with Example protos.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +import os +import random +import sys +import build_data +from six.moves import range +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string( + 'train_image_folder', + './ADE20K/ADEChallengeData2016/images/training', + 'Folder containing trainng images') +tf.app.flags.DEFINE_string( + 'train_image_label_folder', + './ADE20K/ADEChallengeData2016/annotations/training', + 'Folder containing annotations for trainng images') + +tf.app.flags.DEFINE_string( + 'val_image_folder', + './ADE20K/ADEChallengeData2016/images/validation', + 'Folder containing validation images') + +tf.app.flags.DEFINE_string( + 'val_image_label_folder', + './ADE20K/ADEChallengeData2016/annotations/validation', + 'Folder containing annotations for validation') + +tf.app.flags.DEFINE_string( + 'output_dir', './ADE20K/tfrecord', + 'Path to save converted tfrecord of Tensorflow example') + +_NUM_SHARDS = 4 + + +def _convert_dataset(dataset_split, dataset_dir, dataset_label_dir): + """Converts the ADE20k dataset into into tfrecord format. + + Args: + dataset_split: Dataset split (e.g., train, val). + dataset_dir: Dir in which the dataset locates. + dataset_label_dir: Dir in which the annotations locates. + + Raises: + RuntimeError: If loaded image and label have different shape. 
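The sharding logic used by this and the other conversion scripts is easy to check by hand. A minimal plain-Python sketch of the same start/end index computation, using the five trainval PQR images and _NUM_SHARDS = 4 as an example:

import math

def shard_ranges(num_images, num_shards):
    # Each shard receives a contiguous slice of at most
    # ceil(num_images / num_shards) images, as in _convert_dataset.
    num_per_shard = int(math.ceil(num_images / num_shards))
    return [(shard_id * num_per_shard,
             min((shard_id + 1) * num_per_shard, num_images))
            for shard_id in range(num_shards)]

# [(0, 2), (2, 4), (4, 5), (6, 5)] -- the final range is empty, so the last
# shard file is still created but contains no records.
print(shard_ranges(5, 4))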
+ """ + + img_names = tf.gfile.Glob(os.path.join(dataset_dir, '*.jpg')) + random.shuffle(img_names) + seg_names = [] + for f in img_names: + # get the filename without the extension + basename = os.path.basename(f).split('.')[0] + # cover its corresponding *_seg.png + seg = os.path.join(dataset_label_dir, basename+'.png') + seg_names.append(seg) + + num_images = len(img_names) + num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) + + image_reader = build_data.ImageReader('jpeg', channels=3) + label_reader = build_data.ImageReader('png', channels=1) + + for shard_id in range(_NUM_SHARDS): + output_filename = os.path.join( + FLAGS.output_dir, + '%s-%05d-of-%05d.tfrecord' % (dataset_split, shard_id, _NUM_SHARDS)) + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + start_idx = shard_id * num_per_shard + end_idx = min((shard_id + 1) * num_per_shard, num_images) + for i in range(start_idx, end_idx): + sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( + i + 1, num_images, shard_id)) + sys.stdout.flush() + # Read the image. + image_filename = img_names[i] + image_data = tf.gfile.FastGFile(image_filename, 'rb').read() + height, width = image_reader.read_image_dims(image_data) + # Read the semantic segmentation annotation. + seg_filename = seg_names[i] + seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read() + seg_height, seg_width = label_reader.read_image_dims(seg_data) + if height != seg_height or width != seg_width: + raise RuntimeError('Shape mismatched between image and label.') + # Convert to tf example. + example = build_data.image_seg_to_tfexample( + image_data, img_names[i], height, width, seg_data) + tfrecord_writer.write(example.SerializeToString()) + sys.stdout.write('\n') + sys.stdout.flush() + + +def main(unused_argv): + tf.gfile.MakeDirs(FLAGS.output_dir) + _convert_dataset( + 'train', FLAGS.train_image_folder, FLAGS.train_image_label_folder) + _convert_dataset('val', FLAGS.val_image_folder, FLAGS.val_image_label_folder) + + +if __name__ == '__main__': + tf.app.run() diff --git a/deeplab/models/research/deeplab/datasets/build_cityscapes_data.py b/deeplab/models/research/deeplab/datasets/build_cityscapes_data.py new file mode 100644 index 0000000..53c11e3 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/build_cityscapes_data.py @@ -0,0 +1,198 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts Cityscapes data to TFRecord file format with Example protos. + +The Cityscapes dataset is expected to have the following directory structure: + + + cityscapes + - build_cityscapes_data.py (current working directiory). 
+ - build_data.py + + cityscapesscripts + + annotation + + evaluation + + helpers + + preparation + + viewer + + gtFine + + train + + val + + test + + leftImg8bit + + train + + val + + test + + tfrecord + +This script converts data into sharded data files and save at tfrecord folder. + +Note that before running this script, the users should (1) register the +Cityscapes dataset website at https://www.cityscapes-dataset.com to +download the dataset, and (2) run the script provided by Cityscapes +`preparation/createTrainIdLabelImgs.py` to generate the training groundtruth. + +Also note that the tensorflow model will be trained with `TrainId' instead +of `EvalId' used on the evaluation server. Thus, the users need to convert +the predicted labels to `EvalId` for evaluation on the server. See the +vis.py for more details. + +The Example proto contains the following fields: + + image/encoded: encoded image content. + image/filename: image filename. + image/format: image file format. + image/height: image height. + image/width: image width. + image/channels: image channels. + image/segmentation/class/encoded: encoded semantic segmentation content. + image/segmentation/class/format: semantic segmentation file format. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import glob +import math +import os.path +import re +import sys +import build_data +from six.moves import range +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('cityscapes_root', + './cityscapes', + 'Cityscapes dataset root folder.') + +tf.app.flags.DEFINE_string( + 'output_dir', + './tfrecord', + 'Path to save converted SSTable of TensorFlow examples.') + + +_NUM_SHARDS = 10 + +# A map from data type to folder name that saves the data. +_FOLDERS_MAP = { + 'image': 'leftImg8bit', + 'label': 'gtFine', +} + +# A map from data type to filename postfix. +_POSTFIX_MAP = { + 'image': '_leftImg8bit', + 'label': '_gtFine_labelTrainIds', +} + +# A map from data type to data format. +_DATA_FORMAT_MAP = { + 'image': 'png', + 'label': 'png', +} + +# Image file pattern. +_IMAGE_FILENAME_RE = re.compile('(.+)' + _POSTFIX_MAP['image']) + + +def _get_files(data, dataset_split): + """Gets files for the specified data type and dataset split. + + Args: + data: String, desired data ('image' or 'label'). + dataset_split: String, dataset split ('train_fine', 'val_fine', 'test_fine') + + Returns: + A list of sorted file names or None when getting label for + test set. + """ + if dataset_split == 'train_fine': + split_dir = 'train' + elif dataset_split == 'val_fine': + split_dir = 'val' + elif dataset_split == 'test_fine': + split_dir = 'test' + else: + raise RuntimeError("Split {} is not supported".format(dataset_split)) + pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data]) + search_files = os.path.join( + FLAGS.cityscapes_root, _FOLDERS_MAP[data], split_dir, '*', pattern) + filenames = glob.glob(search_files) + return sorted(filenames) + + +def _convert_dataset(dataset_split): + """Converts the specified dataset split to TFRecord format. + + Args: + dataset_split: The dataset split (e.g., train_fine, val_fine). + + Raises: + RuntimeError: If loaded image and label have different shape, or if the + image file with specified postfix could not be found. 
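For reference, the glob pattern assembled by _get_files can be reproduced stand-alone; the sketch below mirrors the maps defined in this script and is only an illustration of the expected directory layout:

import os

_FOLDERS_MAP = {'image': 'leftImg8bit', 'label': 'gtFine'}
_POSTFIX_MAP = {'image': '_leftImg8bit', 'label': '_gtFine_labelTrainIds'}
_DATA_FORMAT_MAP = {'image': 'png', 'label': 'png'}

def search_pattern(cityscapes_root, data, split_dir):
    # One level of city sub-directories, then files ending in the
    # type-specific postfix, exactly as _get_files builds before glob.glob.
    pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data])
    return os.path.join(cityscapes_root, _FOLDERS_MAP[data], split_dir, '*', pattern)

# e.g. './cityscapes/leftImg8bit/train/*/*_leftImg8bit.png'
print(search_pattern('./cityscapes', 'image', 'train'))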
+ """ + image_files = _get_files('image', dataset_split) + label_files = _get_files('label', dataset_split) + + num_images = len(image_files) + num_labels = len(label_files) + num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) + + if num_images != num_labels: + raise RuntimeError("The number of images and labels doesn't match: {} {}".format(num_images, num_labels)) + + image_reader = build_data.ImageReader('png', channels=3) + label_reader = build_data.ImageReader('png', channels=1) + + for shard_id in range(_NUM_SHARDS): + shard_filename = '%s-%05d-of-%05d.tfrecord' % ( + dataset_split, shard_id, _NUM_SHARDS) + output_filename = os.path.join(FLAGS.output_dir, shard_filename) + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + start_idx = shard_id * num_per_shard + end_idx = min((shard_id + 1) * num_per_shard, num_images) + for i in range(start_idx, end_idx): + sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( + i + 1, num_images, shard_id)) + sys.stdout.flush() + # Read the image. + image_data = tf.gfile.FastGFile(image_files[i], 'rb').read() + height, width = image_reader.read_image_dims(image_data) + # Read the semantic segmentation annotation. + seg_data = tf.gfile.FastGFile(label_files[i], 'rb').read() + seg_height, seg_width = label_reader.read_image_dims(seg_data) + if height != seg_height or width != seg_width: + raise RuntimeError('Shape mismatched between image and label.') + # Convert to tf example. + re_match = _IMAGE_FILENAME_RE.search(image_files[i]) + if re_match is None: + raise RuntimeError('Invalid image filename: ' + image_files[i]) + filename = os.path.basename(re_match.group(1)) + example = build_data.image_seg_to_tfexample( + image_data, filename, height, width, seg_data) + tfrecord_writer.write(example.SerializeToString()) + sys.stdout.write('\n') + sys.stdout.flush() + + +def main(unused_argv): + # Only support converting 'train_fine', 'val_fine' and 'test_fine' sets for now. + for dataset_split in ['train_fine', 'val_fine', 'test_fine']: + _convert_dataset(dataset_split) + + +if __name__ == '__main__': + tf.app.run() diff --git a/deeplab/models/research/deeplab/datasets/build_data.py b/deeplab/models/research/deeplab/datasets/build_data.py new file mode 100644 index 0000000..4562867 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/build_data.py @@ -0,0 +1,161 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains common utility functions and classes for building dataset. + +This script contains utility functions and classes to converts dataset to +TFRecord file format with Example protos. + +The Example proto contains the following fields: + + image/encoded: encoded image content. + image/filename: image filename. + image/format: image file format. + image/height: image height. + image/width: image width. + image/channels: image channels. 
+ image/segmentation/class/encoded: encoded semantic segmentation content. + image/segmentation/class/format: semantic segmentation file format. +""" +import collections +import six +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_enum('image_format', 'png', ['jpg', 'jpeg', 'png'], + 'Image format.') + +tf.app.flags.DEFINE_enum('label_format', 'png', ['png'], + 'Segmentation label format.') + +# A map from image format to expected data format. +_IMAGE_FORMAT_MAP = { + 'jpg': 'jpeg', + 'jpeg': 'jpeg', + 'png': 'png', +} + + +class ImageReader(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self, image_format='jpeg', channels=3): + """Class constructor. + + Args: + image_format: Image format. Only 'jpeg', 'jpg', or 'png' are supported. + channels: Image channels. + """ + with tf.Graph().as_default(): + self._decode_data = tf.placeholder(dtype=tf.string) + self._image_format = image_format + self._session = tf.Session() + if self._image_format in ('jpeg', 'jpg'): + self._decode = tf.image.decode_jpeg(self._decode_data, + channels=channels) + elif self._image_format == 'png': + self._decode = tf.image.decode_png(self._decode_data, + channels=channels) + + def read_image_dims(self, image_data): + """Reads the image dimensions. + + Args: + image_data: string of image data. + + Returns: + image_height and image_width. + """ + image = self.decode_image(image_data) + return image.shape[:2] + + def decode_image(self, image_data): + """Decodes the image data string. + + Args: + image_data: string of image data. + + Returns: + Decoded image data. + + Raises: + ValueError: Value of image channels not supported. + """ + image = self._session.run(self._decode, + feed_dict={self._decode_data: image_data}) + if len(image.shape) != 3 or image.shape[2] not in (1, 3): + raise ValueError('The image channels not supported.') + + return image + + +def _int64_list_feature(values): + """Returns a TF-Feature of int64_list. + + Args: + values: A scalar or list of values. + + Returns: + A TF-Feature. + """ + if not isinstance(values, collections.Iterable): + values = [values] + + return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) + + +def _bytes_list_feature(values): + """Returns a TF-Feature of bytes. + + Args: + values: A string. + + Returns: + A TF-Feature. + """ + def norm2bytes(value): + return value.encode() if isinstance(value, str) and six.PY3 else value + + return tf.train.Feature( + bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) + + +def image_seg_to_tfexample(image_data, filename, height, width, seg_data): + """Converts one image/segmentation pair to tf example. + + Args: + image_data: string of image data. + filename: image filename. + height: image height. + width: image width. + seg_data: string of semantic segmentation data. + + Returns: + tf example of one image/segmentation pair. 
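The ImageReader defined above can also be used on its own to confirm that an image and its mask have matching dimensions, which is exactly the check the converters enforce. A minimal sketch, assuming it is run from the deeplab/datasets directory with the PQR folders added elsewhere in this change:

import tensorflow as tf
import build_data  # assumes the working directory is deeplab/datasets

image_reader = build_data.ImageReader('jpeg', channels=3)
label_reader = build_data.ImageReader('png', channels=1)

image_data = tf.gfile.GFile('PQR/dataset/JPEGImages/Heart_O_img_0.jpg', 'rb').read()
seg_data = tf.gfile.GFile('PQR/dataset/SegmentationClassRaw/Heart_O_img_0.png', 'rb').read()

# Both readers return (height, width); the conversion scripts raise
# RuntimeError when these disagree.
print(image_reader.read_image_dims(image_data))
print(label_reader.read_image_dims(seg_data))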
+ """ + return tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': _bytes_list_feature(image_data), + 'image/filename': _bytes_list_feature(filename), + 'image/format': _bytes_list_feature( + _IMAGE_FORMAT_MAP[FLAGS.image_format]), + 'image/height': _int64_list_feature(height), + 'image/width': _int64_list_feature(width), + 'image/channels': _int64_list_feature(3), + 'image/segmentation/class/encoded': ( + _bytes_list_feature(seg_data)), + 'image/segmentation/class/format': _bytes_list_feature( + FLAGS.label_format), + })) diff --git a/deeplab/models/research/deeplab/datasets/build_new_pqr_data.py b/deeplab/models/research/deeplab/datasets/build_new_pqr_data.py new file mode 100644 index 0000000..defeb35 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/build_new_pqr_data.py @@ -0,0 +1,102 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +import os +import random +import sys +import build_data +from six.moves import range +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +cwd = os.getcwd() + +tf.app.flags.DEFINE_string('image_folder', + './PQR/JPEGImages', + 'Folder containing images.') + +tf.app.flags.DEFINE_string( +'semantic_segmentation_folder', +'./PQR/SegmentationClassRaw', +'Folder containing semantic segmentation annotations.') + +tf.app.flags.DEFINE_string( +'list_folder', +'./PQR/ImageSets', +'Folder containing lists for training and validation') + +tf.app.flags.DEFINE_string( +'output_dir', +'./PQR/tfrecord', +'Path to save converted SSTable of TensorFlow examples.') + +_NUM_SHARDS = 4 + + +def _convert_dataset(dataset_split): + """Converts the specified dataset split to TFRecord format. + + Args: + dataset_split: The dataset split (e.g., train, test). + + Raises: + RuntimeError: If loaded image and label have different shape. + """ + dataset = os.path.basename(dataset_split)[:-4] + sys.stdout.write('Processing ' + dataset) + filenames = [x.strip('\n') for x in open(dataset_split, 'r')] + num_images = len(filenames) + num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) + + image_reader = build_data.ImageReader('jpeg', channels=3) + label_reader = build_data.ImageReader('png', channels=1) + + for shard_id in range(_NUM_SHARDS): + output_filename = os.path.join( + FLAGS.output_dir, + '%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS)) + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + start_idx = shard_id * num_per_shard + end_idx = min((shard_id + 1) * num_per_shard, num_images) + for i in range(start_idx, end_idx): + sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( + i + 1, len(filenames), shard_id)) + sys.stdout.flush() + # Read the image. + sys.stdout.write(FLAGS.image_folder) + image_filename = os.path.join( + FLAGS.image_folder, filenames[i] + '.' + FLAGS.image_format) + image_data = tf.gfile.GFile(image_filename, 'rb').read() + height, width = image_reader.read_image_dims(image_data) + # Read the semantic segmentation annotation. + seg_filename = os.path.join( + FLAGS.semantic_segmentation_folder, + filenames[i] + '.' + FLAGS.label_format) + seg_data = tf.gfile.GFile(seg_filename, 'rb').read() + seg_height, seg_width = label_reader.read_image_dims(seg_data) + if height != seg_height or width != seg_width: + raise RuntimeError('Shape mismatched between image and label.') + # Convert to tf example. 
+ example = build_data.image_seg_to_tfexample( + image_data, filenames[i], height, width, seg_data) + tfrecord_writer.write(example.SerializeToString()) + sys.stdout.write('\n') + sys.stdout.flush() + + +# def main(unused_argv): +# tf.gfile.MakeDirs(FLAGS.output_dir) +# _convert_dataset( +# 'train', FLAGS.train_image_folder, FLAGS.train_image_label_folder) +# _convert_dataset('val', FLAGS.val_image_folder, FLAGS.val_image_label_folder) + +def main(unused_argv): + dataset_splits = tf.gfile.Glob(os.path.join(FLAGS.list_folder, '*.txt')) + for dataset_split in dataset_splits: + _convert_dataset(dataset_split) + + +if __name__ == '__main__': + tf.app.run() \ No newline at end of file diff --git a/deeplab/models/research/deeplab/datasets/build_voc2012_data.py b/deeplab/models/research/deeplab/datasets/build_voc2012_data.py new file mode 100644 index 0000000..f0bdecb --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/build_voc2012_data.py @@ -0,0 +1,146 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts PASCAL VOC 2012 data to TFRecord file format with Example protos. + +PASCAL VOC 2012 dataset is expected to have the following directory structure: + + + pascal_voc_seg + - build_data.py + - build_voc2012_data.py (current working directory). + + VOCdevkit + + VOC2012 + + JPEGImages + + SegmentationClass + + ImageSets + + Segmentation + + tfrecord + +Image folder: + ./VOCdevkit/VOC2012/JPEGImages + +Semantic segmentation annotations: + ./VOCdevkit/VOC2012/SegmentationClass + +list folder: + ./VOCdevkit/VOC2012/ImageSets/Segmentation + +This script converts data into sharded data files and save at tfrecord folder. + +The Example proto contains the following fields: + + image/encoded: encoded image content. + image/filename: image filename. + image/format: image file format. + image/height: image height. + image/width: image width. + image/channels: image channels. + image/segmentation/class/encoded: encoded semantic segmentation content. + image/segmentation/class/format: semantic segmentation file format. 
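Reading one of the generated shards back is a convenient way to verify the Example proto fields listed above. A minimal sketch; the shard name shown is hypothetical and should be adjusted to whatever the conversion actually wrote:

import tensorflow as tf

def inspect_shard(path, max_records=2):
    # Decodes the Example protos written by the build_*_data.py scripts and
    # prints the stored metadata fields.
    for i, record in enumerate(tf.python_io.tf_record_iterator(path)):
        if i >= max_records:
            break
        example = tf.train.Example()
        example.ParseFromString(record)
        feature = example.features.feature
        print(feature['image/filename'].bytes_list.value[0],
              feature['image/height'].int64_list.value[0],
              feature['image/width'].int64_list.value[0],
              feature['image/format'].bytes_list.value[0])

inspect_shard('./PQR/tfrecord/train-00000-of-00004.tfrecord')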
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +import os.path +import sys +import build_data +from six.moves import range +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('image_folder', + './VOCdevkit/VOC2012/JPEGImages', + 'Folder containing images.') + +tf.app.flags.DEFINE_string( + 'semantic_segmentation_folder', + './VOCdevkit/VOC2012/SegmentationClassRaw', + 'Folder containing semantic segmentation annotations.') + +tf.app.flags.DEFINE_string( + 'list_folder', + './VOCdevkit/VOC2012/ImageSets/Segmentation', + 'Folder containing lists for training and validation') + +tf.app.flags.DEFINE_string( + 'output_dir', + './tfrecord', + 'Path to save converted SSTable of TensorFlow examples.') + + +_NUM_SHARDS = 4 + + +def _convert_dataset(dataset_split): + """Converts the specified dataset split to TFRecord format. + + Args: + dataset_split: The dataset split (e.g., train, test). + + Raises: + RuntimeError: If loaded image and label have different shape. + """ + dataset = os.path.basename(dataset_split)[:-4] + sys.stdout.write('Processing ' + dataset) + filenames = [x.strip('\n') for x in open(dataset_split, 'r')] + num_images = len(filenames) + num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) + + image_reader = build_data.ImageReader('jpeg', channels=3) + label_reader = build_data.ImageReader('png', channels=1) + + for shard_id in range(_NUM_SHARDS): + output_filename = os.path.join( + FLAGS.output_dir, + '%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS)) + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + start_idx = shard_id * num_per_shard + end_idx = min((shard_id + 1) * num_per_shard, num_images) + for i in range(start_idx, end_idx): + sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( + i + 1, len(filenames), shard_id)) + sys.stdout.flush() + # Read the image. + image_filename = os.path.join( + FLAGS.image_folder, filenames[i] + '.' + FLAGS.image_format) + image_data = tf.gfile.GFile(image_filename, 'rb').read() + height, width = image_reader.read_image_dims(image_data) + # Read the semantic segmentation annotation. + seg_filename = os.path.join( + FLAGS.semantic_segmentation_folder, + filenames[i] + '.' + FLAGS.label_format) + seg_data = tf.gfile.GFile(seg_filename, 'rb').read() + seg_height, seg_width = label_reader.read_image_dims(seg_data) + if height != seg_height or width != seg_width: + raise RuntimeError('Shape mismatched between image and label.') + # Convert to tf example. + example = build_data.image_seg_to_tfexample( + image_data, filenames[i], height, width, seg_data) + tfrecord_writer.write(example.SerializeToString()) + sys.stdout.write('\n') + sys.stdout.flush() + + +def main(unused_argv): + dataset_splits = tf.gfile.Glob(os.path.join(FLAGS.list_folder, '*.txt')) + for dataset_split in dataset_splits: + _convert_dataset(dataset_split) + + +if __name__ == '__main__': + tf.app.run() diff --git a/deeplab/models/research/deeplab/datasets/convert_cityscapes.sh b/deeplab/models/research/deeplab/datasets/convert_cityscapes.sh new file mode 100644 index 0000000..ddc39fb --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/convert_cityscapes.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Script to preprocess the Cityscapes dataset. Note (1) the users should
+# register the Cityscapes dataset website at
+# https://www.cityscapes-dataset.com/downloads/ to download the dataset,
+# and (2) the users should download the utility scripts provided by
+# Cityscapes at https://github.com/mcordts/cityscapesScripts.
+#
+# Usage:
+#   bash ./convert_cityscapes.sh
+#
+# The folder structure is assumed to be:
+#  + datasets
+#    - build_cityscapes_data.py
+#    - convert_cityscapes.sh
+#    + cityscapes
+#      + cityscapesscripts (downloaded scripts)
+#      + gtFine
+#      + leftImg8bit
+#
+
+# Exit immediately if a command exits with a non-zero status.
+set -e
+
+CURRENT_DIR=$(pwd)
+WORK_DIR="."
+
+# Root path for Cityscapes dataset.
+CITYSCAPES_ROOT="${WORK_DIR}/cityscapes"
+
+export PYTHONPATH="${CITYSCAPES_ROOT}:${PYTHONPATH}"
+
+# Create training labels.
+python "${CITYSCAPES_ROOT}/cityscapesscripts/preparation/createTrainIdLabelImgs.py"
+
+# Build TFRecords of the dataset.
+# First, create output directory for storing TFRecords.
+OUTPUT_DIR="${CITYSCAPES_ROOT}/tfrecord"
+mkdir -p "${OUTPUT_DIR}"
+
+BUILD_SCRIPT="${CURRENT_DIR}/build_cityscapes_data.py"
+
+echo "Converting Cityscapes dataset..."
+python "${BUILD_SCRIPT}" \
+  --cityscapes_root="${CITYSCAPES_ROOT}" \
+  --output_dir="${OUTPUT_DIR}" \
diff --git a/deeplab/models/research/deeplab/datasets/convert_pqr.sh b/deeplab/models/research/deeplab/datasets/convert_pqr.sh
new file mode 100644
index 0000000..845865d
--- /dev/null
+++ b/deeplab/models/research/deeplab/datasets/convert_pqr.sh
@@ -0,0 +1,30 @@
+CURRENT_DIR=$(pwd)
+# WORK_DIR="./PQR"
+WORK_DIR="${CURRENT_DIR}/PQR"
+PQR_ROOT="${WORK_DIR}/dataset"
+SEG_FOLDER="${PQR_ROOT}/SegmentationClass"
+SEMANTIC_SEG_FOLDER="${PQR_ROOT}/SegmentationClassRaw"
+
+echo "Removing the color map in ground truth annotations..."
+python3.7 remove_gt_colormap.py \
+  --original_gt_folder="${SEG_FOLDER}" \
+  --output_dir="${SEMANTIC_SEG_FOLDER}"
+
+# Build TFRecords of the dataset.
+OUTPUT_DIR="${WORK_DIR}/tfrecord"
+mkdir -p "${OUTPUT_DIR}"
+
+# IMAGE_FOLDER="${PQR_ROOT}/JPEGImages"
+IMAGE_FOLDER="/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/dataset/JPEGImages"
+LIST_FOLDER="${PQR_ROOT}/ImageSets"
+
+echo ${IMAGE_FOLDER}
+
+echo "Converting PQR dataset..."
+# python3.7 ./build_data.py \
+python3.7 ./build_new_pqr_data.py \
+  --image_folder="${IMAGE_FOLDER}" \
+  --semantic_segmentation_folder="${SEMANTIC_SEG_FOLDER}" \
+  --list_folder="${LIST_FOLDER}" \
+  --image_format="jpg" \
+  --output_dir="${OUTPUT_DIR}"
diff --git a/deeplab/models/research/deeplab/datasets/data_generator.py b/deeplab/models/research/deeplab/datasets/data_generator.py
new file mode 100644
index 0000000..6cc230a
--- /dev/null
+++ b/deeplab/models/research/deeplab/datasets/data_generator.py
@@ -0,0 +1,361 @@
+# Lint as: python2, python3
+# Copyright 2018 The TensorFlow Authors All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Wrapper for providing semantic segmentaion data. + +The SegmentationDataset class provides both images and annotations (semantic +segmentation and/or instance segmentation) for TensorFlow. Currently, we +support the following datasets: + +1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). + +PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects +(e.g., bike, person, and so on) and leaves all the other semantic classes as +one background class. The dataset contains 1464, 1449, and 1456 annotated +images for the training, validation and test respectively. + +2. Cityscapes dataset (https://www.cityscapes-dataset.com) + +The Cityscapes dataset contains 19 semantic labels (such as road, person, car, +and so on) for urban street scenes. + +3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K) + +The ADE20K dataset contains 150 semantic labels both urban street scenes and +indoor scenes. + +References: + M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn, + and A. Zisserman, The pascal visual object classes challenge a retrospective. + IJCV, 2014. + + M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson, + U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban + scene understanding," In Proc. of CVPR, 2016. + + B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing + through ADE20K dataset", In Proc. of CVPR, 2017. +""" + +import collections +import os +import tensorflow as tf +from deeplab import common +from deeplab import input_preprocess + +# Named tuple to describe the dataset properties. +DatasetDescriptor = collections.namedtuple( + 'DatasetDescriptor', + [ + 'splits_to_sizes', # Splits of the dataset into training, val and test. + 'num_classes', # Number of semantic classes, including the + # background class (if exists). For example, there + # are 20 foreground classes + 1 background class in + # the PASCAL VOC 2012 dataset. Thus, we set + # num_classes=21. + 'ignore_label', # Ignore label value. 
+ ]) + +_CITYSCAPES_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train_fine': 2975, + 'train_coarse': 22973, + 'trainval_fine': 3475, + 'trainval_coarse': 23473, + 'val_fine': 500, + 'test_fine': 1525}, + num_classes=19, + ignore_label=255, +) + +_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 1464, + 'train_aug': 10582, + 'trainval': 2913, + 'val': 1449, + }, + num_classes=21, + ignore_label=255, +) + +_ADE20K_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 20210, # num of samples in images/training + 'val': 2000, # num of samples in images/validation + }, + num_classes=151, + ignore_label=0, +) + +_PQR_SEG_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 3, # number of file in the train folder + 'trainval': 5, + 'val': 2, + }, + num_classes=2, # number of classes in your dataset + ignore_label=255, # white edges that will be ignored to be class +) + +_DATASETS_INFORMATION = { + 'cityscapes': _CITYSCAPES_INFORMATION, + 'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION, + 'ade20k': _ADE20K_INFORMATION, + 'pqr': _PQR_SEG_INFORMATION +} + +# Default file pattern of TFRecord of TensorFlow Example. +_FILE_PATTERN = '%s-*' + + +def get_cityscapes_dataset_name(): + return 'cityscapes' + + +class Dataset(object): + """Represents input dataset for deeplab model.""" + + def __init__(self, + dataset_name, + split_name, + dataset_dir, + batch_size, + crop_size, + min_resize_value=None, + max_resize_value=None, + resize_factor=None, + min_scale_factor=1., + max_scale_factor=1., + scale_factor_step_size=0, + model_variant=None, + num_readers=1, + is_training=False, + should_shuffle=False, + should_repeat=False): + """Initializes the dataset. + + Args: + dataset_name: Dataset name. + split_name: A train/val Split name. + dataset_dir: The directory of the dataset sources. + batch_size: Batch size. + crop_size: The size used to crop the image and label. + min_resize_value: Desired size of the smaller image side. + max_resize_value: Maximum allowed size of the larger image side. + resize_factor: Resized dimensions are multiple of factor plus one. + min_scale_factor: Minimum scale factor value. + max_scale_factor: Maximum scale factor value. + scale_factor_step_size: The step size from min scale factor to max scale + factor. The input is randomly scaled based on the value of + (min_scale_factor, max_scale_factor, scale_factor_step_size). + model_variant: Model variant (string) for choosing how to mean-subtract + the images. See feature_extractor.network_map for supported model + variants. + num_readers: Number of readers for data provider. + is_training: Boolean, if dataset is for training or not. + should_shuffle: Boolean, if should shuffle the input data. + should_repeat: Boolean, if should repeat the input data. + + Raises: + ValueError: Dataset name and split name are not supported. + """ + if dataset_name not in _DATASETS_INFORMATION: + raise ValueError('The specified dataset is not supported yet.') + self.dataset_name = dataset_name + + splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes + + if split_name not in splits_to_sizes: + raise ValueError('data split name %s not recognized' % split_name) + + if model_variant is None: + tf.logging.warning('Please specify a model_variant. 
See ' + 'feature_extractor.network_map for supported model ' + 'variants.') + + self.split_name = split_name + self.dataset_dir = dataset_dir + self.batch_size = batch_size + self.crop_size = crop_size + self.min_resize_value = min_resize_value + self.max_resize_value = max_resize_value + self.resize_factor = resize_factor + self.min_scale_factor = min_scale_factor + self.max_scale_factor = max_scale_factor + self.scale_factor_step_size = scale_factor_step_size + self.model_variant = model_variant + self.num_readers = num_readers + self.is_training = is_training + self.should_shuffle = should_shuffle + self.should_repeat = should_repeat + + self.num_of_classes = _DATASETS_INFORMATION[self.dataset_name].num_classes + self.ignore_label = _DATASETS_INFORMATION[self.dataset_name].ignore_label + + def _parse_function(self, example_proto): + """Function to parse the example proto. + + Args: + example_proto: Proto in the format of tf.Example. + + Returns: + A dictionary with parsed image, label, height, width and image name. + + Raises: + ValueError: Label is of wrong shape. + """ + + # Currently only supports jpeg and png. + # Need to use this logic because the shape is not known for + # tf.image.decode_image and we rely on this info to + # extend label if necessary. + def _decode_image(content, channels): + return tf.cond( + tf.image.is_jpeg(content), + lambda: tf.image.decode_jpeg(content, channels), + lambda: tf.image.decode_png(content, channels)) + + features = { + 'image/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/filename': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/format': + tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/height': + tf.FixedLenFeature((), tf.int64, default_value=0), + 'image/width': + tf.FixedLenFeature((), tf.int64, default_value=0), + 'image/segmentation/class/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/segmentation/class/format': + tf.FixedLenFeature((), tf.string, default_value='png'), + } + + parsed_features = tf.parse_single_example(example_proto, features) + + image = _decode_image(parsed_features['image/encoded'], channels=3) + + label = None + if self.split_name != common.TEST_SET: + label = _decode_image( + parsed_features['image/segmentation/class/encoded'], channels=1) + + image_name = parsed_features['image/filename'] + if image_name is None: + image_name = tf.constant('') + + sample = { + common.IMAGE: image, + common.IMAGE_NAME: image_name, + common.HEIGHT: parsed_features['image/height'], + common.WIDTH: parsed_features['image/width'], + } + + if label is not None: + if label.get_shape().ndims == 2: + label = tf.expand_dims(label, 2) + elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1: + pass + else: + raise ValueError('Input label shape must be [height, width], or ' + '[height, width, 1].') + + label.set_shape([None, None, 1]) + + sample[common.LABELS_CLASS] = label + + return sample + + def _preprocess_image(self, sample): + """Preprocesses the image and label. + + Args: + sample: A sample containing image and label. + + Returns: + sample: Sample with preprocessed image and label. + + Raises: + ValueError: Ground truth label not provided during training. 
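One detail worth spelling out: the preprocessing step forwards self.ignore_label, and the 'pqr' descriptor above registers ignore_label=255 so that white edge pixels in the masks are not treated as a class. A tiny numpy illustration of the idea (only a sketch; the real masking happens inside DeepLab's loss utilities):

import numpy as np

# Toy 3x3 ground-truth patch: 0 = background, 1 = target shape,
# 255 = ignore_label (e.g. boundary pixels left over from the masks).
label = np.array([[0, 0, 255],
                  [0, 1, 1],
                  [255, 1, 1]], dtype=np.uint8)

# Pixels equal to ignore_label get weight 0 and contribute nothing to the
# softmax loss; everything else keeps weight 1.
not_ignore_mask = (label != 255).astype(np.float32)
print(not_ignore_mask)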
+ """ + image = sample[common.IMAGE] + label = sample[common.LABELS_CLASS] + + original_image, image, label = input_preprocess.preprocess_image_and_label( + image=image, + label=label, + crop_height=self.crop_size[0], + crop_width=self.crop_size[1], + min_resize_value=self.min_resize_value, + max_resize_value=self.max_resize_value, + resize_factor=self.resize_factor, + min_scale_factor=self.min_scale_factor, + max_scale_factor=self.max_scale_factor, + scale_factor_step_size=self.scale_factor_step_size, + ignore_label=self.ignore_label, + is_training=self.is_training, + model_variant=self.model_variant) + + sample[common.IMAGE] = image + + if not self.is_training: + # Original image is only used during visualization. + sample[common.ORIGINAL_IMAGE] = original_image + + if label is not None: + sample[common.LABEL] = label + + # Remove common.LABEL_CLASS key in the sample since it is only used to + # derive label and not used in training and evaluation. + sample.pop(common.LABELS_CLASS, None) + + return sample + + def get_one_shot_iterator(self): + """Gets an iterator that iterates across the dataset once. + + Returns: + An iterator of type tf.data.Iterator. + """ + + files = self._get_all_files() + + dataset = ( + tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers) + .map(self._parse_function, num_parallel_calls=self.num_readers) + .map(self._preprocess_image, num_parallel_calls=self.num_readers)) + + if self.should_shuffle: + dataset = dataset.shuffle(buffer_size=100) + + if self.should_repeat: + dataset = dataset.repeat() # Repeat forever for training. + else: + dataset = dataset.repeat(1) + + dataset = dataset.batch(self.batch_size).prefetch(self.batch_size) + return dataset.make_one_shot_iterator() + + def _get_all_files(self): + """Gets all the files to read data from. + + Returns: + A list of input files. + """ + file_pattern = _FILE_PATTERN + file_pattern = os.path.join(self.dataset_dir, + file_pattern % self.split_name) + return tf.gfile.Glob(file_pattern) diff --git a/deeplab/models/research/deeplab/datasets/data_generator_test.py b/deeplab/models/research/deeplab/datasets/data_generator_test.py new file mode 100644 index 0000000..f4425d0 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/data_generator_test.py @@ -0,0 +1,115 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for deeplab.datasets.data_generator.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from six.moves import range +import tensorflow as tf + +from deeplab import common +from deeplab.datasets import data_generator + +ImageAttributes = collections.namedtuple( + 'ImageAttributes', ['image', 'label', 'height', 'width', 'image_name']) + + +class DatasetTest(tf.test.TestCase): + + # Note: training dataset cannot be tested since there is shuffle operation. + # When disabling the shuffle, training dataset is operated same as validation + # dataset. Therefore it is not tested again. + def testPascalVocSegTestData(self): + dataset = data_generator.Dataset( + dataset_name='pascal_voc_seg', + split_name='val', + dataset_dir= + 'deeplab/testing/pascal_voc_seg', + batch_size=1, + crop_size=[3, 3], # Use small size for testing. + min_resize_value=3, + max_resize_value=3, + resize_factor=None, + min_scale_factor=0.01, + max_scale_factor=2.0, + scale_factor_step_size=0.25, + is_training=False, + model_variant='mobilenet_v2') + + self.assertAllEqual(dataset.num_of_classes, 21) + self.assertAllEqual(dataset.ignore_label, 255) + + num_of_images = 3 + with self.test_session() as sess: + iterator = dataset.get_one_shot_iterator() + + for i in range(num_of_images): + batch = iterator.get_next() + batch, = sess.run([batch]) + image_attributes = _get_attributes_of_image(i) + self.assertEqual(batch[common.HEIGHT][0], image_attributes.height) + self.assertEqual(batch[common.WIDTH][0], image_attributes.width) + self.assertEqual(batch[common.IMAGE_NAME][0], + image_attributes.image_name.encode()) + + # All data have been read. + with self.assertRaisesRegexp(tf.errors.OutOfRangeError, ''): + sess.run([iterator.get_next()]) + + +def _get_attributes_of_image(index): + """Gets the attributes of the image. + + Args: + index: Index of image in all images. + + Returns: + Attributes of the image in the format of ImageAttributes. + + Raises: + ValueError: If index is of wrong value. + """ + if index == 0: + return ImageAttributes( + image=None, + label=None, + height=366, + width=500, + image_name='2007_000033') + elif index == 1: + return ImageAttributes( + image=None, + label=None, + height=335, + width=500, + image_name='2007_000042') + elif index == 2: + return ImageAttributes( + image=None, + label=None, + height=333, + width=500, + image_name='2007_000061') + else: + raise ValueError('Index can only be 0, 1 or 2.') + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/datasets/download_and_convert_ade20k.sh b/deeplab/models/research/deeplab/datasets/download_and_convert_ade20k.sh new file mode 100644 index 0000000..3614ae4 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/download_and_convert_ade20k.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Script to download and preprocess the ADE20K dataset. +# +# Usage: +# bash ./download_and_convert_ade20k.sh +# +# The folder structure is assumed to be: +# + datasets +# - build_data.py +# - build_ade20k_data.py +# - download_and_convert_ade20k.sh +# + ADE20K +# + tfrecord +# + ADEChallengeData2016 +# + annotations +# + training +# + validation +# + images +# + training +# + validation + +# Exit immediately if a command exits with a non-zero status. +set -e + +CURRENT_DIR=$(pwd) +WORK_DIR="./ADE20K" +mkdir -p "${WORK_DIR}" +cd "${WORK_DIR}" + +# Helper function to download and unpack ADE20K dataset. +download_and_uncompress() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! -f "${FILENAME}" ]; then + echo "Downloading ${FILENAME} to ${WORK_DIR}" + wget -nd -c "${BASE_URL}/${FILENAME}" + fi + echo "Uncompressing ${FILENAME}" + unzip "${FILENAME}" +} + +# Download the images. +BASE_URL="http://data.csail.mit.edu/places/ADEchallenge" +FILENAME="ADEChallengeData2016.zip" + +download_and_uncompress "${BASE_URL}" "${FILENAME}" + +cd "${CURRENT_DIR}" + +# Root path for ADE20K dataset. +ADE20K_ROOT="${WORK_DIR}/ADEChallengeData2016" + +# Build TFRecords of the dataset. +# First, create output directory for storing TFRecords. +OUTPUT_DIR="${WORK_DIR}/tfrecord" +mkdir -p "${OUTPUT_DIR}" + +echo "Converting ADE20K dataset..." +python ./build_ade20k_data.py \ + --train_image_folder="${ADE20K_ROOT}/images/training/" \ + --train_image_label_folder="${ADE20K_ROOT}/annotations/training/" \ + --val_image_folder="${ADE20K_ROOT}/images/validation/" \ + --val_image_label_folder="${ADE20K_ROOT}/annotations/validation/" \ + --output_dir="${OUTPUT_DIR}" diff --git a/deeplab/models/research/deeplab/datasets/download_and_convert_voc2012.sh b/deeplab/models/research/deeplab/datasets/download_and_convert_voc2012.sh new file mode 100644 index 0000000..607c654 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/download_and_convert_voc2012.sh @@ -0,0 +1,94 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Script to download and preprocess the PASCAL VOC 2012 dataset. +# +# Usage: +# bash ./download_and_convert_voc2012.sh +# +# The folder structure is assumed to be: +# + datasets +# - build_data.py +# - build_voc2012_data.py +# - download_and_convert_voc2012.sh +# - remove_gt_colormap.py +# + pascal_voc_seg +# + VOCdevkit +# + VOC2012 +# + JPEGImages +# + SegmentationClass +# + +# Exit immediately if a command exits with a non-zero status. 
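The download_and_uncompress helper is duplicated in both download scripts. For environments without wget, a rough Python equivalent (only a sketch, not part of the original scripts) would be:

import os
import urllib.request
import zipfile

def download_and_uncompress(base_url, filename, work_dir='.'):
    # Skip the download when the archive is already present, then unpack it
    # into work_dir, mirroring the shell helper above.
    archive = os.path.join(work_dir, filename)
    if not os.path.isfile(archive):
        urllib.request.urlretrieve('%s/%s' % (base_url, filename), archive)
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(work_dir)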
+set -e + +CURRENT_DIR=$(pwd) +WORK_DIR="./pascal_voc_seg" +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +mkdir -p "${WORK_DIR}" +cd "${WORK_DIR}" + +# Helper function to download and unpack VOC 2012 dataset. +download_and_uncompress() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! -f "${FILENAME}" ]; then + echo "Downloading ${FILENAME} to ${WORK_DIR}" + wget -nd -c "${BASE_URL}/${FILENAME}" + fi + echo "Uncompressing ${FILENAME}" + sudo apt install unzip + unzip "${FILENAME}" +} + +# Download the images. +BASE_URL="https://data.deepai.org/" +FILENAME="PascalVOC2012.zip" + +# download_and_uncompress "${BASE_URL}" "${FILENAME}" +# wget "${BASE_URL}" "${FILENAME}" + +cd "${CURRENT_DIR}" + +# Root path for PASCAL VOC 2012 dataset. +PASCAL_ROOT="${WORK_DIR}/VOC2012" + +# Remove the colormap in the ground truth annotations. +SEG_FOLDER="${PASCAL_ROOT}/SegmentationClass" +SEMANTIC_SEG_FOLDER="${PASCAL_ROOT}/SegmentationClassRaw" + +echo "Removing the color map in ground truth annotations..." +python3 "${SCRIPT_DIR}/remove_gt_colormap.py" \ + --original_gt_folder="${SEG_FOLDER}" \ + --output_dir="${SEMANTIC_SEG_FOLDER}" + +# Build TFRecords of the dataset. +# First, create output directory for storing TFRecords. +OUTPUT_DIR="${WORK_DIR}/tfrecord" +mkdir -p "${OUTPUT_DIR}" + +IMAGE_FOLDER="${PASCAL_ROOT}/JPEGImages" +LIST_FOLDER="${PASCAL_ROOT}/ImageSets/Segmentation" +echo ${IMAGE_FOLDER} + +echo "Converting PASCAL VOC 2012 dataset..." +python3 "${SCRIPT_DIR}/build_voc2012_data.py" \ + --image_folder="${IMAGE_FOLDER}" \ + --semantic_segmentation_folder="${SEMANTIC_SEG_FOLDER}" \ + --list_folder="${LIST_FOLDER}" \ + --image_format="jpg" \ + --output_dir="${OUTPUT_DIR}" diff --git a/deeplab/models/research/deeplab/datasets/label_pqr.py b/deeplab/models/research/deeplab/datasets/label_pqr.py new file mode 100644 index 0000000..ec90fc6 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/label_pqr.py @@ -0,0 +1,38 @@ +import tensorflow as tf +from PIL import Image +from tqdm import tqdm +import numpy as np + +import os, shutil + +# palette (color map) describes the (R, G, B): Label pair +palette = {(0, 0, 0) : 0 , + (0, 0, 255) : 1} + +def convert_from_color_segmentation(arr_3d): + arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8) + + for c, i in palette.items(): + m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2) + arr_2d[m] = i + return arr_2d + + +label_dir = './PQR/dataset/SegmentationClass/' +new_label_dir = './PQR/dataset/SegmentationClassRaw/' + +if not os.path.isdir(new_label_dir): + print("creating folder: ",new_label_dir) + os.mkdir(new_label_dir) +else: + print("Folder alread exists. Delete the folder and re-run the code!!!") + + +label_files = os.listdir(label_dir) + +for l_f in tqdm(label_files): + arr = np.array(Image.open(label_dir + l_f)) + arr = arr[:,:,0:3] + arr_2d = convert_from_color_segmentation(arr) + l_f = l_f[:-4] + '.png' + Image.fromarray(arr_2d).save(new_label_dir + l_f) diff --git a/deeplab/models/research/deeplab/datasets/remove_gt_colormap.py b/deeplab/models/research/deeplab/datasets/remove_gt_colormap.py new file mode 100644 index 0000000..9005700 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/remove_gt_colormap.py @@ -0,0 +1,83 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
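After label_pqr.py has mapped the (R, G, B) palette to class ids, a quick sanity check that the raw masks contain only the two ids expected by the 'pqr' descriptor might look like this (a minimal sketch, assuming it is run from the deeplab/datasets directory):

import numpy as np
from PIL import Image

arr = np.array(Image.open('./PQR/dataset/SegmentationClassRaw/Heart_O_img_0.png'))
# Expected: a subset of {0, 1}; any other value means the palette in
# label_pqr.py is missing a colour used in the source masks.
print(np.unique(arr))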
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Removes the color map from segmentation annotations. + +Removes the color map from the ground truth segmentation annotations and save +the results to output_dir. +""" +import glob +import os.path +import numpy as np + +from PIL import Image + +import tensorflow as tf + +FLAGS = tf.compat.v1.flags.FLAGS + +tf.compat.v1.flags.DEFINE_string('original_gt_folder', + './VOCdevkit/VOC2012/SegmentationClass', + 'Original ground truth annotations.') + +tf.compat.v1.flags.DEFINE_string('segmentation_format', 'png', 'Segmentation format.') + +tf.compat.v1.flags.DEFINE_string('output_dir', + './VOCdevkit/VOC2012/SegmentationClassRaw', + 'folder to save modified ground truth annotations.') + + +def _remove_colormap(filename): + """Removes the color map from the annotation. + + Args: + filename: Ground truth annotation filename. + + Returns: + Annotation without color map. + """ + return np.array(Image.open(filename)) + + +def _save_annotation(annotation, filename): + """Saves the annotation as png file. + + Args: + annotation: Segmentation annotation. + filename: Output filename. + """ + pil_image = Image.fromarray(annotation.astype(dtype=np.uint8)) + with tf.io.gfile.GFile(filename, mode='w') as f: + pil_image.save(f, 'PNG') + + +def main(unused_argv): + # Create the output directory if not exists. + if not tf.io.gfile.isdir(FLAGS.output_dir): + tf.io.gfile.makedirs(FLAGS.output_dir) + + annotations = glob.glob(os.path.join(FLAGS.original_gt_folder, + '*.' + FLAGS.segmentation_format)) + for annotation in annotations: + raw_annotation = _remove_colormap(annotation) + filename = os.path.basename(annotation)[:-4] + _save_annotation(raw_annotation, + os.path.join( + FLAGS.output_dir, + filename + '.' 
+ FLAGS.segmentation_format)) + + +if __name__ == '__main__': + tf.compat.v1.app.run() diff --git a/deeplab/models/research/deeplab/datasets/test.py b/deeplab/models/research/deeplab/datasets/test.py new file mode 100644 index 0000000..108b864 --- /dev/null +++ b/deeplab/models/research/deeplab/datasets/test.py @@ -0,0 +1,19 @@ +python3.7 ./build_new_pqr_data.py --image_folder="/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/dataset/JPEGImages" --semantic_segmentation_folder="/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw" --list_folder="/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/dataset/ImageSets" --image_format="jpg" --output_dir="/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/tfrecord" + + +python3.7 ./export_model.py \ + --logtostderr \ + --checkpoint_path="/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/exp/train_on_trainval_set/train/model.ckpt-5" \ + --export_path="/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/exp/train_on_trainval_set/export/frozen_inference_graph.pb" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --num_classes=2 \ + --crop_size=448 \ + --crop_size=448 \ + --inference_scales=1.0 + +python deeplab/export_model.py --checkpoint_path=/code/models/research/deeplab/weights_input_level_17/model.ckpt-22000 --export_path=/code/models/research/deeplab/frozen_weights_level_17/frozen_inference_graph.pb --model_variant="xception_65" --atrous_rates=6 --atrous_rates=12 --atrous_rates=18 --output_stride=16 --crop_size=2048 --crop_size=2048 --num_classes=3 \ No newline at end of file diff --git a/deeplab/models/research/deeplab/deeplab_demo.ipynb b/deeplab/models/research/deeplab/deeplab_demo.ipynb new file mode 100644 index 0000000..81ccfde --- /dev/null +++ b/deeplab/models/research/deeplab/deeplab_demo.ipynb @@ -0,0 +1,369 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "KFPcBuVFw61h" + }, + "source": [ + "# Overview\n", + "\n", + "This colab demonstrates the steps to use the DeepLab model to perform semantic segmentation on a sample input image. Expected outputs are semantic labels overlayed on the sample image.\n", + "\n", + "### About DeepLab\n", + "The models used in this colab perform semantic segmentation. Semantic segmentation models focus on assigning semantic labels, such as sky, person, or car, to multiple objects and stuff in a single image." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "t3ozFsEEP-u_" + }, + "source": [ + "# Instructions\n", + "\u003ch3\u003e\u003ca href=\"https://cloud.google.com/tpu/\"\u003e\u003cimg valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png\" width=\"50\"\u003e\u003c/a\u003e \u0026nbsp;\u0026nbsp;Use a free TPU device\u003c/h3\u003e\n", + "\n", + " 1. On the main menu, click Runtime and select **Change runtime type**. Set \"TPU\" as the hardware accelerator.\n", + " 1. Click Runtime again and select **Runtime \u003e Run All**. You can also run the cells manually with Shift-ENTER." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7cRiapZ1P3wy" + }, + "source": [ + "## Import Libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "code", + "colab": {}, + "colab_type": "code", + "id": "kAbdmRmvq0Je" + }, + "outputs": [], + "source": [ + "import os\n", + "from io import BytesIO\n", + "import tarfile\n", + "import tempfile\n", + "from six.moves import urllib\n", + "\n", + "from matplotlib import gridspec\n", + "from matplotlib import pyplot as plt\n", + "import numpy as np\n", + "from PIL import Image\n", + "\n", + "%tensorflow_version 1.x\n", + "import tensorflow as tf" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "p47cYGGOQE1W" + }, + "source": [ + "## Import helper methods\n", + "These methods help us perform the following tasks:\n", + "* Load the latest version of the pretrained DeepLab model\n", + "* Load the colormap from the PASCAL VOC dataset\n", + "* Adds colors to various labels, such as \"pink\" for people, \"green\" for bicycle and more\n", + "* Visualize an image, and add an overlay of colors on various regions" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "code", + "colab": {}, + "colab_type": "code", + "id": "vN0kU6NJ1Ye5" + }, + "outputs": [], + "source": [ + "class DeepLabModel(object):\n", + " \"\"\"Class to load deeplab model and run inference.\"\"\"\n", + "\n", + " INPUT_TENSOR_NAME = 'ImageTensor:0'\n", + " OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'\n", + " INPUT_SIZE = 513\n", + " FROZEN_GRAPH_NAME = 'frozen_inference_graph'\n", + "\n", + " def __init__(self, tarball_path):\n", + " \"\"\"Creates and loads pretrained deeplab model.\"\"\"\n", + " self.graph = tf.Graph()\n", + "\n", + " graph_def = None\n", + " # Extract frozen graph from tar archive.\n", + " tar_file = tarfile.open(tarball_path)\n", + " for tar_info in tar_file.getmembers():\n", + " if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):\n", + " file_handle = tar_file.extractfile(tar_info)\n", + " graph_def = tf.GraphDef.FromString(file_handle.read())\n", + " break\n", + "\n", + " tar_file.close()\n", + "\n", + " if graph_def is None:\n", + " raise RuntimeError('Cannot find inference graph in tar archive.')\n", + "\n", + " with self.graph.as_default():\n", + " tf.import_graph_def(graph_def, name='')\n", + "\n", + " self.sess = tf.Session(graph=self.graph)\n", + "\n", + " def run(self, image):\n", + " \"\"\"Runs inference on a single image.\n", + "\n", + " Args:\n", + " image: A PIL.Image object, raw input image.\n", + "\n", + " Returns:\n", + " resized_image: RGB image resized from original input image.\n", + " seg_map: Segmentation map of `resized_image`.\n", + " \"\"\"\n", + " width, height = image.size\n", + " resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n", + " target_size = (int(resize_ratio * width), int(resize_ratio * height))\n", + " resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n", + " batch_seg_map = self.sess.run(\n", + " self.OUTPUT_TENSOR_NAME,\n", + " feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n", + " seg_map = batch_seg_map[0]\n", + " return resized_image, seg_map\n", + "\n", + "\n", + "def create_pascal_label_colormap():\n", + " \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n", + "\n", + " Returns:\n", + " A Colormap for visualizing segmentation results.\n", + " \"\"\"\n", + " colormap = 
np.zeros((256, 3), dtype=int)\n", + " ind = np.arange(256, dtype=int)\n", + "\n", + " for shift in reversed(range(8)):\n", + " for channel in range(3):\n", + " colormap[:, channel] |= ((ind \u003e\u003e channel) \u0026 1) \u003c\u003c shift\n", + " ind \u003e\u003e= 3\n", + "\n", + " return colormap\n", + "\n", + "\n", + "def label_to_color_image(label):\n", + " \"\"\"Adds color defined by the dataset colormap to the label.\n", + "\n", + " Args:\n", + " label: A 2D array with integer type, storing the segmentation label.\n", + "\n", + " Returns:\n", + " result: A 2D array with floating type. The element of the array\n", + " is the color indexed by the corresponding element in the input label\n", + " to the PASCAL color map.\n", + "\n", + " Raises:\n", + " ValueError: If label is not of rank 2 or its value is larger than color\n", + " map maximum entry.\n", + " \"\"\"\n", + " if label.ndim != 2:\n", + " raise ValueError('Expect 2-D input label')\n", + "\n", + " colormap = create_pascal_label_colormap()\n", + "\n", + " if np.max(label) \u003e= len(colormap):\n", + " raise ValueError('label value too large.')\n", + "\n", + " return colormap[label]\n", + "\n", + "\n", + "def vis_segmentation(image, seg_map):\n", + " \"\"\"Visualizes input image, segmentation map and overlay view.\"\"\"\n", + " plt.figure(figsize=(15, 5))\n", + " grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n", + "\n", + " plt.subplot(grid_spec[0])\n", + " plt.imshow(image)\n", + " plt.axis('off')\n", + " plt.title('input image')\n", + "\n", + " plt.subplot(grid_spec[1])\n", + " seg_image = label_to_color_image(seg_map).astype(np.uint8)\n", + " plt.imshow(seg_image)\n", + " plt.axis('off')\n", + " plt.title('segmentation map')\n", + "\n", + " plt.subplot(grid_spec[2])\n", + " plt.imshow(image)\n", + " plt.imshow(seg_image, alpha=0.7)\n", + " plt.axis('off')\n", + " plt.title('segmentation overlay')\n", + "\n", + " unique_labels = np.unique(seg_map)\n", + " ax = plt.subplot(grid_spec[3])\n", + " plt.imshow(\n", + " FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n", + " ax.yaxis.tick_right()\n", + " plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n", + " plt.xticks([], [])\n", + " ax.tick_params(width=0.0)\n", + " plt.grid('off')\n", + " plt.show()\n", + "\n", + "\n", + "LABEL_NAMES = np.asarray([\n", + " 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n", + " 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n", + " 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'\n", + "])\n", + "\n", + "FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)\n", + "FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nGcZzNkASG9A" + }, + "source": [ + "## Select a pretrained model\n", + "We have trained the DeepLab model using various backbone networks. Select one from the MODEL_NAME list." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "c4oXKmnjw6i_" + }, + "outputs": [], + "source": [ + "MODEL_NAME = 'mobilenetv2_coco_voctrainaug' # @param ['mobilenetv2_coco_voctrainaug', 'mobilenetv2_coco_voctrainval', 'xception_coco_voctrainaug', 'xception_coco_voctrainval']\n", + "\n", + "_DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/'\n", + "_MODEL_URLS = {\n", + " 'mobilenetv2_coco_voctrainaug':\n", + " 'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz',\n", + " 'mobilenetv2_coco_voctrainval':\n", + " 'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz',\n", + " 'xception_coco_voctrainaug':\n", + " 'deeplabv3_pascal_train_aug_2018_01_04.tar.gz',\n", + " 'xception_coco_voctrainval':\n", + " 'deeplabv3_pascal_trainval_2018_01_04.tar.gz',\n", + "}\n", + "_TARBALL_NAME = 'deeplab_model.tar.gz'\n", + "\n", + "model_dir = tempfile.mkdtemp()\n", + "tf.gfile.MakeDirs(model_dir)\n", + "\n", + "download_path = os.path.join(model_dir, _TARBALL_NAME)\n", + "print('downloading model, this might take a while...')\n", + "urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME],\n", + " download_path)\n", + "print('download completed! loading DeepLab model...')\n", + "\n", + "MODEL = DeepLabModel(download_path)\n", + "print('model loaded successfully!')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "SZst78N-4OKO" + }, + "source": [ + "## Run on sample images\n", + "\n", + "Select one of sample images (leave `IMAGE_URL` empty) or feed any internet image\n", + "url for inference.\n", + "\n", + "Note that this colab uses single scale inference for fast computation,\n", + "so the results may slightly differ from the visualizations in the\n", + "[README](https://github.com/tensorflow/models/blob/master/research/deeplab/README.md) file,\n", + "which uses multi-scale and left-right flipped inputs." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "edGukUHXyymr" + }, + "outputs": [], + "source": [ + "\n", + "SAMPLE_IMAGE = 'image1' # @param ['image1', 'image2', 'image3']\n", + "IMAGE_URL = '' #@param {type:\"string\"}\n", + "\n", + "_SAMPLE_URL = ('https://github.com/tensorflow/models/blob/master/research/'\n", + " 'deeplab/g3doc/img/%s.jpg?raw=true')\n", + "\n", + "\n", + "def run_visualization(url):\n", + " \"\"\"Inferences DeepLab model and visualizes result.\"\"\"\n", + " try:\n", + " f = urllib.request.urlopen(url)\n", + " jpeg_str = f.read()\n", + " original_im = Image.open(BytesIO(jpeg_str))\n", + " except IOError:\n", + " print('Cannot retrieve image. Please check url: ' + url)\n", + " return\n", + "\n", + " print('running deeplab on image %s...' 
% url)\n", + " resized_im, seg_map = MODEL.run(original_im)\n", + "\n", + " vis_segmentation(resized_im, seg_map)\n", + "\n", + "\n", + "image_url = IMAGE_URL or _SAMPLE_URL % SAMPLE_IMAGE\n", + "run_visualization(image_url)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "aUbVoHScTJYe" + }, + "source": [ + "## What's next\n", + "\n", + "* Learn about [Cloud TPUs](https://cloud.google.com/tpu/docs) that Google designed and optimized specifically to speed up and scale up ML workloads for training and inference and to enable ML engineers and researchers to iterate more quickly.\n", + "* Explore the range of [Cloud TPU tutorials and Colabs](https://cloud.google.com/tpu/docs/tutorials) to find other examples that can be used when implementing your ML project.\n", + "* For more information on running the DeepLab model on Cloud TPUs, see the [DeepLab tutorial](https://cloud.google.com/tpu/docs/tutorials/deeplab).\n" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "DeepLab Demo.ipynb", + "provenance": [], + "toc_visible": true, + "version": "0.3.2" + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/deeplab/models/research/deeplab/deprecated/__init__.py b/deeplab/models/research/deeplab/deprecated/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deeplab/models/research/deeplab/deprecated/segmentation_dataset.py b/deeplab/models/research/deeplab/deprecated/segmentation_dataset.py new file mode 100644 index 0000000..4a1de09 --- /dev/null +++ b/deeplab/models/research/deeplab/deprecated/segmentation_dataset.py @@ -0,0 +1,210 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides data from semantic segmentation datasets. + +The SegmentationDataset class provides both images and annotations (semantic +segmentation and/or instance segmentation) for TensorFlow. Currently, we +support the following datasets: + +1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). + +PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects +(e.g., bike, person, and so on) and leaves all the other semantic classes as +one background class. The dataset contains 1464, 1449, and 1456 annotated +images for the training, validation and test respectively. + +2. Cityscapes dataset (https://www.cityscapes-dataset.com) + +The Cityscapes dataset contains 19 semantic labels (such as road, person, car, +and so on) for urban street scenes. + +3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K) + +The ADE20K dataset contains 150 semantic labels both urban street scenes and +indoor scenes. + +References: + M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn, + and A. 
Zisserman, The pascal visual object classes challenge a retrospective. + IJCV, 2014. + + M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson, + U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban + scene understanding," In Proc. of CVPR, 2016. + + B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing + through ADE20K dataset", In Proc. of CVPR, 2017. +""" +import collections +import os.path +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +slim = contrib_slim + +dataset = slim.dataset + +tfexample_decoder = slim.tfexample_decoder + + +_ITEMS_TO_DESCRIPTIONS = { + 'image': 'A color image of varying height and width.', + 'labels_class': ('A semantic segmentation label whose size matches image.' + 'Its values range from 0 (background) to num_classes.'), +} + +# Named tuple to describe the dataset properties. +DatasetDescriptor = collections.namedtuple( + 'DatasetDescriptor', + ['splits_to_sizes', # Splits of the dataset into training, val, and test. + 'num_classes', # Number of semantic classes, including the background + # class (if exists). For example, there are 20 + # foreground classes + 1 background class in the PASCAL + # VOC 2012 dataset. Thus, we set num_classes=21. + 'ignore_label', # Ignore label value. + ] +) + +_CITYSCAPES_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train_fine': 2975, + 'val_fine': 500, + }, + num_classes=19, + ignore_label=255, +) + +_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 1464, + 'train_aug': 10582, + 'trainval': 2913, + 'val': 1449, + }, + num_classes=21, + ignore_label=255, +) + +# These number (i.e., 'train'/'test') seems to have to be hard coded +# You are required to figure it out for your training/testing example. +_ADE20K_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 20210, # num of samples in images/training + 'val': 2000, # num of samples in images/validation + }, + num_classes=151, + ignore_label=0, +) + +_PQR_INFORMATION = DatasetDescriptor( +splits_to_sizes={ + 'train': 3, + 'val': 2, + 'trainval': 5, +}, +num_classes=2, +ignore_label=255, +) + +_DATASETS_INFORMATION = { + 'cityscapes': _CITYSCAPES_INFORMATION, + 'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION, + 'ade20k': _ADE20K_INFORMATION, + 'pqr': _PQR_INFORMATION, +} + +# Default file pattern of TFRecord of TensorFlow Example. +_FILE_PATTERN = '%s-*' + + +def get_cityscapes_dataset_name(): + return 'cityscapes' + + +def get_dataset(dataset_name, split_name, dataset_dir): + """Gets an instance of slim Dataset. + + Args: + dataset_name: Dataset name. + split_name: A train/val Split name. + dataset_dir: The directory of the dataset sources. + + Returns: + An instance of slim Dataset. + + Raises: + ValueError: if the dataset_name or split_name is not recognized. + """ + if dataset_name not in _DATASETS_INFORMATION: + raise ValueError('The specified dataset is not supported yet.') + + splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes + + if split_name not in splits_to_sizes: + raise ValueError('data split name %s not recognized' % split_name) + + # Prepare the variables for different datasets. + num_classes = _DATASETS_INFORMATION[dataset_name].num_classes + ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label + + file_pattern = _FILE_PATTERN + file_pattern = os.path.join(dataset_dir, file_pattern % split_name) + + # Specify how the TF-Examples are decoded. 
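+  # Each serialized example carries the encoded JPEG image, its filename and
+  # spatial dimensions, and a PNG-encoded per-pixel class label map; the slim
+  # tfexample_decoder handlers below map these features back to dense tensors.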
+ keys_to_features = { + 'image/encoded': tf.FixedLenFeature( + (), tf.string, default_value=''), + 'image/filename': tf.FixedLenFeature( + (), tf.string, default_value=''), + 'image/format': tf.FixedLenFeature( + (), tf.string, default_value='jpeg'), + 'image/height': tf.FixedLenFeature( + (), tf.int64, default_value=0), + 'image/width': tf.FixedLenFeature( + (), tf.int64, default_value=0), + 'image/segmentation/class/encoded': tf.FixedLenFeature( + (), tf.string, default_value=''), + 'image/segmentation/class/format': tf.FixedLenFeature( + (), tf.string, default_value='png'), + } + items_to_handlers = { + 'image': tfexample_decoder.Image( + image_key='image/encoded', + format_key='image/format', + channels=3), + 'image_name': tfexample_decoder.Tensor('image/filename'), + 'height': tfexample_decoder.Tensor('image/height'), + 'width': tfexample_decoder.Tensor('image/width'), + 'labels_class': tfexample_decoder.Image( + image_key='image/segmentation/class/encoded', + format_key='image/segmentation/class/format', + channels=1), + } + + decoder = tfexample_decoder.TFExampleDecoder( + keys_to_features, items_to_handlers) + + return dataset.Dataset( + data_sources=file_pattern, + reader=tf.TFRecordReader, + decoder=decoder, + num_samples=splits_to_sizes[split_name], + items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, + ignore_label=ignore_label, + num_classes=num_classes, + name=dataset_name, + multi_label=True) diff --git a/deeplab/models/research/deeplab/eval.py b/deeplab/models/research/deeplab/eval.py new file mode 100644 index 0000000..4f5fb8b --- /dev/null +++ b/deeplab/models/research/deeplab/eval.py @@ -0,0 +1,227 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Evaluation script for the DeepLab model. + +See model.py for more details and usage. +""" + +import numpy as np +import six +import tensorflow as tf +from tensorflow.contrib import metrics as contrib_metrics +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.contrib import tfprof as contrib_tfprof +from tensorflow.contrib import training as contrib_training +from deeplab import common +from deeplab import model +from deeplab.datasets import data_generator + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +# Settings for log directories. + +flags.DEFINE_string('eval_logdir', None, 'Where to write the event logs.') + +flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.') + +# Settings for evaluating the model. 
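+# Note: eval_crop_size should typically be at least as large as the largest
+# image in the evaluation split, since inputs are padded up to this size.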
+ +flags.DEFINE_integer('eval_batch_size', 1, + 'The number of images in each batch during evaluation.') + +flags.DEFINE_list('eval_crop_size', '513,513', + 'Image crop size [height, width] for evaluation.') + +flags.DEFINE_integer('eval_interval_secs', 60 * 5, + 'How often (in seconds) to run evaluation.') + +# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or +# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note +# one could use different atrous_rates/output_stride during training/evaluation. +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 16, + 'The ratio of input to output spatial resolution.') + +# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test. +flags.DEFINE_multi_float('eval_scales', [1.0], + 'The scales to resize images for evaluation.') + +# Change to True for adding flipped images during test. +flags.DEFINE_bool('add_flipped_images', False, + 'Add flipped images for evaluation or not.') + +flags.DEFINE_integer( + 'quantize_delay_step', -1, + 'Steps to start quantized training. If < 0, will not quantize model.') + +# Dataset settings. + +flags.DEFINE_string('dataset', 'pascal_voc_seg', + 'Name of the segmentation dataset.') + +flags.DEFINE_string('eval_split', 'val', + 'Which split of the dataset used for evaluation') + +flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.') + +flags.DEFINE_integer('max_number_of_evaluations', 0, + 'Maximum number of eval iterations. Will loop ' + 'indefinitely upon nonpositive values.') + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + + dataset = data_generator.Dataset( + dataset_name=FLAGS.dataset, + split_name=FLAGS.eval_split, + dataset_dir=FLAGS.dataset_dir, + batch_size=FLAGS.eval_batch_size, + crop_size=[int(sz) for sz in FLAGS.eval_crop_size], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + model_variant=FLAGS.model_variant, + num_readers=2, + is_training=False, + should_shuffle=False, + should_repeat=False) + + tf.gfile.MakeDirs(FLAGS.eval_logdir) + tf.logging.info('Evaluating on %s set', FLAGS.eval_split) + + with tf.Graph().as_default(): + samples = dataset.get_one_shot_iterator().get_next() + + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes}, + crop_size=[int(sz) for sz in FLAGS.eval_crop_size], + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + # Set shape in order for tf.contrib.tfprof.model_analyzer to work properly. 
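+    # The static shape below mirrors the eval flags:
+    # [eval_batch_size, eval_crop_size[0], eval_crop_size[1], 3].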
+ samples[common.IMAGE].set_shape( + [FLAGS.eval_batch_size, + int(FLAGS.eval_crop_size[0]), + int(FLAGS.eval_crop_size[1]), + 3]) + if tuple(FLAGS.eval_scales) == (1.0,): + tf.logging.info('Performing single-scale test.') + predictions = model.predict_labels(samples[common.IMAGE], model_options, + image_pyramid=FLAGS.image_pyramid) + else: + tf.logging.info('Performing multi-scale test.') + if FLAGS.quantize_delay_step >= 0: + raise ValueError( + 'Quantize mode is not supported with multi-scale test.') + + predictions = model.predict_labels_multi_scale( + samples[common.IMAGE], + model_options=model_options, + eval_scales=FLAGS.eval_scales, + add_flipped_images=FLAGS.add_flipped_images) + predictions = predictions[common.OUTPUT_TYPE] + predictions = tf.reshape(predictions, shape=[-1]) + labels = tf.reshape(samples[common.LABEL], shape=[-1]) + weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label)) + + # Set ignore_label regions to label 0, because metrics.mean_iou requires + # range of labels = [0, dataset.num_classes). Note the ignore_label regions + # are not evaluated since the corresponding regions contain weights = 0. + labels = tf.where( + tf.equal(labels, dataset.ignore_label), tf.zeros_like(labels), labels) + + predictions_tag = 'miou' + for eval_scale in FLAGS.eval_scales: + predictions_tag += '_' + str(eval_scale) + if FLAGS.add_flipped_images: + predictions_tag += '_flipped' + + # Define the evaluation metric. + metric_map = {} + num_classes = dataset.num_of_classes + metric_map['eval/%s_overall' % predictions_tag] = tf.metrics.mean_iou( + labels=labels, predictions=predictions, num_classes=num_classes, + weights=weights) + # IoU for each class. + one_hot_predictions = tf.one_hot(predictions, num_classes) + one_hot_predictions = tf.reshape(one_hot_predictions, [-1, num_classes]) + one_hot_labels = tf.one_hot(labels, num_classes) + one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes]) + for c in range(num_classes): + predictions_tag_c = '%s_class_%d' % (predictions_tag, c) + tp, tp_op = tf.metrics.true_positives( + labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c], + weights=weights) + fp, fp_op = tf.metrics.false_positives( + labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c], + weights=weights) + fn, fn_op = tf.metrics.false_negatives( + labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c], + weights=weights) + tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op) + iou = tf.where(tf.greater(tp + fn, 0.0), + tp / (tp + fn + fp), + tf.constant(np.NaN)) + metric_map['eval/%s' % predictions_tag_c] = (iou, tp_fp_fn_op) + + (metrics_to_values, + metrics_to_updates) = contrib_metrics.aggregate_metric_map(metric_map) + + summary_ops = [] + for metric_name, metric_value in six.iteritems(metrics_to_values): + op = tf.summary.scalar(metric_name, metric_value) + op = tf.Print(op, [metric_value], metric_name) + summary_ops.append(op) + + summary_op = tf.summary.merge(summary_ops) + summary_hook = contrib_training.SummaryAtEndHook( + log_dir=FLAGS.eval_logdir, summary_op=summary_op) + hooks = [summary_hook] + + num_eval_iters = None + if FLAGS.max_number_of_evaluations > 0: + num_eval_iters = FLAGS.max_number_of_evaluations + + if FLAGS.quantize_delay_step >= 0: + contrib_quantize.create_eval_graph() + + contrib_tfprof.model_analyzer.print_model_analysis( + tf.get_default_graph(), + tfprof_options=contrib_tfprof.model_analyzer + .TRAINABLE_VARS_PARAMS_STAT_OPTIONS) + contrib_tfprof.model_analyzer.print_model_analysis( + 
tf.get_default_graph(), + tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS) + contrib_training.evaluate_repeatedly( + checkpoint_dir=FLAGS.checkpoint_dir, + master=FLAGS.master, + eval_ops=list(metrics_to_updates.values()), + max_number_of_evaluations=num_eval_iters, + hooks=hooks, + eval_interval_secs=FLAGS.eval_interval_secs) + + +if __name__ == '__main__': + flags.mark_flag_as_required('checkpoint_dir') + flags.mark_flag_as_required('eval_logdir') + flags.mark_flag_as_required('dataset_dir') + tf.app.run() diff --git a/deeplab/models/research/deeplab/evaluation/README.md b/deeplab/models/research/deeplab/evaluation/README.md new file mode 100644 index 0000000..6925538 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/README.md @@ -0,0 +1,311 @@ +# Evaluation Metrics for Whole Image Parsing + +Whole Image Parsing [1], also known as Panoptic Segmentation [2], generalizes +the tasks of semantic segmentation for "stuff" classes and instance +segmentation for "thing" classes, assigning both semantic and instance labels +to every pixel in an image. + +Previous works evaluate the parsing result with separate metrics (e.g., one for +semantic segmentation result and one for object detection result). Recently, +Kirillov et al. propose the unified instance-based Panoptic Quality (PQ) metric +[2] into several benchmarks [3, 4]. + +However, we notice that the instance-based PQ metric often places +disproportionate emphasis on small instance parsing, as well as on "thing" over +"stuff" classes. To remedy these effects, we propose an alternative +region-based Parsing Covering (PC) metric [5], which adapts the Covering +metric [6], previously used for class-agnostics segmentation quality +evaluation, to the task of image parsing. + +Here, we provide implementation of both PQ and PC for evaluating the parsing +results. We briefly explain both metrics below for reference. + +## Panoptic Quality (PQ) + +Given a groundtruth segmentation S and a predicted segmentation S', PQ is +defined as follows: + +

+PQ = \frac{\sum_{(R, R') \in TP} IoU(R, R')}{|TP| + \frac{1}{2} |FP| + \frac{1}{2} |FN|}

+
+where R and R' are groundtruth regions and predicted regions respectively,
+and |TP|, |FP|, and |FN| are the number of true positives, false positives,
+and false negatives. The matching is determined by a threshold of 0.5
+Intersection-Over-Union (IOU).
+
+PQ treats all regions of the same "stuff" class as one instance, and the
+size of instances is not considered. For example, instances with 10 × 10
+pixels contribute equally to the metric as instances with 1000 × 1000 pixels.
+Therefore, PQ is sensitive to false positives with small regions and some
+heuristics could improve the performance, such as removing those small
+regions (as also pointed out in the open-sourced evaluation code from [2]).
+Thus, we argue that PQ is suitable in applications where one cares equally for
+the parsing quality of instances irrespective of their sizes.
+
+## Parsing Covering (PC)
+
+We notice that there are applications where one pays more attention to large
+objects, e.g., autonomous driving (where nearby objects are more important
+than far away ones). Motivated by this, we propose to also evaluate the
+quality of image parsing results by extending the existing Covering metric [6],
+which accounts for instance sizes. Specifically, our proposed metric, Parsing
+Covering (PC), is defined as follows:
+

+Cov_i = \frac{1}{N_i} \sum_{R \in S_i} |R| \cdot \max_{R' \in S_i'} IoU(R, R')
+
+PC = \frac{1}{C} \sum_{i=1}^{C} Cov_i

+ + +where Si and Si' are the groundtruth segmentation and +predicted segmentation for the i-th semantic class respectively, and +Ni is the total number of pixels of groundtruth regions from +Si . The Covering for class i, Covi , is computed in +the same way as the original Covering metric except that only groundtruth +regions from Si and predicted regions from Si' are +considered. PC is then obtained by computing the average of Covi +over C semantic classes. + +A notable difference between PQ and the proposed PC is that there is no +matching involved in PC and hence no matching threshold. As an attempt to +treat equally "thing" and "stuff", the segmentation of "stuff" classes still +receives partial PC score if the segmentation is only partially correct. For +example, if one out of three equally-sized trees is perfectly segmented, the +model will get the same partial score by using PC regardless of considering +"tree" as "stuff" or "thing". + +## Tutorial + +To evaluate the parsing results with PQ and PC, we provide two options: + +1. Python off-line evaluation with results saved in the [COCO format](http://cocodataset.org/#format-results). +2. TensorFlow on-line evaluation. + +Below, we explain each option in detail. + +#### 1. Python off-line evaluation with results saved in COCO format + +[COCO result format](http://cocodataset.org/#format-results) has been +adopted by several benchmarks [3, 4]. Therefore, we provide a convenient +function, `eval_coco_format`, to evaluate the results saved in COCO format +in terms of PC and re-implemented PQ. + +Before using the provided function, the users need to download the official COCO +panotpic segmentation task API. Please see [installation](../g3doc/installation.md#add-libraries-to-pythonpath) +for reference. + +Once the official COCO panoptic segmentation task API is downloaded, the +users should be able to run the `eval_coco_format.py` to evaluate the parsing +results in terms of both PC and reimplemented PQ. + +To be concrete, let's take a look at the function, `eval_coco_format` in +`eval_coco_format.py`: + +```python +eval_coco_format(gt_json_file, + pred_json_file, + gt_folder=None, + pred_folder=None, + metric='pq', + num_categories=201, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=None, + normalize_by_image_size=True, + num_workers=0, + print_digits=3): + +``` +where + +1. `gt_json_file`: Path to a JSON file giving ground-truth annotations in COCO +format. +2. `pred_json_file`: Path to a JSON file for the predictions to evaluate. +3. `gt_folder`: Folder containing panoptic-format ID images to match +ground-truth annotations to image regions. +4. `pred_folder`: Path to a folder containing ID images for predictions. +5. `metric`: Name of a metric to compute. Set to `pc`, `pq` for evaluation in PC +or PQ, respectively. +6. `num_categories`: The number of segmentation categories (or "classes") in the +dataset. +7. `ignored_label`: A category id that is ignored in evaluation, e.g. the "void" +label in COCO panoptic segmentation dataset. +8. `max_instances_per_category`: The maximum number of instances for each +category to ensure unique instance labels. +9. `intersection_offset`: The maximum number of unique labels. +10. `normalize_by_image_size`: Whether to normalize groundtruth instance region +areas by image size when using PC. +11. `num_workers`: If set to a positive number, will spawn child processes to +compute parts of the metric in parallel by splitting the images between the +workers. 
If set to -1, will use the value of multiprocessing.cpu_count(). +12. `print_digits`: Number of significant digits to print in summary of computed +metrics. + +The input arguments have default values set for the COCO panoptic segmentation +dataset. Thus, users only need to provide the `gt_json_file` and the +`pred_json_file` (following the COCO format) to run the evaluation on COCO with +PQ. If users want to evaluate the results on other datasets, they may need +to change the default values. + +As an example, the interested users could take a look at the provided unit +test, `test_compare_pq_with_reference_eval`, in `eval_coco_format_test.py`. + +#### 2. TensorFlow on-line evaluation + +Users may also want to run the TensorFlow on-line evaluation, similar to the +[tf.contrib.metrics.streaming_mean_iou](https://www.tensorflow.org/api_docs/python/tf/contrib/metrics/streaming_mean_iou). + +Below, we provide a code snippet that shows how to use the provided +`streaming_panoptic_quality` and `streaming_parsing_covering`. + +```python +metric_map = {} +metric_map['panoptic_quality'] = streaming_metrics.streaming_panoptic_quality( + category_label, + instance_label, + category_prediction, + instance_prediction, + num_classes=201, + max_instances_per_category=256, + ignored_label=0, + offset=256*256) +metric_map['parsing_covering'] = streaming_metrics.streaming_parsing_covering( + category_label, + instance_label, + category_prediction, + instance_prediction, + num_classes=201, + max_instances_per_category=256, + ignored_label=0, + offset=256*256, + normalize_by_image_size=True) +metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map( + metric_map) +``` +where `metric_map` is a dictionary storing the streamed results of PQ and PC. + +The `category_label` and the `instance_label` are the semantic segmentation and +instance segmentation groundtruth, respectively. That is, in the panoptic +segmentation format: +panoptic_label = category_label * max_instances_per_category + instance_label. +Similarly, the `category_prediction` and the `instance_prediction` are the +predicted semantic segmentation and instance segmentation, respectively. + +Below, we provide a code snippet about how to summarize the results in the +context of tf.summary. + +```python +summary_ops = [] +for metric_name, metric_value in metrics_to_values.iteritems(): + if metric_name == 'panoptic_quality': + [pq, sq, rq, total_tp, total_fn, total_fp] = tf.unstack( + metric_value, 6, axis=0) + panoptic_metrics = { + # Panoptic quality. + 'pq': pq, + # Segmentation quality. + 'sq': sq, + # Recognition quality. + 'rq': rq, + # Total true positives. + 'total_tp': total_tp, + # Total false negatives. + 'total_fn': total_fn, + # Total false positives. + 'total_fp': total_fp, + } + # Find the valid classes that will be used for evaluation. We will + # ignore the `ignore_label` class and other classes which have (tp + fn + # + fp) equal to 0. 
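+    # (A class with tp + fn + fp == 0 occurs in neither the groundtruth nor
+    # the predictions, so it is excluded from the averaged summaries below.)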
+ valid_classes = tf.logical_and( + tf.not_equal(tf.range(0, num_classes), void_label), + tf.not_equal(total_tp + total_fn + total_fp, 0)) + for target_metric, target_value in panoptic_metrics.iteritems(): + output_metric_name = '{}_{}'.format(metric_name, target_metric) + op = tf.summary.scalar( + output_metric_name, + tf.reduce_mean(tf.boolean_mask(target_value, valid_classes))) + op = tf.Print(op, [target_value], output_metric_name + '_classwise: ', + summarize=num_classes) + op = tf.Print( + op, + [tf.reduce_mean(tf.boolean_mask(target_value, valid_classes))], + output_metric_name + '_mean: ', + summarize=1) + summary_ops.append(op) + elif metric_name == 'parsing_covering': + [per_class_covering, + total_per_class_weighted_ious, + total_per_class_gt_areas] = tf.unstack(metric_value, 3, axis=0) + # Find the valid classes that will be used for evaluation. We will + # ignore the `void_label` class and other classes which have + # total_per_class_weighted_ious + total_per_class_gt_areas equal to 0. + valid_classes = tf.logical_and( + tf.not_equal(tf.range(0, num_classes), void_label), + tf.not_equal( + total_per_class_weighted_ious + total_per_class_gt_areas, 0)) + op = tf.summary.scalar( + metric_name, + tf.reduce_mean(tf.boolean_mask(per_class_covering, valid_classes))) + op = tf.Print(op, [per_class_covering], metric_name + '_classwise: ', + summarize=num_classes) + op = tf.Print( + op, + [tf.reduce_mean( + tf.boolean_mask(per_class_covering, valid_classes))], + metric_name + '_mean: ', + summarize=1) + summary_ops.append(op) + else: + raise ValueError('The metric_name "%s" is not supported.' % metric_name) +``` + +Afterwards, the users could use the following code to run the evaluation in +TensorFlow. + +Users can take a look at eval.py for reference which provides a simple +example to run the streaming evaluation of mIOU for semantic segmentation. + +```python +metric_values = slim.evaluation.evaluation_loop( + master=FLAGS.master, + checkpoint_dir=FLAGS.checkpoint_dir, + logdir=FLAGS.eval_logdir, + num_evals=num_batches, + eval_op=metrics_to_updates.values(), + final_op=metrics_to_values.values(), + summary_op=tf.summary.merge(summary_ops), + max_number_of_evaluations=FLAGS.max_number_of_evaluations, + eval_interval_secs=FLAGS.eval_interval_secs) +``` + + +### References + +1. **Image Parsing: Unifying Segmentation, Detection, and Recognition**
+   Zhuowen Tu, Xiangrong Chen, Alan L. Yuille, and Song-Chun Zhu
+   IJCV, 2005.
+
+2. **Panoptic Segmentation**
+   Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother, and Piotr Dollár
+   arXiv:1801.00868, 2018.
+
+3. **Microsoft COCO: Common Objects in Context**
+   Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross Girshick,
+   James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, and Piotr Dollár
+   In the Proc. of ECCV, 2014.
+
+4. **The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes**
+   Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulò, and Peter Kontschieder
+   In the Proc. of ICCV, 2017.
+
+5. **DeeperLab: Single-Shot Image Parser**
+   Tien-Ju Yang, Maxwell D. Collins, Yukun Zhu, Jyh-Jing Hwang, Ting Liu,
+   Xiao Zhang, Vivienne Sze, George Papandreou, and Liang-Chieh Chen
+   arXiv:1902.05093, 2019.
+
+6. **Contour Detection and Hierarchical Image Segmentation**
+   Pablo Arbelaez, Michael Maire, Charless Fowlkes, and Jitendra Malik
+ PAMI, 2011 diff --git a/deeplab/models/research/deeplab/evaluation/__init__.py b/deeplab/models/research/deeplab/evaluation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deeplab/models/research/deeplab/evaluation/base_metric.py b/deeplab/models/research/deeplab/evaluation/base_metric.py new file mode 100644 index 0000000..ee7606e --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/base_metric.py @@ -0,0 +1,191 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines the top-level interface for evaluating segmentations.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import numpy as np +import six + + +_EPSILON = 1e-10 + + +def realdiv_maybe_zero(x, y): + """Element-wise x / y where y may contain zeros, for those returns 0 too.""" + return np.where( + np.less(np.abs(y), _EPSILON), np.zeros_like(x), np.divide(x, y)) + + +@six.add_metaclass(abc.ABCMeta) +class SegmentationMetric(object): + """Abstract base class for computers of segmentation metrics. + + Subclasses will implement both: + 1. Comparing the predicted segmentation for an image with the groundtruth. + 2. Computing the final metric over a set of images. + These are often done as separate steps, due to the need to accumulate + intermediate values other than the metric itself across images, computing the + actual metric value only on these accumulations after all the images have been + compared. + + A simple usage would be: + + metric = MetricImplementation(...) + for , in evaluation_set: + = run_segmentation() + metric.compare_and_accumulate(, ) + print(metric.result()) + + """ + + def __init__(self, num_categories, ignored_label, max_instances_per_category, + offset): + """Base initialization for SegmentationMetric. + + Args: + num_categories: The number of segmentation categories (or "classes" in the + dataset. + ignored_label: A category id that is ignored in evaluation, e.g. the void + label as defined in COCO panoptic segmentation dataset. + max_instances_per_category: The maximum number of instances for each + category. Used in ensuring unique instance labels. + offset: The maximum number of unique labels. This is used, by multiplying + the ground-truth labels, to generate unique ids for individual regions + of overlap between groundtruth and predicted segments. 
+ """ + self.num_categories = num_categories + self.ignored_label = ignored_label + self.max_instances_per_category = max_instances_per_category + self.offset = offset + self.reset() + + def _naively_combine_labels(self, category_array, instance_array): + """Naively creates a combined label array from categories and instances.""" + return (category_array.astype(np.uint32) * self.max_instances_per_category + + instance_array.astype(np.uint32)) + + @abc.abstractmethod + def compare_and_accumulate( + self, groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array): + """Compares predicted segmentation with groundtruth, accumulates its metric. + + It is not assumed that instance ids are unique across different categories. + See for example combine_semantic_and_instance_predictions.py in official + PanopticAPI evaluation code for issues to consider when fusing category + and instance labels. + + Instances ids of the ignored category have the meaning that id 0 is "void" + and remaining ones are crowd instances. + + Args: + groundtruth_category_array: A 2D numpy uint16 array of groundtruth + per-pixel category labels. + groundtruth_instance_array: A 2D numpy uint16 array of groundtruth + instance labels. + predicted_category_array: A 2D numpy uint16 array of predicted per-pixel + category labels. + predicted_instance_array: A 2D numpy uint16 array of predicted instance + labels. + + Returns: + The value of the metric over all comparisons done so far, including this + one, as a float scalar. + """ + raise NotImplementedError('Must be implemented in subclasses.') + + @abc.abstractmethod + def result(self): + """Computes the metric over all comparisons done so far.""" + raise NotImplementedError('Must be implemented in subclasses.') + + @abc.abstractmethod + def detailed_results(self, is_thing=None): + """Computes and returns the detailed final metric results. + + Args: + is_thing: A boolean array of length `num_categories`. The entry + `is_thing[category_id]` is True iff that category is a "thing" category + instead of "stuff." + + Returns: + A dictionary with a breakdown of metrics and/or metric factors by things, + stuff, and all categories. + """ + raise NotImplementedError('Not implemented in subclasses.') + + @abc.abstractmethod + def result_per_category(self): + """For supported metrics, return individual per-category metric values. + + Returns: + A numpy array of shape `[self.num_categories]`, where index `i` is the + metrics value over only that category. + """ + raise NotImplementedError('Not implemented in subclass.') + + def print_detailed_results(self, is_thing=None, print_digits=3): + """Prints out a detailed breakdown of metric results. + + Args: + is_thing: A boolean array of length num_categories. + `is_thing[category_id]` will say whether that category is a "thing" + rather than "stuff." + print_digits: Number of significant digits to print in computed metrics. + """ + raise NotImplementedError('Not implemented in subclass.') + + @abc.abstractmethod + def merge(self, other_instance): + """Combines the accumulated results of another instance into self. + + The following two cases should put `metric_a` into an equivalent state. + + Case 1 (with merge): + + metric_a = MetricsSubclass(...) + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + + metric_b = MetricsSubclass(...) 
+ metric_b.compare_and_accumulate() + metric_b.compare_and_accumulate() + + metric_a.merge(metric_b) + + Case 2 (without merge): + + metric_a = MetricsSubclass(...) + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + + Args: + other_instance: Another compatible instance of the same metric subclass. + """ + raise NotImplementedError('Not implemented in subclass.') + + @abc.abstractmethod + def reset(self): + """Resets the accumulation to the metric class's state at initialization. + + Note that this function will be called in SegmentationMetric.__init__. + """ + raise NotImplementedError('Must be implemented in subclasses.') diff --git a/deeplab/models/research/deeplab/evaluation/eval_coco_format.py b/deeplab/models/research/deeplab/evaluation/eval_coco_format.py new file mode 100644 index 0000000..1a26446 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/eval_coco_format.py @@ -0,0 +1,338 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Computes evaluation metrics on groundtruth and predictions in COCO format. + +The Common Objects in Context (COCO) dataset defines a format for specifying +combined semantic and instance segmentations as "panoptic" segmentations. This +is done with the combination of JSON and image files as specified at: +http://cocodataset.org/#format-results +where the JSON file specifies the overall structure of the result, +including the categories for each annotation, and the images specify the image +region for each annotation in that image by its ID. + +This script computes additional metrics such as Parsing Covering on datasets and +predictions in this format. An implementation of Panoptic Quality is also +provided for convenience. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import json +import multiprocessing +import os + +from absl import app +from absl import flags +from absl import logging +import numpy as np +from PIL import Image +import utils as panopticapi_utils +import six + +from deeplab.evaluation import panoptic_quality +from deeplab.evaluation import parsing_covering + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + 'gt_json_file', None, + ' Path to a JSON file giving ground-truth annotations in COCO format.') +flags.DEFINE_string('pred_json_file', None, + 'Path to a JSON file for the predictions to evaluate.') +flags.DEFINE_string( + 'gt_folder', None, + 'Folder containing panoptic-format ID images to match ground-truth ' + 'annotations to image regions.') +flags.DEFINE_string('pred_folder', None, + 'Folder containing ID images for predictions.') +flags.DEFINE_enum( + 'metric', 'pq', ['pq', 'pc'], 'Shorthand name of a metric to compute. 
' + 'Supported values are:\n' + 'Panoptic Quality (pq)\n' + 'Parsing Covering (pc)') +flags.DEFINE_integer( + 'num_categories', 201, + 'The number of segmentation categories (or "classes") in the dataset.') +flags.DEFINE_integer( + 'ignored_label', 0, + 'A category id that is ignored in evaluation, e.g. the void label as ' + 'defined in COCO panoptic segmentation dataset.') +flags.DEFINE_integer( + 'max_instances_per_category', 256, + 'The maximum number of instances for each category. Used in ensuring ' + 'unique instance labels.') +flags.DEFINE_integer('intersection_offset', None, + 'The maximum number of unique labels.') +flags.DEFINE_bool( + 'normalize_by_image_size', True, + 'Whether to normalize groundtruth instance region areas by image size. If ' + 'True, groundtruth instance areas and weighted IoUs will be divided by the ' + 'size of the corresponding image before accumulated across the dataset. ' + 'Only used for Parsing Covering (pc) evaluation.') +flags.DEFINE_integer( + 'num_workers', 0, 'If set to a positive number, will spawn child processes ' + 'to compute parts of the metric in parallel by splitting ' + 'the images between the workers. If set to -1, will use ' + 'the value of multiprocessing.cpu_count().') +flags.DEFINE_integer('print_digits', 3, + 'Number of significant digits to print in metrics.') + + +def _build_metric(metric, + num_categories, + ignored_label, + max_instances_per_category, + intersection_offset=None, + normalize_by_image_size=True): + """Creates a metric aggregator objet of the given name.""" + if metric == 'pq': + logging.warning('One should check Panoptic Quality results against the ' + 'official COCO API code. Small numerical differences ' + '(< 0.1%) can be magnified by rounding.') + return panoptic_quality.PanopticQuality(num_categories, ignored_label, + max_instances_per_category, + intersection_offset) + elif metric == 'pc': + return parsing_covering.ParsingCovering( + num_categories, ignored_label, max_instances_per_category, + intersection_offset, normalize_by_image_size) + else: + raise ValueError('No implementation for metric "%s"' % metric) + + +def _matched_annotations(gt_json, pred_json): + """Yields a set of (groundtruth, prediction) image annotation pairs..""" + image_id_to_pred_ann = { + annotation['image_id']: annotation + for annotation in pred_json['annotations'] + } + for gt_ann in gt_json['annotations']: + image_id = gt_ann['image_id'] + pred_ann = image_id_to_pred_ann[image_id] + yield gt_ann, pred_ann + + +def _open_panoptic_id_image(image_path): + """Loads a COCO-format panoptic ID image from file.""" + return panopticapi_utils.rgb2id( + np.array(Image.open(image_path), dtype=np.uint32)) + + +def _split_panoptic(ann_json, id_array, ignored_label, allow_crowds): + """Given the COCO JSON and ID map, splits into categories and instances.""" + category = np.zeros(id_array.shape, np.uint16) + instance = np.zeros(id_array.shape, np.uint16) + next_instance_id = collections.defaultdict(int) + # Skip instance label 0 for ignored label. That is reserved for void. 
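+  # Crowd regions of the ignored category therefore get instance ids 1, 2, ...
+  # from this counter, keeping them distinct from true "void" pixels (id 0).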
+ next_instance_id[ignored_label] = 1 + for segment_info in ann_json['segments_info']: + if allow_crowds and segment_info['iscrowd']: + category_id = ignored_label + else: + category_id = segment_info['category_id'] + mask = np.equal(id_array, segment_info['id']) + category[mask] = category_id + instance[mask] = next_instance_id[category_id] + next_instance_id[category_id] += 1 + return category, instance + + +def _category_and_instance_from_annotation(ann_json, folder, ignored_label, + allow_crowds): + """Given the COCO JSON annotations, finds maps of categories and instances.""" + panoptic_id_image = _open_panoptic_id_image( + os.path.join(folder, ann_json['file_name'])) + return _split_panoptic(ann_json, panoptic_id_image, ignored_label, + allow_crowds) + + +def _compute_metric(metric_aggregator, gt_folder, pred_folder, + annotation_pairs): + """Iterates over matched annotation pairs and computes a metric over them.""" + for gt_ann, pred_ann in annotation_pairs: + # We only expect "iscrowd" to appear in the ground-truth, and not in model + # output. In predicted JSON it is simply ignored, as done in official code. + gt_category, gt_instance = _category_and_instance_from_annotation( + gt_ann, gt_folder, metric_aggregator.ignored_label, True) + pred_category, pred_instance = _category_and_instance_from_annotation( + pred_ann, pred_folder, metric_aggregator.ignored_label, False) + + metric_aggregator.compare_and_accumulate(gt_category, gt_instance, + pred_category, pred_instance) + return metric_aggregator + + +def _iterate_work_queue(work_queue): + """Creates an iterable that retrieves items from a queue until one is None.""" + task = work_queue.get(block=True) + while task is not None: + yield task + task = work_queue.get(block=True) + + +def _run_metrics_worker(metric_aggregator, gt_folder, pred_folder, work_queue, + result_queue): + result = _compute_metric(metric_aggregator, gt_folder, pred_folder, + _iterate_work_queue(work_queue)) + result_queue.put(result, block=True) + + +def _is_thing_array(categories_json, ignored_label): + """is_thing[category_id] is a bool on if category is "thing" or "stuff".""" + is_thing_dict = {} + for category_json in categories_json: + is_thing_dict[category_json['id']] = bool(category_json['isthing']) + + # Check our assumption that the category ids are consecutive. + # Usually metrics should be able to handle this case, but adding a warning + # here. + max_category_id = max(six.iterkeys(is_thing_dict)) + if len(is_thing_dict) != max_category_id + 1: + seen_ids = six.viewkeys(is_thing_dict) + all_ids = set(six.moves.range(max_category_id + 1)) + unseen_ids = all_ids.difference(seen_ids) + if unseen_ids != {ignored_label}: + logging.warning( + 'Nonconsecutive category ids or no category JSON specified for ids: ' + '%s', unseen_ids) + + is_thing_array = np.zeros(max_category_id + 1) + for category_id, is_thing in six.iteritems(is_thing_dict): + is_thing_array[category_id] = is_thing + + return is_thing_array + + +def eval_coco_format(gt_json_file, + pred_json_file, + gt_folder=None, + pred_folder=None, + metric='pq', + num_categories=201, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=None, + normalize_by_image_size=True, + num_workers=0, + print_digits=3): + """Top-level code to compute metrics on a COCO-format result. + + Note that the default values are set for COCO panoptic segmentation dataset, + and thus the users may want to change it for their own dataset evaluation. 
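+
+  As a rough usage sketch (the paths here are placeholders, not files shipped
+  with this code):
+
+    results = eval_coco_format('gt.json', 'pred.json', metric='pq')
+
+  If the folder arguments are left as None, they default to the JSON paths
+  with the '.json' suffix stripped.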
+ + Args: + gt_json_file: Path to a JSON file giving ground-truth annotations in COCO + format. + pred_json_file: Path to a JSON file for the predictions to evaluate. + gt_folder: Folder containing panoptic-format ID images to match ground-truth + annotations to image regions. + pred_folder: Folder containing ID images for predictions. + metric: Name of a metric to compute. + num_categories: The number of segmentation categories (or "classes") in the + dataset. + ignored_label: A category id that is ignored in evaluation, e.g. the "void" + label as defined in the COCO panoptic segmentation dataset. + max_instances_per_category: The maximum number of instances for each + category. Used in ensuring unique instance labels. + intersection_offset: The maximum number of unique labels. + normalize_by_image_size: Whether to normalize groundtruth instance region + areas by image size. If True, groundtruth instance areas and weighted IoUs + will be divided by the size of the corresponding image before accumulated + across the dataset. Only used for Parsing Covering (pc) evaluation. + num_workers: If set to a positive number, will spawn child processes to + compute parts of the metric in parallel by splitting the images between + the workers. If set to -1, will use the value of + multiprocessing.cpu_count(). + print_digits: Number of significant digits to print in summary of computed + metrics. + + Returns: + The computed result of the metric as a float scalar. + """ + with open(gt_json_file, 'r') as gt_json_fo: + gt_json = json.load(gt_json_fo) + with open(pred_json_file, 'r') as pred_json_fo: + pred_json = json.load(pred_json_fo) + if gt_folder is None: + gt_folder = gt_json_file.replace('.json', '') + if pred_folder is None: + pred_folder = pred_json_file.replace('.json', '') + if intersection_offset is None: + intersection_offset = (num_categories + 1) * max_instances_per_category + + metric_aggregator = _build_metric( + metric, num_categories, ignored_label, max_instances_per_category, + intersection_offset, normalize_by_image_size) + + if num_workers == -1: + logging.info('Attempting to get the CPU count to set # workers.') + num_workers = multiprocessing.cpu_count() + + if num_workers > 0: + logging.info('Computing metric in parallel with %d workers.', num_workers) + work_queue = multiprocessing.Queue() + result_queue = multiprocessing.Queue() + workers = [] + worker_args = (metric_aggregator, gt_folder, pred_folder, work_queue, + result_queue) + for _ in six.moves.range(num_workers): + workers.append( + multiprocessing.Process(target=_run_metrics_worker, args=worker_args)) + for worker in workers: + worker.start() + for ann_pair in _matched_annotations(gt_json, pred_json): + work_queue.put(ann_pair, block=True) + + # Will cause each worker to return a result and terminate upon recieving a + # None task. + for _ in six.moves.range(num_workers): + work_queue.put(None, block=True) + + # Retrieve results. 
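+    # Each worker pushes exactly one accumulated metric object onto
+    # result_queue, so draining num_workers results and merging them into
+    # metric_aggregator recovers the totals for the whole dataset.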
+ for _ in six.moves.range(num_workers): + metric_aggregator.merge(result_queue.get(block=True)) + + for worker in workers: + worker.join() + else: + logging.info('Computing metric in a single process.') + annotation_pairs = _matched_annotations(gt_json, pred_json) + _compute_metric(metric_aggregator, gt_folder, pred_folder, annotation_pairs) + + is_thing = _is_thing_array(gt_json['categories'], ignored_label) + metric_aggregator.print_detailed_results( + is_thing=is_thing, print_digits=print_digits) + return metric_aggregator.detailed_results(is_thing=is_thing) + + +def main(argv): + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + eval_coco_format(FLAGS.gt_json_file, FLAGS.pred_json_file, FLAGS.gt_folder, + FLAGS.pred_folder, FLAGS.metric, FLAGS.num_categories, + FLAGS.ignored_label, FLAGS.max_instances_per_category, + FLAGS.intersection_offset, FLAGS.normalize_by_image_size, + FLAGS.num_workers, FLAGS.print_digits) + + +if __name__ == '__main__': + flags.mark_flags_as_required( + ['gt_json_file', 'gt_folder', 'pred_json_file', 'pred_folder']) + app.run(main) diff --git a/deeplab/models/research/deeplab/evaluation/eval_coco_format_test.py b/deeplab/models/research/deeplab/evaluation/eval_coco_format_test.py new file mode 100644 index 0000000..d9093ff --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/eval_coco_format_test.py @@ -0,0 +1,140 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for eval_coco_format script.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +from absl.testing import absltest +import evaluation as panopticapi_eval + +from deeplab.evaluation import eval_coco_format + +_TEST_DIR = 'deeplab/evaluation/testdata' + +FLAGS = flags.FLAGS + + +class EvalCocoFormatTest(absltest.TestCase): + + def test_compare_pq_with_reference_eval(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + panopticapi_results = panopticapi_eval.pq_compute( + gt_json_file, pred_json_file, gt_folder, pred_folder) + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pq', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256)) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + for cat_group in ['All', 'Things', 'Stuff']: + self.assertCountEqual(deeplab_results[cat_group], ['pq', 'sq', 'rq', 'n']) + for metric in ['pq', 'sq', 'rq', 'n']: + self.assertAlmostEqual(deeplab_results[cat_group][metric], + panopticapi_results[cat_group][metric]) + + def test_compare_pc_with_golden_value(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pc', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256), + normalize_by_image_size=False) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + for cat_group in ['All', 'Things', 'Stuff']: + self.assertCountEqual(deeplab_results[cat_group], ['pc', 'n']) + self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68210561) + self.assertEqual(deeplab_results['All']['n'], 6) + self.assertAlmostEqual(deeplab_results['Things']['pc'], 0.5890529) + self.assertEqual(deeplab_results['Things']['n'], 4) + self.assertAlmostEqual(deeplab_results['Stuff']['pc'], 0.86821097) + self.assertEqual(deeplab_results['Stuff']['n'], 2) + + def test_compare_pc_with_golden_value_normalize_by_size(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pc', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256), + normalize_by_image_size=True) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68214908840) + + def 
test_pc_with_multiple_workers(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pc', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256), + num_workers=3, + normalize_by_image_size=False) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68210561668) + + +if __name__ == '__main__': + absltest.main() diff --git a/deeplab/models/research/deeplab/evaluation/g3doc/img/equation_pc.png b/deeplab/models/research/deeplab/evaluation/g3doc/img/equation_pc.png new file mode 100644 index 0000000..90f15e7 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/g3doc/img/equation_pc.png differ diff --git a/deeplab/models/research/deeplab/evaluation/g3doc/img/equation_pq.png b/deeplab/models/research/deeplab/evaluation/g3doc/img/equation_pq.png new file mode 100644 index 0000000..13a4393 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/g3doc/img/equation_pq.png differ diff --git a/deeplab/models/research/deeplab/evaluation/panoptic_quality.py b/deeplab/models/research/deeplab/evaluation/panoptic_quality.py new file mode 100644 index 0000000..f7d0f3f --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/panoptic_quality.py @@ -0,0 +1,259 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation of the Panoptic Quality metric. + +Panoptic Quality is an instance-based metric for evaluating the task of +image parsing, aka panoptic segmentation. + +Please see the paper for details: +"Panoptic Segmentation", Alexander Kirillov, Kaiming He, Ross Girshick, +Carsten Rother and Piotr Dollar. arXiv:1801.00868, 2018. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +import prettytable +import six + +from deeplab.evaluation import base_metric + + +def _ids_to_counts(id_array): + """Given a numpy array, a mapping from each unique entry to its count.""" + ids, counts = np.unique(id_array, return_counts=True) + return dict(six.moves.zip(ids, counts)) + + +class PanopticQuality(base_metric.SegmentationMetric): + """Metric class for Panoptic Quality. + + "Panoptic Segmentation" by Alexander Kirillov, Kaiming He, Ross Girshick, + Carsten Rother, Piotr Dollar. 
+ https://arxiv.org/abs/1801.00868 + """ + + def compare_and_accumulate( + self, groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array): + """See base class.""" + # First, combine the category and instance labels so that every unique + # value for (category, instance) is assigned a unique integer label. + pred_segment_id = self._naively_combine_labels(predicted_category_array, + predicted_instance_array) + gt_segment_id = self._naively_combine_labels(groundtruth_category_array, + groundtruth_instance_array) + + # Pre-calculate areas for all groundtruth and predicted segments. + gt_segment_areas = _ids_to_counts(gt_segment_id) + pred_segment_areas = _ids_to_counts(pred_segment_id) + + # We assume there is only one void segment and it has instance id = 0. + void_segment_id = self.ignored_label * self.max_instances_per_category + + # There may be other ignored groundtruth segments with instance id > 0, find + # those ids using the unique segment ids extracted with the area computation + # above. + ignored_segment_ids = { + gt_segment_id for gt_segment_id in six.iterkeys(gt_segment_areas) + if (gt_segment_id // + self.max_instances_per_category) == self.ignored_label + } + + # Next, combine the groundtruth and predicted labels. Dividing up the pixels + # based on which groundtruth segment and which predicted segment they belong + # to, this will assign a different 32-bit integer label to each choice + # of (groundtruth segment, predicted segment), encoded as + # gt_segment_id * offset + pred_segment_id. + intersection_id_array = ( + gt_segment_id.astype(np.uint32) * self.offset + + pred_segment_id.astype(np.uint32)) + + # For every combination of (groundtruth segment, predicted segment) with a + # non-empty intersection, this counts the number of pixels in that + # intersection. + intersection_areas = _ids_to_counts(intersection_id_array) + + # Helper function that computes the area of the overlap between a predicted + # segment and the ground-truth void/ignored segment. + def prediction_void_overlap(pred_segment_id): + void_intersection_id = void_segment_id * self.offset + pred_segment_id + return intersection_areas.get(void_intersection_id, 0) + + # Compute overall ignored overlap. + def prediction_ignored_overlap(pred_segment_id): + total_ignored_overlap = 0 + for ignored_segment_id in ignored_segment_ids: + intersection_id = ignored_segment_id * self.offset + pred_segment_id + total_ignored_overlap += intersection_areas.get(intersection_id, 0) + return total_ignored_overlap + + # Sets that are populated with which segments groundtruth/predicted segments + # have been matched with overlapping predicted/groundtruth segments + # respectively. + gt_matched = set() + pred_matched = set() + + # Calculate IoU per pair of intersecting segments of the same category. + for intersection_id, intersection_area in six.iteritems(intersection_areas): + gt_segment_id = intersection_id // self.offset + pred_segment_id = intersection_id % self.offset + + gt_category = gt_segment_id // self.max_instances_per_category + pred_category = pred_segment_id // self.max_instances_per_category + if gt_category != pred_category: + continue + + # Union between the groundtruth and predicted segments being compared does + # not include the portion of the predicted segment that consists of + # groundtruth "void" pixels. 
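+      # Concretely, for a pair of segments of the same category:
+      #   union = gt_area + pred_area - intersection_area - pred_void_overlap
+      # so predicted pixels lying on groundtruth void do not hurt the IoU.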
+ union = ( + gt_segment_areas[gt_segment_id] + + pred_segment_areas[pred_segment_id] - intersection_area - + prediction_void_overlap(pred_segment_id)) + iou = intersection_area / union + if iou > 0.5: + self.tp_per_class[gt_category] += 1 + self.iou_per_class[gt_category] += iou + gt_matched.add(gt_segment_id) + pred_matched.add(pred_segment_id) + + # Count false negatives for each category. + for gt_segment_id in six.iterkeys(gt_segment_areas): + if gt_segment_id in gt_matched: + continue + category = gt_segment_id // self.max_instances_per_category + # Failing to detect a void segment is not a false negative. + if category == self.ignored_label: + continue + self.fn_per_class[category] += 1 + + # Count false positives for each category. + for pred_segment_id in six.iterkeys(pred_segment_areas): + if pred_segment_id in pred_matched: + continue + # A false positive is not penalized if is mostly ignored in the + # groundtruth. + if (prediction_ignored_overlap(pred_segment_id) / + pred_segment_areas[pred_segment_id]) > 0.5: + continue + category = pred_segment_id // self.max_instances_per_category + self.fp_per_class[category] += 1 + + return self.result() + + def _valid_categories(self): + """Categories with a "valid" value for the metric, have > 0 instances. + + We will ignore the `ignore_label` class and other classes which have + `tp + fn + fp = 0`. + + Returns: + Boolean array of shape `[num_categories]`. + """ + valid_categories = np.not_equal( + self.tp_per_class + self.fn_per_class + self.fp_per_class, 0) + if self.ignored_label >= 0 and self.ignored_label < self.num_categories: + valid_categories[self.ignored_label] = False + return valid_categories + + def detailed_results(self, is_thing=None): + """See base class.""" + valid_categories = self._valid_categories() + + # If known, break down which categories are valid _and_ things/stuff. + category_sets = collections.OrderedDict() + category_sets['All'] = valid_categories + if is_thing is not None: + category_sets['Things'] = np.logical_and(valid_categories, is_thing) + category_sets['Stuff'] = np.logical_and(valid_categories, + np.logical_not(is_thing)) + + # Compute individual per-class metrics that constitute factors of PQ. + sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class) + rq = base_metric.realdiv_maybe_zero( + self.tp_per_class, + self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class) + pq = np.multiply(sq, rq) + + # Assemble detailed results dictionary. + results = {} + for category_set_name, in_category_set in six.iteritems(category_sets): + if np.any(in_category_set): + results[category_set_name] = { + 'pq': np.mean(pq[in_category_set]), + 'sq': np.mean(sq[in_category_set]), + 'rq': np.mean(rq[in_category_set]), + # The number of categories in this subset. 
+ 'n': np.sum(in_category_set.astype(np.int32)), + } + else: + results[category_set_name] = {'pq': 0, 'sq': 0, 'rq': 0, 'n': 0} + + return results + + def result_per_category(self): + """See base class.""" + sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class) + rq = base_metric.realdiv_maybe_zero( + self.tp_per_class, + self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class) + return np.multiply(sq, rq) + + def print_detailed_results(self, is_thing=None, print_digits=3): + """See base class.""" + results = self.detailed_results(is_thing=is_thing) + + tab = prettytable.PrettyTable() + + tab.add_column('', [], align='l') + for fieldname in ['PQ', 'SQ', 'RQ', 'N']: + tab.add_column(fieldname, [], align='r') + + for category_set, subset_results in six.iteritems(results): + data_cols = [ + round(subset_results[col_key], print_digits) * 100 + for col_key in ['pq', 'sq', 'rq'] + ] + data_cols += [subset_results['n']] + tab.add_row([category_set] + data_cols) + + print(tab) + + def result(self): + """See base class.""" + pq_per_class = self.result_per_category() + valid_categories = self._valid_categories() + if not np.any(valid_categories): + return 0. + return np.mean(pq_per_class[valid_categories]) + + def merge(self, other_instance): + """See base class.""" + self.iou_per_class += other_instance.iou_per_class + self.tp_per_class += other_instance.tp_per_class + self.fn_per_class += other_instance.fn_per_class + self.fp_per_class += other_instance.fp_per_class + + def reset(self): + """See base class.""" + self.iou_per_class = np.zeros(self.num_categories, dtype=np.float64) + self.tp_per_class = np.zeros(self.num_categories, dtype=np.float64) + self.fn_per_class = np.zeros(self.num_categories, dtype=np.float64) + self.fp_per_class = np.zeros(self.num_categories, dtype=np.float64) diff --git a/deeplab/models/research/deeplab/evaluation/panoptic_quality_test.py b/deeplab/models/research/deeplab/evaluation/panoptic_quality_test.py new file mode 100644 index 0000000..00c88c2 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/panoptic_quality_test.py @@ -0,0 +1,336 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Panoptic Quality metric.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +from absl.testing import absltest +import numpy as np +import six + +from deeplab.evaluation import panoptic_quality +from deeplab.evaluation import test_utils + +# See the definition of the color names at: +# https://en.wikipedia.org/wiki/Web_colors. +_CLASS_COLOR_MAP = { + (0, 0, 0): 0, + (0, 0, 255): 1, # Person (blue). + (255, 0, 0): 2, # Bear (red). + (0, 255, 0): 3, # Tree (lime). + (255, 0, 255): 4, # Bird (fuchsia). + (0, 255, 255): 5, # Sky (aqua). + (255, 255, 0): 6, # Cat (yellow). 
+} + + +class PanopticQualityTest(absltest.TestCase): + + def test_perfect_match(self): + categories = np.zeros([6, 6], np.uint16) + instances = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 1, 1, 1], + [1, 2, 1, 1, 1, 1], + ], + dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + pq.compare_and_accumulate(categories, instances, categories, instances) + np.testing.assert_array_equal(pq.iou_per_class, [2.0]) + np.testing.assert_array_equal(pq.tp_per_class, [2]) + np.testing.assert_array_equal(pq.fn_per_class, [0]) + np.testing.assert_array_equal(pq.fp_per_class, [0]) + np.testing.assert_array_equal(pq.result_per_category(), [1.0]) + self.assertEqual(pq.result(), 1.0) + + def test_totally_wrong(self): + det_categories = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + gt_categories = 1 - det_categories + instances = np.zeros([6, 6], np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=2, + ignored_label=2, + max_instances_per_category=1, + offset=16) + pq.compare_and_accumulate(gt_categories, instances, det_categories, + instances) + np.testing.assert_array_equal(pq.iou_per_class, [0.0, 0.0]) + np.testing.assert_array_equal(pq.tp_per_class, [0, 0]) + np.testing.assert_array_equal(pq.fn_per_class, [1, 1]) + np.testing.assert_array_equal(pq.fp_per_class, [1, 1]) + np.testing.assert_array_equal(pq.result_per_category(), [0.0, 0.0]) + self.assertEqual(pq.result(), 0.0) + + def test_matches_by_iou(self): + good_det_labels = np.array( + [ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + gt_labels = np.array( + [ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 2, 2, 2, 1], + [1, 2, 2, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + pq.compare_and_accumulate( + np.zeros_like(gt_labels), gt_labels, np.zeros_like(good_det_labels), + good_det_labels) + + # iou(1, 1) = 28/30 + # iou(2, 2) = 6/8 + np.testing.assert_array_almost_equal(pq.iou_per_class, [28 / 30 + 6 / 8]) + np.testing.assert_array_equal(pq.tp_per_class, [2]) + np.testing.assert_array_equal(pq.fn_per_class, [0]) + np.testing.assert_array_equal(pq.fp_per_class, [0]) + self.assertAlmostEqual(pq.result(), (28 / 30 + 6 / 8) / 2) + + bad_det_labels = np.array( + [ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + pq.reset() + pq.compare_and_accumulate( + np.zeros_like(gt_labels), gt_labels, np.zeros_like(bad_det_labels), + bad_det_labels) + + # iou(1, 1) = 27/32 + np.testing.assert_array_almost_equal(pq.iou_per_class, [27 / 32]) + np.testing.assert_array_equal(pq.tp_per_class, [1]) + np.testing.assert_array_equal(pq.fn_per_class, [1]) + np.testing.assert_array_equal(pq.fp_per_class, [1]) + self.assertAlmostEqual(pq.result(), (27 / 32) * (1 / 2)) + + def test_wrong_instances(self): + categories = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 1, 2, 2], + [1, 2, 2, 1, 2, 2], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + 
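+    # Each predicted category-2 instance overlaps the single groundtruth
+    # category-2 segment with IoU exactly 0.5, which does not pass the > 0.5
+    # matching threshold, so category 2 is scored as 1 FN and 2 FPs below.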
predicted_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + groundtruth_instances = np.zeros([6, 6], dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=3, + ignored_label=0, + max_instances_per_category=10, + offset=100) + pq.compare_and_accumulate(categories, groundtruth_instances, categories, + predicted_instances) + + np.testing.assert_array_equal(pq.iou_per_class, [0.0, 1.0, 0.0]) + np.testing.assert_array_equal(pq.tp_per_class, [0, 1, 0]) + np.testing.assert_array_equal(pq.fn_per_class, [0, 0, 1]) + np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 2]) + np.testing.assert_array_equal(pq.result_per_category(), [0, 1, 0]) + self.assertAlmostEqual(pq.result(), 0.5) + + def test_instance_order_is_arbitrary(self): + categories = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 1, 2, 2], + [1, 2, 2, 1, 2, 2], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + predicted_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + groundtruth_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=3, + ignored_label=0, + max_instances_per_category=10, + offset=100) + pq.compare_and_accumulate(categories, groundtruth_instances, categories, + predicted_instances) + + np.testing.assert_array_equal(pq.iou_per_class, [0.0, 1.0, 2.0]) + np.testing.assert_array_equal(pq.tp_per_class, [0, 1, 2]) + np.testing.assert_array_equal(pq.fn_per_class, [0, 0, 0]) + np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 0]) + np.testing.assert_array_equal(pq.result_per_category(), [0, 1, 1]) + self.assertAlmostEqual(pq.result(), 1.0) + + def test_matches_expected(self): + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pq = panoptic_quality.PanopticQuality( + num_categories=3, + ignored_label=0, + max_instances_per_category=256, + offset=256 * 256) + pq.compare_and_accumulate(gt_classes, gt_instances, pred_classes, + pred_instances) + np.testing.assert_array_almost_equal( + pq.iou_per_class, [2.06104, 5.26827, 0.54069], decimal=4) + np.testing.assert_array_equal(pq.tp_per_class, [1, 7, 1]) + np.testing.assert_array_equal(pq.fn_per_class, [0, 1, 0]) + np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 0]) + np.testing.assert_array_almost_equal(pq.result_per_category(), + [2.061038, 0.702436, 0.54069]) + self.assertAlmostEqual(pq.result(), 0.62156287) + + def test_merge_accumulates_all_across_instances(self): + categories = np.zeros([6, 6], np.uint16) + good_det_labels = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + gt_labels = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 2, 2, 2, 1], + 
[1, 2, 2, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + good_pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + for _ in six.moves.range(2): + good_pq.compare_and_accumulate(categories, gt_labels, categories, + good_det_labels) + + bad_det_labels = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + bad_pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + for _ in six.moves.range(2): + bad_pq.compare_and_accumulate(categories, gt_labels, categories, + bad_det_labels) + + good_pq.merge(bad_pq) + + np.testing.assert_array_almost_equal( + good_pq.iou_per_class, [2 * (28 / 30 + 6 / 8) + 2 * (27 / 32)]) + np.testing.assert_array_equal(good_pq.tp_per_class, [2 * 2 + 2]) + np.testing.assert_array_equal(good_pq.fn_per_class, [2]) + np.testing.assert_array_equal(good_pq.fp_per_class, [2]) + self.assertAlmostEqual(good_pq.result(), 0.63177083) + + +if __name__ == '__main__': + absltest.main() diff --git a/deeplab/models/research/deeplab/evaluation/parsing_covering.py b/deeplab/models/research/deeplab/evaluation/parsing_covering.py new file mode 100644 index 0000000..a40e55f --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/parsing_covering.py @@ -0,0 +1,246 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation of the Parsing Covering metric. + +Parsing Covering is a region-based metric for evaluating the task of +image parsing, aka panoptic segmentation. + +Please see the paper for details: +"DeeperLab: Single-Shot Image Parser", Tien-Ju Yang, Maxwell D. Collins, +Yukun Zhu, Jyh-Jing Hwang, Ting Liu, Xiao Zhang, Vivienne Sze, +George Papandreou, Liang-Chieh Chen. arXiv: 1902.05093, 2019. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +import numpy as np +import prettytable +import six + +from deeplab.evaluation import base_metric + + +class ParsingCovering(base_metric.SegmentationMetric): + r"""Metric class for Parsing Covering. + + Computes segmentation covering metric introduced in (Arbelaez, et al., 2010) + with extension to handle multi-class semantic labels (a.k.a. parsing + covering). Specifically, segmentation covering (SC) is defined in Eq. (8) in + (Arbelaez et al., 2010) as: + + SC(c) = \sum_{R\in S}(|R| * \max_{R'\in S'}O(R,R')) / \sum_{R\in S}|R|, + + where S are the groundtruth instance regions and S' are the predicted + instance regions. The parsing covering is simply: + + PC = \sum_{c=1}^{C}SC(c) / C, + + where C is the number of classes. 
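+
+  As a purely illustrative example (not from the paper): if class c has two
+  groundtruth regions with areas 30 and 10, and their best-overlapping
+  predicted regions reach IoUs of 0.9 and 0.5 respectively, then
+  SC(c) = (30 * 0.9 + 10 * 0.5) / (30 + 10) = 0.8.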
+ """ + + def __init__(self, + num_categories, + ignored_label, + max_instances_per_category, + offset, + normalize_by_image_size=True): + """Initialization for ParsingCovering. + + Args: + num_categories: The number of segmentation categories (or "classes" in the + dataset. + ignored_label: A category id that is ignored in evaluation, e.g. the void + label as defined in COCO panoptic segmentation dataset. + max_instances_per_category: The maximum number of instances for each + category. Used in ensuring unique instance labels. + offset: The maximum number of unique labels. This is used, by multiplying + the ground-truth labels, to generate unique ids for individual regions + of overlap between groundtruth and predicted segments. + normalize_by_image_size: Whether to normalize groundtruth instance region + areas by image size. If True, groundtruth instance areas and weighted + IoUs will be divided by the size of the corresponding image before + accumulated across the dataset. + """ + super(ParsingCovering, self).__init__(num_categories, ignored_label, + max_instances_per_category, offset) + self.normalize_by_image_size = normalize_by_image_size + + def compare_and_accumulate( + self, groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array): + """See base class.""" + # Allocate intermediate data structures. + max_ious = np.zeros([self.num_categories, self.max_instances_per_category], + dtype=np.float64) + gt_areas = np.zeros([self.num_categories, self.max_instances_per_category], + dtype=np.float64) + pred_areas = np.zeros( + [self.num_categories, self.max_instances_per_category], + dtype=np.float64) + # This is a dictionary in the format: + # {(category, gt_instance): [(pred_instance, intersection_area)]}. + intersections = collections.defaultdict(list) + + # First, combine the category and instance labels so that every unique + # value for (category, instance) is assigned a unique integer label. + pred_segment_id = self._naively_combine_labels(predicted_category_array, + predicted_instance_array) + gt_segment_id = self._naively_combine_labels(groundtruth_category_array, + groundtruth_instance_array) + + # Next, combine the groundtruth and predicted labels. Dividing up the pixels + # based on which groundtruth segment and which predicted segment they belong + # to, this will assign a different 32-bit integer label to each choice + # of (groundtruth segment, predicted segment), encoded as + # gt_segment_id * offset + pred_segment_id. + intersection_id_array = ( + gt_segment_id.astype(np.uint32) * self.offset + + pred_segment_id.astype(np.uint32)) + + # For every combination of (groundtruth segment, predicted segment) with a + # non-empty intersection, this counts the number of pixels in that + # intersection. + intersection_ids, intersection_areas = np.unique( + intersection_id_array, return_counts=True) + + # Find areas of all groundtruth and predicted instances, as well as of their + # intersections. 
+ for intersection_id, intersection_area in six.moves.zip( + intersection_ids, intersection_areas): + gt_segment_id = intersection_id // self.offset + gt_category = gt_segment_id // self.max_instances_per_category + if gt_category == self.ignored_label: + continue + gt_instance = gt_segment_id % self.max_instances_per_category + gt_areas[gt_category, gt_instance] += intersection_area + + pred_segment_id = intersection_id % self.offset + pred_category = pred_segment_id // self.max_instances_per_category + pred_instance = pred_segment_id % self.max_instances_per_category + pred_areas[pred_category, pred_instance] += intersection_area + if pred_category != gt_category: + continue + + intersections[gt_category, gt_instance].append((pred_instance, + intersection_area)) + + # Find maximum IoU for every groundtruth instance. + for gt_label, instance_intersections in six.iteritems(intersections): + category, gt_instance = gt_label + gt_area = gt_areas[category, gt_instance] + ious = [] + for pred_instance, intersection_area in instance_intersections: + pred_area = pred_areas[category, pred_instance] + union = gt_area + pred_area - intersection_area + ious.append(intersection_area / union) + max_ious[category, gt_instance] = max(ious) + + # Normalize groundtruth instance areas by image size if necessary. + if self.normalize_by_image_size: + gt_areas /= groundtruth_category_array.size + + # Compute per-class weighted IoUs and areas summed over all groundtruth + # instances. + self.weighted_iou_per_class += np.sum(max_ious * gt_areas, axis=-1) + self.gt_area_per_class += np.sum(gt_areas, axis=-1) + + return self.result() + + def result_per_category(self): + """See base class.""" + return base_metric.realdiv_maybe_zero(self.weighted_iou_per_class, + self.gt_area_per_class) + + def _valid_categories(self): + """Categories with a "valid" value for the metric, have > 0 instances. + + We will ignore the `ignore_label` class and other classes which have + groundtruth area of 0. + + Returns: + Boolean array of shape `[num_categories]`. + """ + valid_categories = np.not_equal(self.gt_area_per_class, 0) + if self.ignored_label >= 0 and self.ignored_label < self.num_categories: + valid_categories[self.ignored_label] = False + return valid_categories + + def detailed_results(self, is_thing=None): + """See base class.""" + valid_categories = self._valid_categories() + + # If known, break down which categories are valid _and_ things/stuff. + category_sets = collections.OrderedDict() + category_sets['All'] = valid_categories + if is_thing is not None: + category_sets['Things'] = np.logical_and(valid_categories, is_thing) + category_sets['Stuff'] = np.logical_and(valid_categories, + np.logical_not(is_thing)) + + covering_per_class = self.result_per_category() + results = {} + for category_set_name, in_category_set in six.iteritems(category_sets): + if np.any(in_category_set): + results[category_set_name] = { + 'pc': np.mean(covering_per_class[in_category_set]), + # The number of valid categories in this subset. 
+ 'n': np.sum(in_category_set.astype(np.int32)), + } + else: + results[category_set_name] = {'pc': 0, 'n': 0} + + return results + + def print_detailed_results(self, is_thing=None, print_digits=3): + """See base class.""" + results = self.detailed_results(is_thing=is_thing) + + tab = prettytable.PrettyTable() + + tab.add_column('', [], align='l') + for fieldname in ['PC', 'N']: + tab.add_column(fieldname, [], align='r') + + for category_set, subset_results in six.iteritems(results): + data_cols = [ + round(subset_results['pc'], print_digits) * 100, subset_results['n'] + ] + tab.add_row([category_set] + data_cols) + + print(tab) + + def result(self): + """See base class.""" + covering_per_class = self.result_per_category() + valid_categories = self._valid_categories() + if not np.any(valid_categories): + return 0. + return np.mean(covering_per_class[valid_categories]) + + def merge(self, other_instance): + """See base class.""" + self.weighted_iou_per_class += other_instance.weighted_iou_per_class + self.gt_area_per_class += other_instance.gt_area_per_class + + def reset(self): + """See base class.""" + self.weighted_iou_per_class = np.zeros( + self.num_categories, dtype=np.float64) + self.gt_area_per_class = np.zeros(self.num_categories, dtype=np.float64) diff --git a/deeplab/models/research/deeplab/evaluation/parsing_covering_test.py b/deeplab/models/research/deeplab/evaluation/parsing_covering_test.py new file mode 100644 index 0000000..124d1b3 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/parsing_covering_test.py @@ -0,0 +1,173 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Parsing Covering metric.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +from absl.testing import absltest +import numpy as np + +from deeplab.evaluation import parsing_covering +from deeplab.evaluation import test_utils + +# See the definition of the color names at: +# https://en.wikipedia.org/wiki/Web_colors. +_CLASS_COLOR_MAP = { + (0, 0, 0): 0, + (0, 0, 255): 1, # Person (blue). + (255, 0, 0): 2, # Bear (red). + (0, 255, 0): 3, # Tree (lime). + (255, 0, 255): 4, # Bird (fuchsia). + (0, 255, 255): 5, # Sky (aqua). + (255, 255, 0): 6, # Cat (yellow). 
+} + + +class CoveringConveringTest(absltest.TestCase): + + def test_perfect_match(self): + categories = np.zeros([6, 6], np.uint16) + instances = np.array([ + [2, 2, 2, 2, 2, 2], + [2, 4, 4, 4, 4, 2], + [2, 4, 4, 4, 4, 2], + [2, 4, 4, 4, 4, 2], + [2, 4, 4, 2, 2, 2], + [2, 4, 2, 2, 2, 2], + ], + dtype=np.uint16) + + pc = parsing_covering.ParsingCovering( + num_categories=3, + ignored_label=2, + max_instances_per_category=2, + offset=16, + normalize_by_image_size=False) + pc.compare_and_accumulate(categories, instances, categories, instances) + np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 21.0, 0.0]) + np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 21.0, 0.0]) + np.testing.assert_array_equal(pc.result_per_category(), [0.0, 1.0, 0.0]) + self.assertEqual(pc.result(), 1.0) + + def test_totally_wrong(self): + categories = np.zeros([6, 6], np.uint16) + gt_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + pred_instances = 1 - gt_instances + + pc = parsing_covering.ParsingCovering( + num_categories=2, + ignored_label=0, + max_instances_per_category=1, + offset=16, + normalize_by_image_size=False) + pc.compare_and_accumulate(categories, gt_instances, categories, + pred_instances) + np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 0.0]) + np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 10.0]) + np.testing.assert_array_equal(pc.result_per_category(), [0.0, 0.0]) + self.assertEqual(pc.result(), 0.0) + + def test_matches_expected(self): + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pc = parsing_covering.ParsingCovering( + num_categories=3, + ignored_label=0, + max_instances_per_category=256, + offset=256 * 256, + normalize_by_image_size=False) + pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes, + pred_instances) + np.testing.assert_array_almost_equal( + pc.weighted_iou_per_class, [0.0, 39864.14634, 3136], decimal=4) + np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 56870, 5800]) + np.testing.assert_array_almost_equal( + pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4) + self.assertAlmostEqual(pc.result(), 0.6208296732) + + def test_matches_expected_normalize_by_size(self): + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pc = parsing_covering.ParsingCovering( + num_categories=3, + ignored_label=0, + max_instances_per_category=256, + offset=256 * 256, + normalize_by_image_size=True) + pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes, + pred_instances) + np.testing.assert_array_almost_equal( + pc.weighted_iou_per_class, [0.0, 0.5002088756, 0.03935002196], + decimal=4) + 
np.testing.assert_array_almost_equal( + pc.gt_area_per_class, [0.0, 0.7135955832, 0.07277746408], decimal=4) + # Note that the per-category and overall PCs are identical to those without + # normalization in the previous test, because we only have a single image. + np.testing.assert_array_almost_equal( + pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4) + self.assertAlmostEqual(pc.result(), 0.6208296732) + + +if __name__ == '__main__': + absltest.main() diff --git a/deeplab/models/research/deeplab/evaluation/streaming_metrics.py b/deeplab/models/research/deeplab/evaluation/streaming_metrics.py new file mode 100644 index 0000000..8313792 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/streaming_metrics.py @@ -0,0 +1,240 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Code to compute segmentation in a "streaming" pattern in Tensorflow. + +These aggregate the metric over examples of the evaluation set. Each example is +assumed to be fed in in a stream, and the metric implementation accumulates +across them. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from deeplab.evaluation import panoptic_quality +from deeplab.evaluation import parsing_covering + +_EPSILON = 1e-10 + + +def _realdiv_maybe_zero(x, y): + """Support tf.realdiv(x, y) where y may contain zeros.""" + return tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y)) + + +def _running_total(value, shape, name=None): + """Maintains a running total of tensor `value` between calls.""" + with tf.variable_scope(name, 'running_total', [value]): + total_var = tf.get_variable( + 'total', + shape, + value.dtype, + initializer=tf.zeros_initializer(), + trainable=False, + collections=[ + tf.GraphKeys.LOCAL_VARIABLES, tf.GraphKeys.METRIC_VARIABLES + ]) + updated_total = tf.assign_add(total_var, value, use_locking=True) + + return total_var, updated_total + + +def _panoptic_quality_helper( + groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array, num_classes, + max_instances_per_category, ignored_label, offset): + """Helper function to compute panoptic quality.""" + pq = panoptic_quality.PanopticQuality(num_classes, ignored_label, + max_instances_per_category, offset) + pq.compare_and_accumulate(groundtruth_category_array, + groundtruth_instance_array, + predicted_category_array, predicted_instance_array) + return pq.iou_per_class, pq.tp_per_class, pq.fn_per_class, pq.fp_per_class + + +def streaming_panoptic_quality(groundtruth_categories, + groundtruth_instances, + predicted_categories, + predicted_instances, + num_classes, + max_instances_per_category, + ignored_label, + offset, + name=None): + """Aggregates the panoptic metric across calls with different input tensors. 
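+
+  Rough usage sketch: build the metric once from the input tensors, run the
+  returned update ops for every (groundtruth, prediction) pair in the
+  evaluation set, then evaluate `qualities` to read the accumulated per-class
+  results.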
+ + See tf.metrics.* functions for comparable functionality and usage. + + Args: + groundtruth_categories: A 2D uint16 tensor of groundtruth category labels. + groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels. + predicted_categories: A 2D uint16 tensor of predicted category labels. + predicted_instances: A 2D uint16 tensor of predicted instance labels. + num_classes: Number of classes in the dataset as an integer. + max_instances_per_category: The maximum number of instances for each class + as an integer or integer tensor. + ignored_label: The class id to be ignored in evaluation as an integer or + integer tensor. + offset: The maximum number of unique labels as an integer or integer tensor. + name: An optional variable_scope name. + + Returns: + qualities: A tensor of shape `[6, num_classes]`, where (1) panoptic quality, + (2) segmentation quality, (3) recognition quality, (4) total_tp, + (5) total_fn and (6) total_fp are saved in the respective rows. + update_ops: List of operations that update the running overall panoptic + quality. + + Raises: + RuntimeError: If eager execution is enabled. + """ + if tf.executing_eagerly(): + raise RuntimeError('Cannot aggregate when eager execution is enabled.') + + input_args = [ + tf.convert_to_tensor(groundtruth_categories, tf.uint16), + tf.convert_to_tensor(groundtruth_instances, tf.uint16), + tf.convert_to_tensor(predicted_categories, tf.uint16), + tf.convert_to_tensor(predicted_instances, tf.uint16), + tf.convert_to_tensor(num_classes, tf.int32), + tf.convert_to_tensor(max_instances_per_category, tf.int32), + tf.convert_to_tensor(ignored_label, tf.int32), + tf.convert_to_tensor(offset, tf.int32), + ] + return_types = [ + tf.float64, + tf.float64, + tf.float64, + tf.float64, + ] + with tf.variable_scope(name, 'streaming_panoptic_quality', input_args): + panoptic_results = tf.py_func( + _panoptic_quality_helper, input_args, return_types, stateful=False) + iou, tp, fn, fp = tuple(panoptic_results) + + total_iou, updated_iou = _running_total( + iou, [num_classes], name='iou_total') + total_tp, updated_tp = _running_total(tp, [num_classes], name='tp_total') + total_fn, updated_fn = _running_total(fn, [num_classes], name='fn_total') + total_fp, updated_fp = _running_total(fp, [num_classes], name='fp_total') + update_ops = [updated_iou, updated_tp, updated_fn, updated_fp] + + sq = _realdiv_maybe_zero(total_iou, total_tp) + rq = _realdiv_maybe_zero(total_tp, + total_tp + 0.5 * total_fn + 0.5 * total_fp) + pq = tf.multiply(sq, rq) + qualities = tf.stack([pq, sq, rq, total_tp, total_fn, total_fp], axis=0) + return qualities, update_ops + + +def _parsing_covering_helper( + groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array, num_classes, + max_instances_per_category, ignored_label, offset, normalize_by_image_size): + """Helper function to compute parsing covering.""" + pc = parsing_covering.ParsingCovering(num_classes, ignored_label, + max_instances_per_category, offset, + normalize_by_image_size) + pc.compare_and_accumulate(groundtruth_category_array, + groundtruth_instance_array, + predicted_category_array, predicted_instance_array) + return pc.weighted_iou_per_class, pc.gt_area_per_class + + +def streaming_parsing_covering(groundtruth_categories, + groundtruth_instances, + predicted_categories, + predicted_instances, + num_classes, + max_instances_per_category, + ignored_label, + offset, + normalize_by_image_size=True, + name=None): + """Aggregates the covering across 
calls with different input tensors. + + See tf.metrics.* functions for comparable functionality and usage. + + Args: + groundtruth_categories: A 2D uint16 tensor of groundtruth category labels. + groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels. + predicted_categories: A 2D uint16 tensor of predicted category labels. + predicted_instances: A 2D uint16 tensor of predicted instance labels. + num_classes: Number of classes in the dataset as an integer. + max_instances_per_category: The maximum number of instances for each class + as an integer or integer tensor. + ignored_label: The class id to be ignored in evaluation as an integer or + integer tensor. + offset: The maximum number of unique labels as an integer or integer tensor. + normalize_by_image_size: Whether to normalize groundtruth region areas by + image size. If True, groundtruth instance areas and weighted IoUs will be + divided by the size of the corresponding image before accumulated across + the dataset. + name: An optional variable_scope name. + + Returns: + coverings: A tensor of shape `[3, num_classes]`, where (1) per class + coverings, (2) per class sum of weighted IoUs, and (3) per class sum of + groundtruth region areas are saved in the perspective rows. + update_ops: List of operations that update the running overall parsing + covering. + + Raises: + RuntimeError: If eager execution is enabled. + """ + if tf.executing_eagerly(): + raise RuntimeError('Cannot aggregate when eager execution is enabled.') + + input_args = [ + tf.convert_to_tensor(groundtruth_categories, tf.uint16), + tf.convert_to_tensor(groundtruth_instances, tf.uint16), + tf.convert_to_tensor(predicted_categories, tf.uint16), + tf.convert_to_tensor(predicted_instances, tf.uint16), + tf.convert_to_tensor(num_classes, tf.int32), + tf.convert_to_tensor(max_instances_per_category, tf.int32), + tf.convert_to_tensor(ignored_label, tf.int32), + tf.convert_to_tensor(offset, tf.int32), + tf.convert_to_tensor(normalize_by_image_size, tf.bool), + ] + return_types = [ + tf.float64, + tf.float64, + ] + with tf.variable_scope(name, 'streaming_parsing_covering', input_args): + covering_results = tf.py_func( + _parsing_covering_helper, input_args, return_types, stateful=False) + weighted_iou_per_class, gt_area_per_class = tuple(covering_results) + + total_weighted_iou_per_class, updated_weighted_iou_per_class = ( + _running_total( + weighted_iou_per_class, [num_classes], + name='weighted_iou_per_class_total')) + total_gt_area_per_class, updated_gt_area_per_class = _running_total( + gt_area_per_class, [num_classes], name='gt_area_per_class_total') + + covering_per_class = _realdiv_maybe_zero(total_weighted_iou_per_class, + total_gt_area_per_class) + coverings = tf.stack([ + covering_per_class, + total_weighted_iou_per_class, + total_gt_area_per_class, + ], + axis=0) + update_ops = [updated_weighted_iou_per_class, updated_gt_area_per_class] + + return coverings, update_ops diff --git a/deeplab/models/research/deeplab/evaluation/streaming_metrics_test.py b/deeplab/models/research/deeplab/evaluation/streaming_metrics_test.py new file mode 100644 index 0000000..656007e --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/streaming_metrics_test.py @@ -0,0 +1,549 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for segmentation "streaming" metrics.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + + + +import numpy as np +import six +import tensorflow as tf + +from deeplab.evaluation import streaming_metrics +from deeplab.evaluation import test_utils + +# See the definition of the color names at: +# https://en.wikipedia.org/wiki/Web_colors. +_CLASS_COLOR_MAP = { + (0, 0, 0): 0, + (0, 0, 255): 1, # Person (blue). + (255, 0, 0): 2, # Bear (red). + (0, 255, 0): 3, # Tree (lime). + (255, 0, 255): 4, # Bird (fuchsia). + (0, 255, 255): 5, # Sky (aqua). + (255, 255, 0): 6, # Cat (yellow). +} + + +class StreamingPanopticQualityTest(tf.test.TestCase): + + def test_streaming_metric_on_single_image(self): + offset = 256 * 256 + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + qualities, update_pq = streaming_metrics.streaming_panoptic_quality( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=3, + max_instances_per_category=256, + ignored_label=0, + offset=offset) + pq, sq, rq, total_tp, total_fn, total_fp = tf.unstack(qualities, 6, axis=0) + feed_dict = { + gt_class_tensor: gt_classes, + gt_instance_tensor: gt_instances, + pred_class_tensor: pred_classes, + pred_instance_tensor: pred_instances + } + + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + sess.run(update_pq, feed_dict=feed_dict) + (result_pq, result_sq, result_rq, result_total_tp, result_total_fn, + result_total_fp) = sess.run([pq, sq, rq, total_tp, total_fn, total_fp], + feed_dict=feed_dict) + np.testing.assert_array_almost_equal( + result_pq, [2.06104, 0.7024, 0.54069], decimal=4) + np.testing.assert_array_almost_equal( + result_sq, [2.06104, 0.7526, 0.54069], decimal=4) + np.testing.assert_array_almost_equal(result_rq, [1., 0.9333, 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_tp, [1., 7., 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fn, [0., 1., 0.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fp, [0., 0., 0.], decimal=4) + + def test_streaming_metric_on_multiple_images(self): + num_classes = 7 + offset = 256 * 256 + + bird_gt_instance_class_map = { + 92: 5, + 176: 3, + 255: 4, + } + cat_gt_instance_class_map = { + 0: 0, + 255: 6, + } + team_gt_instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 
198: 2, + 215: 1, + 244: 1, + 255: 1, + } + test_image = collections.namedtuple( + 'TestImage', + ['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path']) + test_images = [ + test_image(bird_gt_instance_class_map, 'bird_gt.png', + 'bird_pred_instance.png', 'bird_pred_class.png'), + test_image(cat_gt_instance_class_map, 'cat_gt.png', + 'cat_pred_instance.png', 'cat_pred_class.png'), + test_image(team_gt_instance_class_map, 'team_gt_instance.png', + 'team_pred_instance.png', 'team_pred_class.png'), + ] + + gt_classes = [] + gt_instances = [] + pred_classes = [] + pred_instances = [] + for test_image in test_images: + (image_gt_instances, + image_gt_classes) = test_utils.panoptic_segmentation_with_class_map( + test_image.gt_path, test_image.gt_class_map) + gt_classes.append(image_gt_classes) + gt_instances.append(image_gt_instances) + + pred_classes.append( + test_utils.read_segmentation_with_rgb_color_map( + test_image.pred_class_path, _CLASS_COLOR_MAP)) + pred_instances.append( + test_utils.read_test_image(test_image.pred_inst_path, mode='L')) + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + qualities, update_pq = streaming_metrics.streaming_panoptic_quality( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=num_classes, + max_instances_per_category=256, + ignored_label=0, + offset=offset) + pq, sq, rq, total_tp, total_fn, total_fp = tf.unstack(qualities, 6, axis=0) + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip( + pred_classes, pred_instances, gt_classes, gt_instances): + sess.run( + update_pq, + feed_dict={ + gt_class_tensor: gt_class, + gt_instance_tensor: gt_instance, + pred_class_tensor: pred_class, + pred_instance_tensor: pred_instance + }) + (result_pq, result_sq, result_rq, result_total_tp, result_total_fn, + result_total_fp) = sess.run( + [pq, sq, rq, total_tp, total_fn, total_fp], + feed_dict={ + gt_class_tensor: 0, + gt_instance_tensor: 0, + pred_class_tensor: 0, + pred_instance_tensor: 0 + }) + np.testing.assert_array_almost_equal( + result_pq, + [4.3107, 0.7024, 0.54069, 0.745353, 0.85768, 0.99107, 0.77410], + decimal=4) + np.testing.assert_array_almost_equal( + result_sq, [5.3883, 0.7526, 0.5407, 0.7454, 0.8577, 0.9911, 0.7741], + decimal=4) + np.testing.assert_array_almost_equal( + result_rq, [0.8, 0.9333, 1., 1., 1., 1., 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_tp, [2., 7., 1., 1., 1., 1., 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fn, [0., 1., 0., 0., 0., 0., 0.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fp, [1., 0., 0., 0., 0., 0., 0.], decimal=4) + + +class StreamingParsingCoveringTest(tf.test.TestCase): + + def test_streaming_metric_on_single_image(self): + offset = 256 * 256 + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = 
tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + coverings, update_ops = streaming_metrics.streaming_parsing_covering( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=3, + max_instances_per_category=256, + ignored_label=0, + offset=offset, + normalize_by_image_size=False) + (per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = ( + tf.unstack(coverings, num=3, axis=0)) + feed_dict = { + gt_class_tensor: gt_classes, + gt_instance_tensor: gt_instances, + pred_class_tensor: pred_classes, + pred_instance_tensor: pred_instances + } + + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + sess.run(update_ops, feed_dict=feed_dict) + (result_per_class_coverings, result_per_class_weighted_ious, + result_per_class_gt_areas) = ( + sess.run([ + per_class_coverings, + per_class_weighted_ious, + per_class_gt_areas, + ], + feed_dict=feed_dict)) + + np.testing.assert_array_almost_equal( + result_per_class_coverings, [0.0, 0.7009696912, 0.5406896552], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_weighted_ious, [0.0, 39864.14634, 3136], decimal=4) + np.testing.assert_array_equal(result_per_class_gt_areas, [0, 56870, 5800]) + + def test_streaming_metric_on_multiple_images(self): + """Tests streaming parsing covering metric.""" + num_classes = 7 + offset = 256 * 256 + + bird_gt_instance_class_map = { + 92: 5, + 176: 3, + 255: 4, + } + cat_gt_instance_class_map = { + 0: 0, + 255: 6, + } + team_gt_instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + test_image = collections.namedtuple( + 'TestImage', + ['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path']) + test_images = [ + test_image(bird_gt_instance_class_map, 'bird_gt.png', + 'bird_pred_instance.png', 'bird_pred_class.png'), + test_image(cat_gt_instance_class_map, 'cat_gt.png', + 'cat_pred_instance.png', 'cat_pred_class.png'), + test_image(team_gt_instance_class_map, 'team_gt_instance.png', + 'team_pred_instance.png', 'team_pred_class.png'), + ] + + gt_classes = [] + gt_instances = [] + pred_classes = [] + pred_instances = [] + for test_image in test_images: + (image_gt_instances, + image_gt_classes) = test_utils.panoptic_segmentation_with_class_map( + test_image.gt_path, test_image.gt_class_map) + gt_classes.append(image_gt_classes) + gt_instances.append(image_gt_instances) + + pred_instances.append( + test_utils.read_test_image(test_image.pred_inst_path, mode='L')) + pred_classes.append( + test_utils.read_segmentation_with_rgb_color_map( + test_image.pred_class_path, _CLASS_COLOR_MAP)) + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + coverings, update_ops = streaming_metrics.streaming_parsing_covering( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=num_classes, + max_instances_per_category=256, + ignored_label=0, + offset=offset, + normalize_by_image_size=False) + (per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = ( + tf.unstack(coverings, num=3, axis=0)) + + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip( + pred_classes, pred_instances, gt_classes, gt_instances): + 
sess.run( + update_ops, + feed_dict={ + gt_class_tensor: gt_class, + gt_instance_tensor: gt_instance, + pred_class_tensor: pred_class, + pred_instance_tensor: pred_instance + }) + (result_per_class_coverings, result_per_class_weighted_ious, + result_per_class_gt_areas) = ( + sess.run( + [ + per_class_coverings, + per_class_weighted_ious, + per_class_gt_areas, + ], + feed_dict={ + gt_class_tensor: 0, + gt_instance_tensor: 0, + pred_class_tensor: 0, + pred_instance_tensor: 0 + })) + + np.testing.assert_array_almost_equal( + result_per_class_coverings, [ + 0.0, + 0.7009696912, + 0.5406896552, + 0.7453531599, + 0.8576779026, + 0.9910687881, + 0.7741046032, + ], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_weighted_ious, [ + 0.0, + 39864.14634, + 3136, + 1177.657993, + 2498.41573, + 33366.31289, + 26671, + ], + decimal=4) + np.testing.assert_array_equal(result_per_class_gt_areas, [ + 0.0, + 56870, + 5800, + 1580, + 2913, + 33667, + 34454, + ]) + + def test_streaming_metric_on_multiple_images_normalize_by_size(self): + """Tests streaming parsing covering metric with image size normalization.""" + num_classes = 7 + offset = 256 * 256 + + bird_gt_instance_class_map = { + 92: 5, + 176: 3, + 255: 4, + } + cat_gt_instance_class_map = { + 0: 0, + 255: 6, + } + team_gt_instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + test_image = collections.namedtuple( + 'TestImage', + ['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path']) + test_images = [ + test_image(bird_gt_instance_class_map, 'bird_gt.png', + 'bird_pred_instance.png', 'bird_pred_class.png'), + test_image(cat_gt_instance_class_map, 'cat_gt.png', + 'cat_pred_instance.png', 'cat_pred_class.png'), + test_image(team_gt_instance_class_map, 'team_gt_instance.png', + 'team_pred_instance.png', 'team_pred_class.png'), + ] + + gt_classes = [] + gt_instances = [] + pred_classes = [] + pred_instances = [] + for test_image in test_images: + (image_gt_instances, + image_gt_classes) = test_utils.panoptic_segmentation_with_class_map( + test_image.gt_path, test_image.gt_class_map) + gt_classes.append(image_gt_classes) + gt_instances.append(image_gt_instances) + + pred_instances.append( + test_utils.read_test_image(test_image.pred_inst_path, mode='L')) + pred_classes.append( + test_utils.read_segmentation_with_rgb_color_map( + test_image.pred_class_path, _CLASS_COLOR_MAP)) + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + coverings, update_ops = streaming_metrics.streaming_parsing_covering( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=num_classes, + max_instances_per_category=256, + ignored_label=0, + offset=offset, + normalize_by_image_size=True) + (per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = ( + tf.unstack(coverings, num=3, axis=0)) + + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip( + pred_classes, pred_instances, gt_classes, gt_instances): + sess.run( + update_ops, + feed_dict={ + gt_class_tensor: gt_class, + gt_instance_tensor: gt_instance, + pred_class_tensor: pred_class, + pred_instance_tensor: pred_instance + }) + (result_per_class_coverings, result_per_class_weighted_ious, + result_per_class_gt_areas) = ( + sess.run( + [ + 
per_class_coverings, + per_class_weighted_ious, + per_class_gt_areas, + ], + feed_dict={ + gt_class_tensor: 0, + gt_instance_tensor: 0, + pred_class_tensor: 0, + pred_instance_tensor: 0 + })) + + np.testing.assert_array_almost_equal( + result_per_class_coverings, [ + 0.0, + 0.7009696912, + 0.5406896552, + 0.7453531599, + 0.8576779026, + 0.9910687881, + 0.7741046032, + ], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_weighted_ious, [ + 0.0, + 0.5002088756, + 0.03935002196, + 0.03086105851, + 0.06547211033, + 0.8743792686, + 0.2549565051, + ], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_gt_areas, [ + 0.0, + 0.7135955832, + 0.07277746408, + 0.04140461216, + 0.07633647799, + 0.8822589099, + 0.3293566581, + ], + decimal=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/evaluation/test_utils.py b/deeplab/models/research/deeplab/evaluation/test_utils.py new file mode 100644 index 0000000..9ad4f55 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/test_utils.py @@ -0,0 +1,119 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions to set up unit tests on Panoptic Segmentation code.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + + + +from absl import flags +import numpy as np +import scipy.misc +import six +from six.moves import map + +FLAGS = flags.FLAGS + +_TEST_DIR = 'deeplab/evaluation/testdata' + + +def read_test_image(testdata_path, *args, **kwargs): + """Loads a test image. + + Args: + testdata_path: Image path relative to panoptic_segmentation/testdata as a + string. + *args: Additional positional arguments passed to `imread`. + **kwargs: Additional keyword arguments passed to `imread`. + + Returns: + The image, as a numpy array. + """ + image_path = os.path.join(_TEST_DIR, testdata_path) + return scipy.misc.imread(image_path, *args, **kwargs) + + +def read_segmentation_with_rgb_color_map(image_testdata_path, + rgb_to_semantic_label, + output_dtype=None): + """Reads a test segmentation as an image and a map from colors to labels. + + Args: + image_testdata_path: Image path relative to panoptic_segmentation/testdata + as a string. + rgb_to_semantic_label: Mapping from RGB colors to integer labels as a + dictionary. + output_dtype: Type of the output labels. If None, defaults to the type of + the provided color map. + + Returns: + A 2D numpy array of labels. + + Raises: + ValueError: On an incomplete `rgb_to_semantic_label`. 
+ """ + rgb_image = read_test_image(image_testdata_path, mode='RGB') + if len(rgb_image.shape) != 3 or rgb_image.shape[2] != 3: + raise AssertionError( + 'Expected RGB image, actual shape is %s' % rgb_image.sape) + + num_pixels = rgb_image.shape[0] * rgb_image.shape[1] + unique_colors = np.unique(np.reshape(rgb_image, [num_pixels, 3]), axis=0) + if not set(map(tuple, unique_colors)).issubset( + six.viewkeys(rgb_to_semantic_label)): + raise ValueError('RGB image has colors not in color map.') + + output_dtype = output_dtype or type( + next(six.itervalues(rgb_to_semantic_label))) + output_labels = np.empty(rgb_image.shape[:2], dtype=output_dtype) + for rgb_color, int_label in six.iteritems(rgb_to_semantic_label): + color_array = np.array(rgb_color, ndmin=3) + output_labels[np.all(rgb_image == color_array, axis=2)] = int_label + return output_labels + + +def panoptic_segmentation_with_class_map(instance_testdata_path, + instance_label_to_semantic_label): + """Reads in a panoptic segmentation with an instance map and a map to classes. + + Args: + instance_testdata_path: Path to a grayscale instance map, given as a string + and relative to panoptic_segmentation/testdata. + instance_label_to_semantic_label: A map from instance labels to class + labels. + + Returns: + A tuple `(instance_labels, class_labels)` of numpy arrays. + + Raises: + ValueError: On a mismatched set of instances in + the + `instance_label_to_semantic_label`. + """ + instance_labels = read_test_image(instance_testdata_path, mode='L') + if set(np.unique(instance_labels)) != set( + six.iterkeys(instance_label_to_semantic_label)): + raise ValueError('Provided class map does not match present instance ids.') + + class_labels = np.empty_like(instance_labels) + for instance_id, class_id in six.iteritems(instance_label_to_semantic_label): + class_labels[instance_labels == instance_id] = class_id + + return instance_labels, class_labels diff --git a/deeplab/models/research/deeplab/evaluation/test_utils_test.py b/deeplab/models/research/deeplab/evaluation/test_utils_test.py new file mode 100644 index 0000000..9e9bed3 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/test_utils_test.py @@ -0,0 +1,74 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for test_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +from absl.testing import absltest +import numpy as np + +from deeplab.evaluation import test_utils + + +class TestUtilsTest(absltest.TestCase): + + def test_read_test_image(self): + image_array = test_utils.read_test_image('team_pred_class.png') + self.assertSequenceEqual(image_array.shape, (231, 345, 4)) + + def test_reads_segmentation_with_color_map(self): + rgb_to_semantic_label = {(0, 0, 0): 0, (0, 0, 255): 1, (255, 0, 0): 23} + labels = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', rgb_to_semantic_label) + + input_image = test_utils.read_test_image('team_pred_class.png') + np.testing.assert_array_equal( + labels == 0, + np.logical_and(input_image[:, :, 0] == 0, input_image[:, :, 2] == 0)) + np.testing.assert_array_equal(labels == 1, input_image[:, :, 2] == 255) + np.testing.assert_array_equal(labels == 23, input_image[:, :, 0] == 255) + + def test_reads_gt_segmentation(self): + instance_label_to_semantic_label = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 23, + 215: 1, + 244: 1, + 255: 1, + } + instances, classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_label_to_semantic_label) + + expected_label_shape = (231, 345) + self.assertSequenceEqual(instances.shape, expected_label_shape) + self.assertSequenceEqual(classes.shape, expected_label_shape) + np.testing.assert_array_equal(instances == 0, classes == 0) + np.testing.assert_array_equal(instances == 198, classes == 23) + np.testing.assert_array_equal( + np.logical_and(instances != 0, instances != 198), classes == 1) + + +if __name__ == '__main__': + absltest.main() diff --git a/deeplab/models/research/deeplab/evaluation/testdata/README.md b/deeplab/models/research/deeplab/evaluation/testdata/README.md new file mode 100644 index 0000000..711b476 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/testdata/README.md @@ -0,0 +1,14 @@ +# Segmentation Evalaution Test Data + +## Source Images + +* [team_input.png](team_input.png) \ + Source: + https://ai.googleblog.com/2018/03/semantic-image-segmentation-with.html +* [cat_input.jpg](cat_input.jpg) \ + Source: https://www.flickr.com/photos/magdalena_b/4995858743 +* [bird_input.jpg](bird_input.jpg) \ + Source: https://www.flickr.com/photos/chivinskia/40619099560 +* [congress_input.jpg](congress_input.jpg) \ + Source: + https://cao.house.gov/sites/cao.house.gov/files/documents/SAR-Jan-Jun-2016.pdf diff --git a/deeplab/models/research/deeplab/evaluation/testdata/bird_gt.png b/deeplab/models/research/deeplab/evaluation/testdata/bird_gt.png new file mode 100644 index 0000000..05d8549 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/bird_gt.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/bird_pred_class.png b/deeplab/models/research/deeplab/evaluation/testdata/bird_pred_class.png new file mode 100644 index 0000000..07351bf Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/bird_pred_class.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/bird_pred_instance.png b/deeplab/models/research/deeplab/evaluation/testdata/bird_pred_instance.png new file mode 100644 index 0000000..faa1371 Binary files /dev/null and 
b/deeplab/models/research/deeplab/evaluation/testdata/bird_pred_instance.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/cat_gt.png b/deeplab/models/research/deeplab/evaluation/testdata/cat_gt.png new file mode 100644 index 0000000..41f6011 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/cat_gt.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/cat_pred_class.png b/deeplab/models/research/deeplab/evaluation/testdata/cat_pred_class.png new file mode 100644 index 0000000..3728c68 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/cat_pred_class.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/cat_pred_instance.png b/deeplab/models/research/deeplab/evaluation/testdata/cat_pred_instance.png new file mode 100644 index 0000000..ebd9ba4 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/cat_pred_instance.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_gt.json b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt.json new file mode 100644 index 0000000..5f79bf1 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt.json @@ -0,0 +1,214 @@ +{ + "info": { + "description": "Test COCO-format dataset", + "url": "https://github.com/tensorflow/models/tree/master/research/deeplab", + "version": "1.0", + "year": 2019 + }, + "images": [ + { + "id": 1, + "file_name": "bird.jpg", + "height": 159, + "width": 240, + "flickr_url": "https://www.flickr.com/photos/chivinskia/40619099560" + }, + { + "id": 2, + "file_name": "cat.jpg", + "height": 330, + "width": 317, + "flickr_url": "https://www.flickr.com/photos/magdalena_b/4995858743" + }, + { + "id": 3, + "file_name": "team.jpg", + "height": 231, + "width": 345 + }, + { + "id": 4, + "file_name": "congress.jpg", + "height": 267, + "width": 525 + } + ], + "annotations": [ + { + "image_id": 1, + "file_name": "bird.png", + "segments_info": [ + { + "id": 255, + "area": 2913, + "category_id": 4, + "iscrowd": 0 + }, + { + "id": 2586368, + "area": 1580, + "category_id": 3, + "iscrowd": 0 + }, + { + "id": 16770360, + "area": 33667, + "category_id": 5, + "iscrowd": 0 + } + ] + }, + { + "image_id": 2, + "file_name": "cat.png", + "segments_info": [ + { + "id": 16711691, + "area": 34454, + "category_id": 6, + "iscrowd": 0 + } + ] + }, + { + "image_id": 3, + "file_name": "team.png", + "segments_info": [ + { + "id": 129, + "area": 5443, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 255, + "area": 3574, + "category_id": 2, + "iscrowd": 0 + }, + { + "id": 47615, + "area": 11483, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 65532, + "area": 7080, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 8585107, + "area": 11363, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 9011200, + "area": 7158, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 12858027, + "area": 6419, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16053492, + "area": 4350, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711680, + "area": 5800, + "category_id": 1, + "iscrowd": 0 + } + ] + }, + { + "image_id": 4, + "file_name": "congress.png", + "segments_info": [ + { + "id": 255, + "area": 243, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 65315, + "area": 553, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 65516, + "area": 652, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 9895680, + "area": 82774, + "category_id": 1, + "iscrowd": 1 + 
}, + { + "id": 16711739, + "area": 137, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711868, + "area": 179, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16762624, + "area": 2742, + "category_id": 1, + "iscrowd": 0 + } + ] + } + ], + "categories": [ + { + "id": 1, + "name": "person", + "isthing": 1 + }, + { + "id": 2, + "name": "umbrella", + "isthing": 1 + }, + { + "id": 3, + "name": "tree-merged", + "isthing": 0 + }, + { + "id": 4, + "name": "bird", + "isthing": 1 + }, + { + "id": 5, + "name": "sky", + "isthing": 0 + }, + { + "id": 6, + "name": "cat", + "isthing": 1 + } + ] +} diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/bird.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/bird.png new file mode 100644 index 0000000..9ef4ad9 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/bird.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/cat.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/cat.png new file mode 100644 index 0000000..cb02530 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/cat.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/congress.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/congress.png new file mode 100644 index 0000000..a56b98d Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/congress.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/team.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/team.png new file mode 100644 index 0000000..bde358d Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_gt/team.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_pred.json b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred.json new file mode 100644 index 0000000..4aead17 --- /dev/null +++ b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred.json @@ -0,0 +1,208 @@ +{ + "info": { + "description": "Test COCO-format dataset", + "url": "https://github.com/tensorflow/models/tree/master/research/deeplab", + "version": "1.0", + "year": 2019 + }, + "images": [ + { + "id": 1, + "file_name": "bird.jpg", + "height": 159, + "width": 240, + "flickr_url": "https://www.flickr.com/photos/chivinskia/40619099560" + }, + { + "id": 2, + "file_name": "cat.jpg", + "height": 330, + "width": 317, + "flickr_url": "https://www.flickr.com/photos/magdalena_b/4995858743" + }, + { + "id": 3, + "file_name": "team.jpg", + "height": 231, + "width": 345 + }, + { + "id": 4, + "file_name": "congress.jpg", + "height": 267, + "width": 525 + } + ], + "annotations": [ + { + "image_id": 1, + "file_name": "bird.png", + "segments_info": [ + { + "id": 55551, + "area": 3039, + "category_id": 4, + "iscrowd": 0 + }, + { + "id": 16216831, + "area": 33659, + "category_id": 5, + "iscrowd": 0 + }, + { + "id": 16760832, + "area": 1237, + "category_id": 3, + "iscrowd": 0 + } + ] + }, + { + "image_id": 2, + "file_name": "cat.png", + "segments_info": [ + { + "id": 36493, + "area": 26910, + "category_id": 6, + "iscrowd": 0 + } + ] + }, + { + "image_id": 3, + "file_name": "team.png", + "segments_info": [ + { + "id": 0, + "area": 22164, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 129, + "area": 3418, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 255, + "area": 12827, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 
740608, + "area": 8606, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 2555695, + "area": 7636, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 2883541, + "area": 6844, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 14408667, + "area": 4766, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711820, + "area": 4767, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16768768, + "area": 8667, + "category_id": 1, + "iscrowd": 0 + } + ] + }, + { + "image_id": 4, + "file_name": "congress.png", + "segments_info": [ + { + "id": 255, + "area": 2599, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 37375, + "area": 386, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 62207, + "area": 384, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 5177088, + "area": 260, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711691, + "area": 1011, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16774912, + "area": 803, + "category_id": 1, + "iscrowd": 0 + } + ] + } + ], + "categories": [ + { + "id": 1, + "name": "person", + "isthing": 1 + }, + { + "id": 2, + "name": "umbrella", + "isthing": 1 + }, + { + "id": 3, + "name": "tree-merged", + "isthing": 0 + }, + { + "id": 4, + "name": "bird", + "isthing": 1 + }, + { + "id": 5, + "name": "sky", + "isthing": 0 + }, + { + "id": 6, + "name": "cat", + "isthing": 1 + } + ] +} diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/bird.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/bird.png new file mode 100644 index 0000000..c9b4cbc Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/bird.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/cat.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/cat.png new file mode 100644 index 0000000..3245832 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/cat.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/congress.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/congress.png new file mode 100644 index 0000000..fc7bb06 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/congress.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/team.png b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/team.png new file mode 100644 index 0000000..7300bf4 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/coco_pred/team.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/team_gt_instance.png b/deeplab/models/research/deeplab/evaluation/testdata/team_gt_instance.png new file mode 100644 index 0000000..97abb55 Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/team_gt_instance.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/team_pred_class.png b/deeplab/models/research/deeplab/evaluation/testdata/team_pred_class.png new file mode 100644 index 0000000..2ed78de Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/team_pred_class.png differ diff --git a/deeplab/models/research/deeplab/evaluation/testdata/team_pred_instance.png b/deeplab/models/research/deeplab/evaluation/testdata/team_pred_instance.png new file mode 100644 index 0000000..264606a Binary files /dev/null and b/deeplab/models/research/deeplab/evaluation/testdata/team_pred_instance.png differ diff --git 
a/deeplab/models/research/deeplab/export_model.py b/deeplab/models/research/deeplab/export_model.py new file mode 100644 index 0000000..b7307b5 --- /dev/null +++ b/deeplab/models/research/deeplab/export_model.py @@ -0,0 +1,201 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Exports trained model to TensorFlow frozen graph.""" + +import os +import tensorflow as tf + +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.python.tools import freeze_graph +from deeplab import common +from deeplab import input_preprocess +from deeplab import model + +slim = tf.contrib.slim +flags = tf.app.flags + +FLAGS = flags.FLAGS + +flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path') + +flags.DEFINE_string('export_path', None, + 'Path to output Tensorflow frozen graph.') + +flags.DEFINE_integer('num_classes', 21, 'Number of classes.') + +flags.DEFINE_multi_integer('crop_size', [513, 513], + 'Crop size [height, width].') + +# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or +# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note +# one could use different atrous_rates/output_stride during training/evaluation. +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 8, + 'The ratio of input to output spatial resolution.') + +# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale inference. +flags.DEFINE_multi_float('inference_scales', [1.0], + 'The scales to resize images for inference.') + +flags.DEFINE_bool('add_flipped_images', False, + 'Add flipped images during inference or not.') + +flags.DEFINE_integer( + 'quantize_delay_step', -1, + 'Steps to start quantized training. If < 0, will not quantize model.') + +flags.DEFINE_bool('save_inference_graph', False, + 'Save inference graph in text proto.') + +# Input name of the exported model. +_INPUT_NAME = 'ImageTensor' + +# Output name of the exported predictions. +_OUTPUT_NAME = 'SemanticPredictions' +_RAW_OUTPUT_NAME = 'RawSemanticPredictions' + +# Output name of the exported probabilities. +_OUTPUT_PROB_NAME = 'SemanticProbabilities' +_RAW_OUTPUT_PROB_NAME = 'RawSemanticProbabilities' + + +def _create_input_tensors(): + """Creates and prepares input tensors for DeepLab model. + + This method creates a 4-D uint8 image tensor 'ImageTensor' with shape + [1, None, None, 3]. The actual input tensor name to use during inference is + 'ImageTensor:0'. + + Returns: + image: Preprocessed 4-D float32 tensor with shape [1, crop_height, + crop_width, 3]. + original_image_size: Original image shape tensor [height, width]. + resized_image_size: Resized image shape tensor [height, width]. + """ + # input_preprocess takes 4-D image tensor as input. 
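+ # The steps below squeeze the batched placeholder to a 3-D image, apply the
+ # input preprocessing (resize and pad to the crop size given by the flags) with
+ # is_training=False, and expand the result back to 4-D for the model.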
+ input_image = tf.placeholder(tf.uint8, [1, None, None, 3], name=_INPUT_NAME) + original_image_size = tf.shape(input_image)[1:3] + + # Squeeze the dimension in axis=0 since `preprocess_image_and_label` assumes + # image to be 3-D. + image = tf.squeeze(input_image, axis=0) + resized_image, image, _ = input_preprocess.preprocess_image_and_label( + image, + label=None, + crop_height=FLAGS.crop_size[0], + crop_width=FLAGS.crop_size[1], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + is_training=False, + model_variant=FLAGS.model_variant) + resized_image_size = tf.shape(resized_image)[:2] + + # Expand the dimension in axis=0, since the following operations assume the + # image to be 4-D. + image = tf.expand_dims(image, 0) + + return image, original_image_size, resized_image_size + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.info('Prepare to export model to: %s', FLAGS.export_path) + + with tf.Graph().as_default(): + image, image_size, resized_image_size = _create_input_tensors() + + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes}, + crop_size=FLAGS.crop_size, + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + if tuple(FLAGS.inference_scales) == (1.0,): + tf.logging.info('Exported model performs single-scale inference.') + predictions = model.predict_labels( + image, + model_options=model_options, + image_pyramid=FLAGS.image_pyramid) + else: + tf.logging.info('Exported model performs multi-scale inference.') + if FLAGS.quantize_delay_step >= 0: + raise ValueError( + 'Quantize mode is not supported with multi-scale test.') + predictions = model.predict_labels_multi_scale( + image, + model_options=model_options, + eval_scales=FLAGS.inference_scales, + add_flipped_images=FLAGS.add_flipped_images) + raw_predictions = tf.identity( + tf.cast(predictions[common.OUTPUT_TYPE], tf.float32), + _RAW_OUTPUT_NAME) + raw_probabilities = tf.identity( + predictions[common.OUTPUT_TYPE + model.PROB_SUFFIX], + _RAW_OUTPUT_PROB_NAME) + + # Crop the valid regions from the predictions. + semantic_predictions = raw_predictions[ + :, :resized_image_size[0], :resized_image_size[1]] + semantic_probabilities = raw_probabilities[ + :, :resized_image_size[0], :resized_image_size[1]] + + # Resize back the prediction to the original image size. + def _resize_label(label, label_size): + # Expand dimension of label to [1, height, width, 1] for resize operation. 
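+ # Nearest-neighbor resizing (below) keeps the integer class labels intact;
+ # the class probabilities are instead resized bilinearly further down.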
+ label = tf.expand_dims(label, 3) + resized_label = tf.image.resize_images( + label, + label_size, + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True) + return tf.cast(tf.squeeze(resized_label, 3), tf.int32) + semantic_predictions = _resize_label(semantic_predictions, image_size) + semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME) + + semantic_probabilities = tf.image.resize_bilinear( + semantic_probabilities, image_size, align_corners=True, + name=_OUTPUT_PROB_NAME) + + if FLAGS.quantize_delay_step >= 0: + contrib_quantize.create_eval_graph() + + saver = tf.train.Saver(tf.all_variables()) + + dirname = os.path.dirname(FLAGS.export_path) + tf.gfile.MakeDirs(dirname) + graph_def = tf.get_default_graph().as_graph_def(add_shapes=True) + freeze_graph.freeze_graph_with_def_protos( + graph_def, + saver.as_saver_def(), + FLAGS.checkpoint_path, + _OUTPUT_NAME + ',' + _OUTPUT_PROB_NAME, + restore_op_name=None, + filename_tensor_name=None, + output_graph=FLAGS.export_path, + clear_devices=True, + initializer_nodes=None) + + if FLAGS.save_inference_graph: + tf.train.write_graph(graph_def, dirname, 'inference_graph.pbtxt') + + +if __name__ == '__main__': + flags.mark_flag_as_required('checkpoint_path') + flags.mark_flag_as_required('export_path') + tf.app.run() diff --git a/deeplab/models/research/deeplab/g3doc/ade20k.md b/deeplab/models/research/deeplab/g3doc/ade20k.md new file mode 100644 index 0000000..9505ab2 --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/ade20k.md @@ -0,0 +1,107 @@ +# Running DeepLab on ADE20K Semantic Segmentation Dataset + +This page walks through the steps required to run DeepLab on ADE20K dataset on a +local machine. + +## Download dataset and convert to TFRecord + +We have prepared the script (under the folder `datasets`) to download and +convert ADE20K semantic segmentation dataset to TFRecord. + +```bash +# From the tensorflow/models/research/deeplab/datasets directory. +bash download_and_convert_ade20k.sh +``` + +The converted dataset will be saved at ./deeplab/datasets/ADE20K/tfrecord + +## Recommended Directory Structure for Training and Evaluation + +``` ++ datasets + - build_data.py + - build_ade20k_data.py + - download_and_convert_ade20k.sh + + ADE20K + + tfrecord + + exp + + train_on_train_set + + train + + eval + + vis + + ADEChallengeData2016 + + annotations + + training + + validation + + images + + training + + validation +``` + +where the folder `train_on_train_set` stores the train/eval/vis events and +results (when training DeepLab on the ADE20K train set). + +## Running the train/eval/vis jobs + +A local training job using `xception_65` can be run with the following command: + +```bash +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=150000 \ + --train_split="train" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="513,513" \ + --train_batch_size=4 \ + --min_resize_value=513 \ + --max_resize_value=513 \ + --resize_factor=16 \ + --dataset="ade20k" \ + --tf_initial_checkpoint=${PATH_TO_INITIAL_CHECKPOINT} \ + --train_logdir=${PATH_TO_TRAIN_DIR}\ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH\_TO\_INITIAL\_CHECKPOINT} is the path to the initial checkpoint. 
+${PATH\_TO\_TRAIN\_DIR} is the directory in which training checkpoints and +events will be written to (it is recommended to set it to the +`train_on_train_set/train` above), and ${PATH\_TO\_DATASET} is the directory in +which the ADE20K dataset resides (the `tfrecord` above) + +**Note that for train.py:** + +1. In order to fine tune the BN layers, one needs to use large batch size (> + 12), and set fine_tune_batch_norm = True. Here, we simply use small batch + size during training for the purpose of demonstration. If the users have + limited GPU memory at hand, please fine-tune from our provided checkpoints + whose batch norm parameters have been trained, and use smaller learning rate + with fine_tune_batch_norm = False. + +2. User should fine tune the `min_resize_value` and `max_resize_value` to get + better result. Note that `resize_factor` has to be equal to `output_stride`. + +3. The users should change atrous_rates from [6, 12, 18] to [12, 24, 36] if + setting output_stride=8. + +4. The users could skip the flag, `decoder_output_stride`, if you do not want + to use the decoder structure. + +## Running Tensorboard + +Progress for training and evaluation jobs can be inspected using Tensorboard. If +using the recommended directory structure, Tensorboard can be run using the +following command: + +```bash +tensorboard --logdir=${PATH_TO_LOG_DIRECTORY} +``` + +where `${PATH_TO_LOG_DIRECTORY}` points to the directory that contains the train +directorie (e.g., the folder `train_on_train_set` in the above example). Please +note it may take Tensorboard a couple minutes to populate with data. diff --git a/deeplab/models/research/deeplab/g3doc/cityscapes.md b/deeplab/models/research/deeplab/g3doc/cityscapes.md new file mode 100644 index 0000000..5a660aa --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/cityscapes.md @@ -0,0 +1,159 @@ +# Running DeepLab on Cityscapes Semantic Segmentation Dataset + +This page walks through the steps required to run DeepLab on Cityscapes on a +local machine. + +## Download dataset and convert to TFRecord + +We have prepared the script (under the folder `datasets`) to convert Cityscapes +dataset to TFRecord. The users are required to download the dataset beforehand +by registering the [website](https://www.cityscapes-dataset.com/). + +```bash +# From the tensorflow/models/research/deeplab/datasets directory. +sh convert_cityscapes.sh +``` + +The converted dataset will be saved at ./deeplab/datasets/cityscapes/tfrecord. + +## Recommended Directory Structure for Training and Evaluation + +``` ++ datasets + + cityscapes + + leftImg8bit + + gtFine + + tfrecord + + exp + + train_on_train_set + + train + + eval + + vis +``` + +where the folder `train_on_train_set` stores the train/eval/vis events and +results (when training DeepLab on the Cityscapes train set). 
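+
+One way to create the experiment folders above (a minimal sketch, assuming the
+layout shown and that the dataset has already been converted to `tfrecord`; the
+`exp`/`train_on_train_set` names are only a convention, not required by the code):
+
+```bash
+# From tensorflow/models/research/deeplab/datasets.
+mkdir -p cityscapes/exp/train_on_train_set/{train,eval,vis}
+```
+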
+ +## Running the train/eval/vis jobs + +A local training job using `xception_65` can be run with the following command: + +```bash +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=90000 \ + --train_split="train_fine" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="769,769" \ + --train_batch_size=1 \ + --dataset="cityscapes" \ + --tf_initial_checkpoint=${PATH_TO_INITIAL_CHECKPOINT} \ + --train_logdir=${PATH_TO_TRAIN_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_INITIAL_CHECKPOINT} is the path to the initial checkpoint +(usually an ImageNet pretrained checkpoint), ${PATH_TO_TRAIN_DIR} is the +directory in which training checkpoints and events will be written to, and +${PATH_TO_DATASET} is the directory in which the Cityscapes dataset resides. + +**Note that for {train,eval,vis}.py**: + +1. In order to reproduce our results, one needs to use large batch size (> 8), + and set fine_tune_batch_norm = True. Here, we simply use small batch size + during training for the purpose of demonstration. If the users have limited + GPU memory at hand, please fine-tune from our provided checkpoints whose + batch norm parameters have been trained, and use smaller learning rate with + fine_tune_batch_norm = False. + +2. The users should change atrous_rates from [6, 12, 18] to [12, 24, 36] if + setting output_stride=8. + +3. The users could skip the flag, `decoder_output_stride`, if you do not want + to use the decoder structure. + +4. Change and add the following flags in order to use the provided dense + prediction cell. Note we need to set decoder_output_stride if you want to + use the provided checkpoints which include the decoder module. + +```bash +--model_variant="xception_71" +--dense_prediction_cell_json="deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json" +--decoder_output_stride=4 +``` + +A local evaluation job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/eval.py \ + --logtostderr \ + --eval_split="val_fine" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --eval_crop_size="1025,2049" \ + --dataset="cityscapes" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --eval_logdir=${PATH_TO_EVAL_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_EVAL_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +Cityscapes dataset resides. 
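+
+The `eval_crop_size` of `1025,2049` used above follows the whole-image inference
+rule described later in `faq.md`: each dimension is set to `output_stride * k + 1`
+for the smallest integer `k` that covers the largest image dimension. A quick,
+illustrative check of that arithmetic (the shell variables below are ours, not
+flags of eval.py):
+
+```bash
+# Cityscapes frames are 1024x2048; with output_stride=16 this yields 1025 and 2049.
+OS=16
+for DIM in 1024 2048; do
+  K=$(( (DIM + OS - 1) / OS ))      # smallest k with OS * k >= DIM
+  echo "${DIM} -> $(( OS * K + 1 ))"
+done
+```
+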
+ +A local visualization job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/vis.py \ + --logtostderr \ + --vis_split="val_fine" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --vis_crop_size="1025,2049" \ + --dataset="cityscapes" \ + --colormap_type="cityscapes" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --vis_logdir=${PATH_TO_VIS_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_VIS_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +Cityscapes dataset resides. Note that if the users would like to save the +segmentation results for evaluation server, set also_save_raw_predictions = +True. + +## Running Tensorboard + +Progress for training and evaluation jobs can be inspected using Tensorboard. If +using the recommended directory structure, Tensorboard can be run using the +following command: + +```bash +tensorboard --logdir=${PATH_TO_LOG_DIRECTORY} +``` + +where `${PATH_TO_LOG_DIRECTORY}` points to the directory that contains the +train, eval, and vis directories (e.g., the folder `train_on_train_set` in the +above example). Please note it may take Tensorboard a couple minutes to populate +with data. diff --git a/deeplab/models/research/deeplab/g3doc/export_model.md b/deeplab/models/research/deeplab/g3doc/export_model.md new file mode 100644 index 0000000..c41649e --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/export_model.md @@ -0,0 +1,23 @@ +# Export trained deeplab model to frozen inference graph + +After model training finishes, you could export it to a frozen TensorFlow +inference graph proto. Your trained model checkpoint usually includes the +following files: + +* model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001, +* model.ckpt-${CHECKPOINT_NUMBER}.index +* model.ckpt-${CHECKPOINT_NUMBER}.meta + +After you have identified a candidate checkpoint to export, you can run the +following commandline to export to a frozen graph: + +```bash +# From tensorflow/models/research/ +# Assume all checkpoint files share the same path prefix `${CHECKPOINT_PATH}`. +python deeplab/export_model.py \ + --checkpoint_path=${CHECKPOINT_PATH} \ + --export_path=${OUTPUT_DIR}/frozen_inference_graph.pb +``` + +Please also add other model specific flags as you use for training, such as +`model_variant`, `add_image_level_feature`, etc. diff --git a/deeplab/models/research/deeplab/g3doc/faq.md b/deeplab/models/research/deeplab/g3doc/faq.md new file mode 100644 index 0000000..26ff4b3 --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/faq.md @@ -0,0 +1,87 @@ +# FAQ +___ +Q1: What if I want to use other network backbones, such as ResNet [1], instead of only those provided ones (e.g., Xception)? + +A: The users could modify the provided core/feature_extractor.py to support more network backbones. +___ +Q2: What if I want to train the model on other datasets? + +A: The users could modify the provided dataset/build_{cityscapes,voc2012}_data.py and dataset/segmentation_dataset.py to build their own dataset. +___ +Q3: Where can I download the PASCAL VOC augmented training set? + +A: The PASCAL VOC augmented training set is provided by Bharath Hariharan et al. 
[2] Please refer to their [website](http://home.bharathh.info/pubs/codes/SBD/download.html) for details and consider citing their paper if using the dataset. +___ +Q4: Why the implementation does not include DenseCRF [3]? + +A: We have not tried this. The interested users could take a look at Philipp KrähenbĂ¼hl's [website](http://graphics.stanford.edu/projects/densecrf/) and [paper](https://arxiv.org/abs/1210.5644) for details. +___ +Q5: What if I want to train the model and fine-tune the batch normalization parameters? + +A: If given the limited resource at hand, we would suggest you simply fine-tune +from our provided checkpoint whose batch-norm parameters have been trained (i.e., +train with a smaller learning rate, set `fine_tune_batch_norm = false`, and +employ longer training iterations since the learning rate is small). If +you really would like to train by yourself, we would suggest + +1. Set `output_stride = 16` or maybe even `32` (remember to change the flag +`atrous_rates` accordingly, e.g., `atrous_rates = [3, 6, 9]` for +`output_stride = 32`). + +2. Use as many GPUs as possible (change the flag `num_clones` in train.py) and +set `train_batch_size` as large as possible. + +3. Adjust the `train_crop_size` in train.py. Maybe set it to be smaller, e.g., +513x513 (or even 321x321), so that you could use a larger batch size. + +4. Use a smaller network backbone, such as MobileNet-v2. + +___ +Q6: How can I train the model asynchronously? + +A: In the train.py, the users could set `num_replicas` (number of machines for training) and `num_ps_tasks` (we usually set `num_ps_tasks` = `num_replicas` / 2). See slim.deployment.model_deploy for more details. +___ +Q7: I could not reproduce the performance even with the provided checkpoints. + +A: Please try running + +```bash +# Run the simple test with Xception_65 as network backbone. +sh local_test.sh +``` + +or + +```bash +# Run the simple test with MobileNet-v2 as network backbone. +sh local_test_mobilenetv2.sh +``` + +First, make sure you could reproduce the results with our provided setting. +After that, you could start to make a new change one at a time to help debug. +___ +Q8: What value of `eval_crop_size` should I use? + +A: Our model uses whole-image inference, meaning that we need to set `eval_crop_size` equal to `output_stride` * k + 1, where k is an integer and set k so that the resulting `eval_crop_size` is slightly larger the largest +image dimension in the dataset. For example, we have `eval_crop_size` = 513x513 for PASCAL dataset whose largest image dimension is 512. Similarly, we set `eval_crop_size` = 1025x2049 for Cityscapes images whose +image dimension is all equal to 1024x2048. +___ +Q9: Why multi-gpu training is slow? + +A: Please try to use more threads to pre-process the inputs. For, example change [num_readers = 4](https://github.com/tensorflow/models/blob/master/research/deeplab/train.py#L457). +___ + + +## References + +1. **Deep Residual Learning for Image Recognition**
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun<br>
+ [[link]](https://arxiv.org/abs/1512.03385), In CVPR, 2016. + +2. **Semantic Contours from Inverse Detectors**<br>
+ Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji, Jitendra Malik<br>
+ [[link]](http://home.bharathh.info/pubs/codes/SBD/download.html), In ICCV, 2011. + +3. **Efficient Inference in Fully Connected CRFs with Gaussian Edge Potentials**<br>
+ Philipp Krähenbühl, Vladlen Koltun<br>
+ [[link]](http://graphics.stanford.edu/projects/densecrf/), In NIPS, 2011. diff --git a/deeplab/models/research/deeplab/g3doc/img/image1.jpg b/deeplab/models/research/deeplab/g3doc/img/image1.jpg new file mode 100644 index 0000000..939b6f9 Binary files /dev/null and b/deeplab/models/research/deeplab/g3doc/img/image1.jpg differ diff --git a/deeplab/models/research/deeplab/g3doc/img/image2.jpg b/deeplab/models/research/deeplab/g3doc/img/image2.jpg new file mode 100644 index 0000000..5ec1b8a Binary files /dev/null and b/deeplab/models/research/deeplab/g3doc/img/image2.jpg differ diff --git a/deeplab/models/research/deeplab/g3doc/img/image3.jpg b/deeplab/models/research/deeplab/g3doc/img/image3.jpg new file mode 100644 index 0000000..d788e3d Binary files /dev/null and b/deeplab/models/research/deeplab/g3doc/img/image3.jpg differ diff --git a/deeplab/models/research/deeplab/g3doc/img/image_info.txt b/deeplab/models/research/deeplab/g3doc/img/image_info.txt new file mode 100644 index 0000000..583d113 --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/img/image_info.txt @@ -0,0 +1,13 @@ +Image provenance: + +image1.jpg: Philippe Put, + https://www.flickr.com/photos/34547181@N00/14499172124 + +image2.jpg: Peretz Partensky + https://www.flickr.com/photos/ifl/3926001309 + +image3.jpg: Peter Harrison + https://www.flickr.com/photos/devcentre/392585679 + + +vis[1-3].png: Showing original image together with DeepLab segmentation map. diff --git a/deeplab/models/research/deeplab/g3doc/img/vis1.png b/deeplab/models/research/deeplab/g3doc/img/vis1.png new file mode 100644 index 0000000..41b8ecd Binary files /dev/null and b/deeplab/models/research/deeplab/g3doc/img/vis1.png differ diff --git a/deeplab/models/research/deeplab/g3doc/img/vis2.png b/deeplab/models/research/deeplab/g3doc/img/vis2.png new file mode 100644 index 0000000..7fa7a4c Binary files /dev/null and b/deeplab/models/research/deeplab/g3doc/img/vis2.png differ diff --git a/deeplab/models/research/deeplab/g3doc/img/vis3.png b/deeplab/models/research/deeplab/g3doc/img/vis3.png new file mode 100644 index 0000000..813b634 Binary files /dev/null and b/deeplab/models/research/deeplab/g3doc/img/vis3.png differ diff --git a/deeplab/models/research/deeplab/g3doc/installation.md b/deeplab/models/research/deeplab/g3doc/installation.md new file mode 100644 index 0000000..591a1f8 --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/installation.md @@ -0,0 +1,73 @@ +# Installation + +## Dependencies + +DeepLab depends on the following libraries: + +* Numpy +* Pillow 1.0 +* tf Slim (which is included in the "tensorflow/models/research/" checkout) +* Jupyter notebook +* Matplotlib +* Tensorflow + +For detailed steps to install Tensorflow, follow the [Tensorflow installation +instructions](https://www.tensorflow.org/install/). A typical user can install +Tensorflow using one of the following commands: + +```bash +# For CPU +pip install tensorflow +# For GPU +pip install tensorflow-gpu +``` + +The remaining libraries can be installed on Ubuntu 14.04 using via apt-get: + +```bash +sudo apt-get install python-pil python-numpy +pip install --user jupyter +pip install --user matplotlib +pip install --user PrettyTable +``` + +## Add Libraries to PYTHONPATH + +When running locally, the tensorflow/models/research/ directory should be +appended to PYTHONPATH. 
This can be done by running the following from +tensorflow/models/research/: + +```bash +# From tensorflow/models/research/ +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim + +# [Optional] for panoptic evaluation, you might need panopticapi: +# https://github.com/cocodataset/panopticapi +# Please clone it to a local directory ${PANOPTICAPI_DIR} +touch ${PANOPTICAPI_DIR}/panopticapi/__init__.py +export PYTHONPATH=$PYTHONPATH:${PANOPTICAPI_DIR}/panopticapi +``` + +Note: This command needs to run from every new terminal you start. If you wish +to avoid running this manually, you can add it as a new line to the end of your +~/.bashrc file. + +# Testing the Installation + +You can test if you have successfully installed the Tensorflow DeepLab by +running the following commands: + +Quick test by running model_test.py: + +```bash +# From tensorflow/models/research/ +python deeplab/model_test.py +``` + +Quick running the whole code on the PASCAL VOC 2012 dataset: + +```bash +# From tensorflow/models/research/deeplab +bash local_test.sh +``` + diff --git a/deeplab/models/research/deeplab/g3doc/model_zoo.md b/deeplab/models/research/deeplab/g3doc/model_zoo.md new file mode 100644 index 0000000..76972dc --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/model_zoo.md @@ -0,0 +1,254 @@ +# TensorFlow DeepLab Model Zoo + +We provide deeplab models pretrained several datasets, including (1) PASCAL VOC +2012, (2) Cityscapes, and (3) ADE20K for reproducing our results, as well as +some checkpoints that are only pretrained on ImageNet for training your own +models. + +## DeepLab models trained on PASCAL VOC 2012 + +Un-tar'ed directory includes: + +* a frozen inference graph (`frozen_inference_graph.pb`). All frozen inference + graphs by default use output stride of 8, a single eval scale of 1.0 and + no left-right flips, unless otherwise specified. MobileNet-v2 based models + do not include the decoder module. + +* a checkpoint (`model.ckpt.data-00000-of-00001`, `model.ckpt.index`) + +### Model details + +We provide several checkpoints that have been pretrained on VOC 2012 train_aug +set or train_aug + trainval set. In the former case, one could train their model +with smaller batch size and freeze batch normalization when limited GPU memory +is available, since we have already fine-tuned the batch normalization for you. +In the latter case, one could directly evaluate the checkpoints on VOC 2012 test +set or use this checkpoint for demo. Note *MobileNet-v2* based models do not +employ ASPP and decoder modules for fast computation. + +Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder +--------------------------- | :--------------: | :-----------------: | :---: | :-----: +mobilenetv2_dm05_coco_voc_trainaug | MobileNet-v2
Depth-Multiplier = 0.5 | ImageNet
MS-COCO
VOC 2012 train_aug set| N/A | N/A +mobilenetv2_dm05_coco_voc_trainval | MobileNet-v2
Depth-Multiplier = 0.5 | ImageNet
MS-COCO
VOC 2012 train_aug + trainval sets | N/A | N/A +mobilenetv2_coco_voc_trainaug | MobileNet-v2 | ImageNet
MS-COCO
VOC 2012 train_aug set| N/A | N/A +mobilenetv2_coco_voc_trainval | MobileNet-v2 | ImageNet
MS-COCO
VOC 2012 train_aug + trainval sets | N/A | N/A +xception65_coco_voc_trainaug | Xception_65 | ImageNet
MS-COCO
VOC 2012 train_aug set| [6,12,18] for OS=16
[12,24,36] for OS=8 | OS = 4 +xception65_coco_voc_trainval | Xception_65 | ImageNet
MS-COCO
VOC 2012 train_aug + trainval sets | [6,12,18] for OS=16
[12,24,36] for OS=8 | OS = 4 + +In the table, **OS** denotes output stride. + +Checkpoint name | Eval OS | Eval scales | Left-right Flip | Multiply-Adds | Runtime (sec) | PASCAL mIOU | File Size +------------------------------------------------------------------------------------------------------------------------ | :-------: | :------------------------: | :-------------: | :------------------: | :------------: | :----------------------------: | :-------: +[mobilenetv2_dm05_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_trainaug_2018_10_01.tar.gz) | 16 | [1.0] | No | 0.88B | - | 70.19% (val) | 7.6MB +[mobilenetv2_dm05_coco_voc_trainval](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_trainval_2018_10_01.tar.gz) | 8 | [1.0] | No | 2.84B | - | 71.83% (test) | 7.6MB +[mobilenetv2_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz) | 16
8 | [1.0]
[0.5:0.25:1.75] | No
Yes | 2.75B
152.59B | 0.1
26.9 | 75.32% (val)
77.33 (val) | 23MB +[mobilenetv2_coco_voc_trainval](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz) | 8 | [0.5:0.25:1.75] | Yes | 152.59B | 26.9 | 80.25% (**test**) | 23MB +[xception65_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_pascal_train_aug_2018_01_04.tar.gz) | 16
8 | [1.0]
[0.5:0.25:1.75] | No
Yes | 54.17B
3055.35B | 0.7
223.2 | 82.20% (val)
83.58% (val) | 439MB +[xception65_coco_voc_trainval](http://download.tensorflow.org/models/deeplabv3_pascal_trainval_2018_01_04.tar.gz) | 8 | [0.5:0.25:1.75] | Yes | 3055.35B | 223.2 | 87.80% (**test**) | 439MB + +In the table, we report both computation complexity (in terms of Multiply-Adds +and CPU Runtime) and segmentation performance (in terms of mIOU) on the PASCAL +VOC val or test set. The reported runtime is calculated by tfprof on a +workstation with CPU E5-1650 v3 @ 3.50GHz and 32GB memory. Note that applying +multi-scale inputs and left-right flips increases the segmentation performance +but also significantly increases the computation and thus may not be suitable +for real-time applications. + +## DeepLab models trained on Cityscapes + +### Model details + +We provide several checkpoints that have been pretrained on Cityscapes +train_fine set. Note *MobileNet-v2* based model has been pretrained on MS-COCO +dataset and does not employ ASPP and decoder modules for fast computation. + +Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder +------------------------------------- | :--------------: | :-------------------------------------: | :----------------------------------------------: | :-----: +mobilenetv2_coco_cityscapes_trainfine | MobileNet-v2 | ImageNet
MS-COCO
Cityscapes train_fine set | N/A | N/A +mobilenetv3_large_cityscapes_trainfine | MobileNet-v3 Large | Cityscapes train_fine set
(No ImageNet) | N/A | OS = 8 +mobilenetv3_small_cityscapes_trainfine | MobileNet-v3 Small | Cityscapes train_fine set
(No ImageNet) | N/A | OS = 8 +xception65_cityscapes_trainfine | Xception_65 | ImageNet
Cityscapes train_fine set | [6, 12, 18] for OS=16
[12, 24, 36] for OS=8 | OS = 4 +xception71_dpc_cityscapes_trainfine | Xception_71 | ImageNet
MS-COCO
Cityscapes train_fine set | Dense Prediction Cell | OS = 4 +xception71_dpc_cityscapes_trainval | Xception_71 | ImageNet
MS-COCO
Cityscapes trainval_fine and coarse set | Dense Prediction Cell | OS = 4 + +In the table, **OS** denotes output stride. + +Note for mobilenet v3 models, we use additional commandline flags as follows: + +``` +--model_variant={ mobilenet_v3_large_seg | mobilenet_v3_small_seg } +--image_pooling_crop_size=769,769 +--image_pooling_stride=4,5 +--add_image_level_feature=1 +--aspp_convs_filters=128 +--aspp_with_concat_projection=0 +--aspp_with_squeeze_and_excitation=1 +--decoder_use_sum_merge=1 +--decoder_filters=19 +--decoder_output_is_logits=1 +--image_se_uses_qsigmoid=1 +--decoder_output_stride=8 +--output_stride=32 +``` + +Checkpoint name | Eval OS | Eval scales | Left-right Flip | Multiply-Adds | Runtime (sec) | Cityscapes mIOU | File Size +-------------------------------------------------------------------------------------------------------------------------------- | :-------: | :-------------------------: | :-------------: | :-------------------: | :------------: | :----------------------------: | :-------: +[mobilenetv2_coco_cityscapes_trainfine](http://download.tensorflow.org/models/deeplabv3_mnv2_cityscapes_train_2018_02_05.tar.gz) | 16
8 | [1.0]
[0.75:0.25:1.25] | No
Yes | 21.27B
433.24B | 0.8
51.12 | 70.71% (val)
73.57% (val) | 23MB +[mobilenetv3_large_cityscapes_trainfine](http://download.tensorflow.org/models/deeplab_mnv3_large_cityscapes_trainfine_2019_11_15.tar.gz) | 32 | [1.0] | No | 15.95B | 0.6 | 72.41% (val) | 17MB +[mobilenetv3_small_cityscapes_trainfine](http://download.tensorflow.org/models/deeplab_mnv3_small_cityscapes_trainfine_2019_11_15.tar.gz) | 32 | [1.0] | No | 4.63B | 0.4 | 68.99% (val) | 5MB +[xception65_cityscapes_trainfine](http://download.tensorflow.org/models/deeplabv3_cityscapes_train_2018_02_06.tar.gz) | 16
8 | [1.0]
[0.75:0.25:1.25] | No
Yes | 418.64B
8677.92B | 5.0
422.8 | 78.79% (val)
80.42% (val) | 439MB +[xception71_dpc_cityscapes_trainfine](http://download.tensorflow.org/models/deeplab_cityscapes_xception71_trainfine_2018_09_08.tar.gz) | 16 | [1.0] | No | 502.07B | - | 80.31% (val) | 445MB +[xception71_dpc_cityscapes_trainval](http://download.tensorflow.org/models/deeplab_cityscapes_xception71_trainvalfine_2018_09_08.tar.gz) | 8 | [0.75:0.25:2] | Yes | - | - | 82.66% (**test**) | 446MB + +### EdgeTPU-DeepLab models on Cityscapes + +EdgeTPU is Google's machine learning accelerator architecture for edge devices +(exists in Coral devices and Pixel4's Neural Core). Leveraging nerual +architecture search (NAS, also named as Auto-ML) algorithms, +[EdgeTPU-Mobilenet](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet) +has been released which yields higher hardware utilization, lower latency, as +well as better accuracy over Mobilenet-v2/v3. We use EdgeTPU-Mobilenet as the +backbone and provide checkpoints that have been pretrained on Cityscapes +train_fine set. We named them as EdgeTPU-DeepLab models. + +Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder +-------------------- | :----------------: | :----------------: | :--: | :-----: +EdgeTPU-DeepLab | EdgeMobilenet-1.0 | ImageNet | N/A | N/A +EdgeTPU-DeepLab-slim | EdgeMobilenet-0.75 | ImageNet | N/A | N/A + +For EdgeTPU-DeepLab-slim, the backbone feature extractor has depth multiplier = +0.75 and aspp_convs_filters = 128. We do not employ ASPP nor decoder modules to +further reduce the latency. We employ the same train/eval flags used for +MobileNet-v2 DeepLab model. Flags changed for EdgeTPU-DeepLab model are listed +here. + +``` +--decoder_output_stride='' +--aspp_convs_filters=256 +--model_variant=mobilenet_edgetpu +``` + +For EdgeTPU-DeepLab-slim, also include the following flags. + +``` +--depth_multiplier=0.75 +--aspp_convs_filters=128 +``` + +Checkpoint name | Eval OS | Eval scales | Cityscapes mIOU | Multiply-Adds | Simulator latency on Pixel 4 EdgeTPU +---------------------------------------------------------------------------------------------------- | :--------: | :---------: | :--------------------------: | :------------: | :----------------------------------: +[EdgeTPU-DeepLab](http://download.tensorflow.org/models/edgetpu-deeplab_2020_03_09.tar.gz) | 32
16 | [1.0] | 70.6% (val)
74.1% (val) | 5.6B
7.1B | 13.8 ms
17.5 ms +[EdgeTPU-DeepLab-slim](http://download.tensorflow.org/models/edgetpu-deeplab-slim_2020_03_09.tar.gz) | 32
16 | [1.0] | 70.0% (val)
73.2% (val) | 3.5B
4.3B | 9.9 ms
13.2 ms + +## DeepLab models trained on ADE20K + +### Model details + +We provide some checkpoints that have been pretrained on ADE20K training set. +Note that the model has only been pretrained on ImageNet, following the +dataset rule. + +Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder | Input size +------------------------------------- | :--------------: | :-------------------------------------: | :----------------------------------------------: | :-----: | :-----: +mobilenetv2_ade20k_train | MobileNet-v2 | ImageNet
ADE20K training set | N/A | OS = 4 | 257x257 +xception65_ade20k_train | Xception_65 | ImageNet
ADE20K training set | [6, 12, 18] for OS=16
[12, 24, 36] for OS=8 | OS = 4 | 513x513 + +The input dimensions of ADE20K have a huge amount of variation. We resize inputs so that the longest size is 257 for MobileNet-v2 (faster inference) and 513 for Xception_65 (better performation). Note that we also include the decoder module in the MobileNet-v2 checkpoint. + +Checkpoint name | Eval OS | Eval scales | Left-right Flip | mIOU | Pixel-wise Accuracy | File Size +------------------------------------- | :-------: | :-------------------------: | :-------------: | :-------------------: | :-------------------: | :-------: +[mobilenetv2_ade20k_train](http://download.tensorflow.org/models/deeplabv3_mnv2_ade20k_train_2018_12_03.tar.gz) | 16 | [1.0] | No | 32.04% (val) | 75.41% (val) | 24.8MB +[xception65_ade20k_train](http://download.tensorflow.org/models/deeplabv3_xception_ade20k_train_2018_05_29.tar.gz) | 8 | [0.5:0.25:1.75] | Yes | 45.65% (val) | 82.52% (val) | 439MB + + +## Checkpoints pretrained on ImageNet + +Un-tar'ed directory includes: + +* model checkpoint (`model.ckpt.data-00000-of-00001`, `model.ckpt.index`). + +### Model details + +We also provide some checkpoints that are pretrained on ImageNet and/or COCO (as +post-fixed in the model name) so that one could use this for training your own +models. + +* mobilenet_v2: We refer the interested users to the TensorFlow open source + [MobileNet-V2](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet) + for details. + +* xception_{41,65,71}: We adapt the original Xception model to the task of + semantic segmentation with the following changes: (1) more layers, (2) all + max pooling operations are replaced by strided (atrous) separable + convolutions, and (3) extra batch-norm and ReLU after each 3x3 depthwise + convolution are added. We provide three Xception model variants with + different network depths. + +* resnet_v1_{50,101}_beta: We modify the original ResNet-101 [10], similar to + PSPNet [11] by replacing the first 7x7 convolution with three 3x3 + convolutions. See resnet_v1_beta.py for more details. + +Model name | File Size +-------------------------------------------------------------------------------------- | :-------: +[xception_41_imagenet](http://download.tensorflow.org/models/xception_41_2018_05_09.tar.gz ) | 288MB +[xception_65_imagenet](http://download.tensorflow.org/models/deeplabv3_xception_2018_01_04.tar.gz) | 447MB +[xception_65_imagenet_coco](http://download.tensorflow.org/models/xception_65_coco_pretrained_2018_10_02.tar.gz) | 292MB +[xception_71_imagenet](http://download.tensorflow.org/models/xception_71_2018_05_09.tar.gz ) | 474MB +[resnet_v1_50_beta_imagenet](http://download.tensorflow.org/models/resnet_v1_50_2018_05_04.tar.gz) | 274MB +[resnet_v1_101_beta_imagenet](http://download.tensorflow.org/models/resnet_v1_101_2018_05_04.tar.gz) | 477MB + +## References + +1. **Mobilenets: Efficient convolutional neural networks for mobile vision applications**
+ Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam
+ [[link]](https://arxiv.org/abs/1704.04861). arXiv:1704.04861, 2017. + +2. **Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation**
+ Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen
+ [[link]](https://arxiv.org/abs/1801.04381). arXiv:1801.04381, 2018. + +3. **Xception: Deep Learning with Depthwise Separable Convolutions**
+ François Chollet
+ [[link]](https://arxiv.org/abs/1610.02357). In CVPR, 2017. + +4. **Deformable Convolutional Networks -- COCO Detection and Segmentation Challenge 2017 Entry**
+ Haozhi Qi, Zheng Zhang, Bin Xiao, Han Hu, Bowen Cheng, Yichen Wei, Jifeng Dai
+ [[link]](http://presentations.cocodataset.org/COCO17-Detect-MSRA.pdf). ICCV COCO Challenge + Workshop, 2017. + +5. **The Pascal Visual Object Classes Challenge: A Retrospective**
+ Mark Everingham, S. M. Ali Eslami, Luc Van Gool, Christopher K. I. Williams, John M. Winn, Andrew Zisserman
+ [[link]](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). IJCV, 2014. + +6. **Semantic Contours from Inverse Detectors**
+ Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji, Jitendra Malik
+ [[link]](http://home.bharathh.info/pubs/codes/SBD/download.html). In ICCV, 2011. + +7. **The Cityscapes Dataset for Semantic Urban Scene Understanding**
+ Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, Bernt Schiele
+ [[link]](https://www.cityscapes-dataset.com/). In CVPR, 2016. + +8. **Microsoft COCO: Common Objects in Context**
+ Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross Girshick, James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, Piotr Dollar
+ [[link]](http://cocodataset.org/). In ECCV, 2014. + +9. **ImageNet Large Scale Visual Recognition Challenge**
+ Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, Li Fei-Fei
+ [[link]](http://www.image-net.org/). IJCV, 2015. + +10. **Deep Residual Learning for Image Recognition**
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
+ [[link]](https://arxiv.org/abs/1512.03385). In CVPR, 2016. + +11. **Pyramid Scene Parsing Network**
+ Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, Jiaya Jia
+ [[link]](https://arxiv.org/abs/1612.01105). In CVPR, 2017. + +12. **Scene Parsing through ADE20K Dataset**
+ Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, Antonio Torralba
+ [[link]](http://groups.csail.mit.edu/vision/datasets/ADE20K/). In CVPR, + 2017. + +13. **Searching for MobileNetV3**
+ Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam
+ [[link]](https://arxiv.org/abs/1905.02244). In ICCV, 2019. diff --git a/deeplab/models/research/deeplab/g3doc/pascal.md b/deeplab/models/research/deeplab/g3doc/pascal.md new file mode 100644 index 0000000..f4bc84e --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/pascal.md @@ -0,0 +1,161 @@ +# Running DeepLab on PASCAL VOC 2012 Semantic Segmentation Dataset + +This page walks through the steps required to run DeepLab on PASCAL VOC 2012 on +a local machine. + +## Download dataset and convert to TFRecord + +We have prepared the script (under the folder `datasets`) to download and +convert PASCAL VOC 2012 semantic segmentation dataset to TFRecord. + +```bash +# From the tensorflow/models/research/deeplab/datasets directory. +sh download_and_convert_voc2012.sh +``` + +The converted dataset will be saved at +./deeplab/datasets/pascal_voc_seg/tfrecord + +## Recommended Directory Structure for Training and Evaluation + +``` ++ datasets + + pascal_voc_seg + + VOCdevkit + + VOC2012 + + JPEGImages + + SegmentationClass + + tfrecord + + exp + + train_on_train_set + + train + + eval + + vis +``` + +where the folder `train_on_train_set` stores the train/eval/vis events and +results (when training DeepLab on the PASCAL VOC 2012 train set). + +## Running the train/eval/vis jobs + +A local training job using `xception_65` can be run with the following command: + +```bash +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=30000 \ + --train_split="train" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="513,513" \ + --train_batch_size=1 \ + --dataset="pascal_voc_seg" \ + --tf_initial_checkpoint=${PATH_TO_INITIAL_CHECKPOINT} \ + --train_logdir=${PATH_TO_TRAIN_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_INITIAL_CHECKPOINT} is the path to the initial checkpoint +(usually an ImageNet pretrained checkpoint), ${PATH_TO_TRAIN_DIR} is the +directory in which training checkpoints and events will be written to, and +${PATH_TO_DATASET} is the directory in which the PASCAL VOC 2012 dataset +resides. + +**Note that for {train,eval,vis}.py:** + +1. In order to reproduce our results, one needs to use large batch size (> 12), + and set fine_tune_batch_norm = True. Here, we simply use small batch size + during training for the purpose of demonstration. If the users have limited + GPU memory at hand, please fine-tune from our provided checkpoints whose + batch norm parameters have been trained, and use smaller learning rate with + fine_tune_batch_norm = False. + +2. The users should change atrous_rates from [6, 12, 18] to [12, 24, 36] if + setting output_stride=8. + +3. The users could skip the flag, `decoder_output_stride`, if you do not want + to use the decoder structure. 
+ +A local evaluation job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/eval.py \ + --logtostderr \ + --eval_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --eval_crop_size="513,513" \ + --dataset="pascal_voc_seg" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --eval_logdir=${PATH_TO_EVAL_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_EVAL_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +PASCAL VOC 2012 dataset resides. + +A local visualization job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/vis.py \ + --logtostderr \ + --vis_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --vis_crop_size="513,513" \ + --dataset="pascal_voc_seg" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --vis_logdir=${PATH_TO_VIS_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_VIS_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +PASCAL VOC 2012 dataset resides. Note that if the users would like to save the +segmentation results for evaluation server, set also_save_raw_predictions = +True. + +## Running Tensorboard + +Progress for training and evaluation jobs can be inspected using Tensorboard. If +using the recommended directory structure, Tensorboard can be run using the +following command: + +```bash +tensorboard --logdir=${PATH_TO_LOG_DIRECTORY} +``` + +where `${PATH_TO_LOG_DIRECTORY}` points to the directory that contains the +train, eval, and vis directories (e.g., the folder `train_on_train_set` in the +above example). Please note it may take Tensorboard a couple minutes to populate +with data. + +## Example + +We provide a script to run the {train,eval,vis,export_model}.py on the PASCAL VOC +2012 dataset as an example. See the code in local_test.sh for details. + +```bash +# From tensorflow/models/research/deeplab +sh local_test.sh +``` diff --git a/deeplab/models/research/deeplab/g3doc/quantize.md b/deeplab/models/research/deeplab/g3doc/quantize.md new file mode 100644 index 0000000..65dbdd7 --- /dev/null +++ b/deeplab/models/research/deeplab/g3doc/quantize.md @@ -0,0 +1,103 @@ +# Quantize DeepLab model for faster on-device inference + +This page describes the steps required to quantize DeepLab model and convert it +to TFLite for on-device inference. The main steps include: + +1. Quantization-aware training +1. Exporting model +1. Converting to TFLite FlatBuffer + +We provide details for each step below. + +## Quantization-aware training + +DeepLab supports two approaches to quantize your model. + +1. **[Recommended]** Training a non-quantized model until convergence. Then + fine-tune the trained float model with quantization using a small learning + rate (on PASCAL we use the value of 3e-5) . This fine-tuning step usually + takes 2k to 5k steps to converge. + +1. Training a deeplab float model with delayed quantization. 
Usually we delay + quantization until the last a few thousand steps in training. + +In the current implementation, quantization is only supported with 1) +`num_clones=1` for training and 2) single scale inference for evaluation, +visualization and model export. To get the best performance for the quantized +model, we strongly recommend to train the float model with larger `num_clones` +and then fine-tune the model with a single clone. + +Here shows the commandline to quantize deeplab model trained on PASCAL VOC +dataset using fine-tuning: + +``` +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=3000 \ + --train_split="train" \ + --model_variant="mobilenet_v2" \ + --output_stride=16 \ + --train_crop_size="513,513" \ + --train_batch_size=8 \ + --base_learning_rate=3e-5 \ + --dataset="pascal_voc_seg" \ + --quantize_delay_step=0 \ + --tf_initial_checkpoint=${PATH_TO_TRAINED_FLOAT_MODEL} \ + --train_logdir=${PATH_TO_TRAIN_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +## Converting to TFLite FlatBuffer + +First use the following commandline to export your trained model. + +``` +# From tensorflow/models/research/ +python deeplab/export_model.py \ + --checkpoint_path=${CHECKPOINT_PATH} \ + --quantize_delay_step=0 \ + --export_path=${OUTPUT_DIR}/frozen_inference_graph.pb + +``` + +Commandline below shows how to convert exported graphdef to TFlite model. + +``` +# From tensorflow/models/research/ +python deeplab/convert_to_tflite.py \ + --quantized_graph_def_path=${OUTPUT_DIR}/frozen_inference_graph.pb \ + --input_tensor_name=MobilenetV2/MobilenetV2/input:0 \ + --output_tflite_path=${OUTPUT_DIR}/frozen_inference_graph.tflite \ + --test_image_path=${PATH_TO_TEST_IMAGE} +``` + +**[Important]** Note that converted model expects 513x513 RGB input and doesn't +include preprocessing (resize and pad input image) and post processing (crop +padded region and resize to original input size). These steps can be implemented +outside of TFlite model. + +## Quantized model on PASCAL VOC + +We provide float and quantized checkpoints that have been pretrained on VOC 2012 +train_aug set, using MobileNet-v2 backbone with different depth multipliers. +Quantized model usually have 1% decay in mIoU. 
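+
+For reference, a minimal sketch of running the converted `.tflite` file with
+`tf.lite.Interpreter` might look like the following. This is only an
+illustration: paths are placeholders, and it assumes the exported model takes a
+single 513x513 RGB batch and returns a per-pixel label map. As noted above,
+resizing/padding and the reverse post-processing must be done outside the
+model.
+
+```
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+
+# Placeholder paths; point these at your own exported files.
+interpreter = tf.lite.Interpreter(
+    model_path='frozen_inference_graph.tflite')
+interpreter.allocate_tensors()
+input_details = interpreter.get_input_details()
+output_details = interpreter.get_output_details()
+
+# Resize so the longer side is 513 and pad to 513x513, since the TFLite
+# model does not perform this preprocessing itself.
+image = Image.open('test_image.jpg').convert('RGB')
+width, height = image.size
+ratio = 513.0 / max(width, height)
+resized = image.resize((int(width * ratio), int(height * ratio)))
+padded = Image.new('RGB', (513, 513))
+padded.paste(resized, (0, 0))
+
+input_data = np.expand_dims(np.asarray(padded), 0).astype(
+    input_details[0]['dtype'])
+interpreter.set_tensor(input_details[0]['index'], input_data)
+interpreter.invoke()
+seg_map = interpreter.get_tensor(output_details[0]['index'])[0]
+
+# Crop away the padded region; resize back to the original size if needed.
+seg_map = seg_map[:resized.size[1], :resized.size[0]]
+```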
+ +For quantized (8bit) model, un-tar'ed directory includes: + +* a frozen inference graph (frozen_inference_graph.pb) + +* a checkpoint (model.ckpt.data*, model.ckpt.index) + +* a converted TFlite FlatBuffer file (frozen_inference_graph.tflite) + +Checkpoint name | Eval OS | Eval scales | Left-right Flip | Multiply-Adds | Quantize | PASCAL mIOU | Folder Size | TFLite File Size +-------------------------------------------------------------------------------------------------------------------------------------------- | :-----: | :---------: | :-------------: | :-----------: | :------: | :----------: | :-------: | :-------: +[mobilenetv2_dm05_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_trainaug_2018_10_01.tar.gz) | 16 | [1.0] | No | 0.88B | No | 70.19% (val) | 7.6MB | N/A +[mobilenetv2_dm05_coco_voc_trainaug_8bit](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_train_aug_8bit_2019_04_26.tar.gz) | 16 | [1.0] | No | 0.88B | Yes | 69.65% (val) | 8.2MB | 751.1KB +[mobilenetv2_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz) | 16 | [1.0] | No | 2.75B | No | 75.32% (val) | 23MB | N/A +[mobilenetv2_coco_voc_trainaug_8bit](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_train_aug_8bit_2019_04_26.tar.gz) | 16 | [1.0] | No | 2.75B | Yes | 74.26% (val) | 24MB | 2.2MB + +Note that you might need the nightly build of TensorFlow (see +[here](https://www.tensorflow.org/install) for install instructions) to convert +above quantized model to TFLite. diff --git a/deeplab/models/research/deeplab/inference.py b/deeplab/models/research/deeplab/inference.py new file mode 100644 index 0000000..543cc23 --- /dev/null +++ b/deeplab/models/research/deeplab/inference.py @@ -0,0 +1,172 @@ +import os +from io import BytesIO +import tarfile +import tempfile +from six.moves import urllib + +from matplotlib import gridspec +from matplotlib import pyplot as plt +import numpy as np +from PIL import Image + +import tensorflow as tf +from matplotlib import cm + + + +class DeepLabModel(object): + """Class to load deeplab model and run inference.""" + + INPUT_TENSOR_NAME = 'ImageTensor:0' + OUTPUT_TENSOR_NAME = 'SemanticPredictions:0' + INPUT_SIZE = 448 + FROZEN_GRAPH_NAME = 'frozen_inference_graph' + + def __init__(self, tarball_path): + """Creates and loads pretrained deeplab model.""" + self.graph = tf.Graph() + + # graph_def = None + # Extract frozen graph from tar archive. + # tar_file = tarfile.open(tarball_path) + # for tar_info in tar_file.getmembers(): + # if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name): + # file_handle = tar_file.extractfile(tar_info) + # graph_def = tf.GraphDef.FromString(file_handle.read()) + # break + + # tar_file.close() + + def load_pb(path_to_pb): + with tf.gfile.GFile(path_to_pb, "rb") as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + with tf.Graph().as_default() as graph: + tf.import_graph_def(graph_def, name='') + return graph + self.graph = load_pb(tarball_path) + + # graph_def = tf.GraphDef() + # graph_def.ParseFromString(open(tarball_path, 'rb')) + + # if graph_def is None: + # raise RuntimeError('Cannot find inference graph in tar archive.') + + # with self.graph.as_default(): + # tf.import_graph_def(graph_def, name='') + + self.sess = tf.Session(graph=self.graph) + + def run(self, image): + """Runs inference on a single image. + + Args: + image: A PIL.Image object, raw input image. 
+ + Returns: + resized_image: RGB image resized from original input image. + seg_map: Segmentation map of `resized_image`. + """ + width, height = image.size + resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height) + target_size = (int(resize_ratio * width), int(resize_ratio * height)) + resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS) + batch_seg_map = self.sess.run( + self.OUTPUT_TENSOR_NAME, + feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]}) + seg_map = batch_seg_map[0] + return resized_image, seg_map + + +def create_pascal_label_colormap(): + """Creates a label colormap used in PASCAL VOC segmentation benchmark. + + Returns: + A Colormap for visualizing segmentation results. + """ + colormap = np.zeros((256, 3), dtype=int) + ind = np.arange(256, dtype=int) + + for shift in reversed(range(8)): + for channel in range(3): + colormap[:, channel] |= ((ind >> channel) & 1) << shift + ind >>= 3 + + return colormap + + +def label_to_color_image(label): + """Adds color defined by the dataset colormap to the label. + + Args: + label: A 2D array with integer type, storing the segmentation label. + + Returns: + result: A 2D array with floating type. The element of the array + is the color indexed by the corresponding element in the input label + to the PASCAL color map. + + Raises: + ValueError: If label is not of rank 2 or its value is larger than color + map maximum entry. + """ + if label.ndim != 2: + raise ValueError('Expect 2-D input label') + + colormap = create_pascal_label_colormap() + + if np.max(label) >= len(colormap): + raise ValueError('label value too large.') + + return colormap[label] + + +def vis_segmentation(image, seg_map): + """Visualizes input image, segmentation map and overlay view.""" + plt.figure(figsize=(15, 5)) + grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1]) + + plt.subplot(grid_spec[0]) + plt.imshow(image) + plt.axis('off') + plt.title('input image') + + plt.subplot(grid_spec[1]) + seg_image = label_to_color_image(seg_map).astype(np.uint8) + plt.imshow(seg_image) + plt.axis('off') + plt.title('segmentation map') + + plt.subplot(grid_spec[2]) + plt.imshow(image) + plt.imshow(seg_image, alpha=0.7) + plt.axis('off') + plt.title('segmentation overlay') + + unique_labels = np.unique(seg_map) + print(unique_labels) + ax = plt.subplot(grid_spec[3]) + plt.imshow( + FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest') + ax.yaxis.tick_right() + plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels]) + plt.xticks([], []) + ax.tick_params(width=0.0) + plt.grid('off') + plt.show() + + +LABEL_NAMES = np.asarray([ + 'Background', 'Heart_O' +]) + +FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1) +FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP) + +model_path = '/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/exp/train_on_trainval_set/export/frozen_inference_graph.pb' +MODEL = DeepLabModel(model_path) + + +img = Image.open('/Users/mandywoo/Documents/UAV-Forge/image-proc_2020-21/models/research/deeplab/datasets/PQR/dataset/JPEGImages/Heart_O_img_0.jpg') +resized_image, seg_map = MODEL.run(img) +vis_segmentation(resized_image, seg_map) \ No newline at end of file diff --git a/deeplab/models/research/deeplab/input_preprocess.py b/deeplab/models/research/deeplab/input_preprocess.py new file mode 100644 index 0000000..9ca8bce --- /dev/null +++ b/deeplab/models/research/deeplab/input_preprocess.py @@ -0,0 +1,139 @@ +# Lint as: 
python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Prepares the data used for DeepLab training/evaluation.""" +import tensorflow as tf +from deeplab.core import feature_extractor +from deeplab.core import preprocess_utils + + +# The probability of flipping the images and labels +# left-right during training +_PROB_OF_FLIP = 0.5 + + +def preprocess_image_and_label(image, + label, + crop_height, + crop_width, + min_resize_value=None, + max_resize_value=None, + resize_factor=None, + min_scale_factor=1., + max_scale_factor=1., + scale_factor_step_size=0, + ignore_label=255, + is_training=True, + model_variant=None): + """Preprocesses the image and label. + + Args: + image: Input image. + label: Ground truth annotation label. + crop_height: The height value used to crop the image and label. + crop_width: The width value used to crop the image and label. + min_resize_value: Desired size of the smaller image side. + max_resize_value: Maximum allowed size of the larger image side. + resize_factor: Resized dimensions are multiple of factor plus one. + min_scale_factor: Minimum scale factor value. + max_scale_factor: Maximum scale factor value. + scale_factor_step_size: The step size from min scale factor to max scale + factor. The input is randomly scaled based on the value of + (min_scale_factor, max_scale_factor, scale_factor_step_size). + ignore_label: The label value which will be ignored for training and + evaluation. + is_training: If the preprocessing is used for training or not. + model_variant: Model variant (string) for choosing how to mean-subtract the + images. See feature_extractor.network_map for supported model variants. + + Returns: + original_image: Original image (could be resized). + processed_image: Preprocessed image. + label: Preprocessed ground truth segmentation label. + + Raises: + ValueError: Ground truth label not provided during training. + """ + if is_training and label is None: + raise ValueError('During training, label must be provided.') + if model_variant is None: + tf.logging.warning('Default mean-subtraction is performed. Please specify ' + 'a model_variant. See feature_extractor.network_map for ' + 'supported model variants.') + + # Keep reference to original image. + original_image = image + + processed_image = tf.cast(image, tf.float32) + + if label is not None: + label = tf.cast(label, tf.int32) + + # Resize image and label to the desired range. + if min_resize_value or max_resize_value: + [processed_image, label] = ( + preprocess_utils.resize_to_range( + image=processed_image, + label=label, + min_size=min_resize_value, + max_size=max_resize_value, + factor=resize_factor, + align_corners=True)) + # The `original_image` becomes the resized image. + original_image = tf.identity(processed_image) + + # Data augmentation by randomly scaling the inputs. 
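+  # For example, min_scale_factor=0.5, max_scale_factor=2.0 and
+  # scale_factor_step_size=0.25 sample a scale from {0.5, 0.75, ..., 2.0};
+  # a step size of 0 instead samples the scale uniformly from [min, max].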
+ if is_training: + scale = preprocess_utils.get_random_scale( + min_scale_factor, max_scale_factor, scale_factor_step_size) + processed_image, label = preprocess_utils.randomly_scale_image_and_label( + processed_image, label, scale) + processed_image.set_shape([None, None, 3]) + + # Pad image and label to have dimensions >= [crop_height, crop_width] + image_shape = tf.shape(processed_image) + image_height = image_shape[0] + image_width = image_shape[1] + + target_height = image_height + tf.maximum(crop_height - image_height, 0) + target_width = image_width + tf.maximum(crop_width - image_width, 0) + + # Pad image with mean pixel value. + mean_pixel = tf.reshape( + feature_extractor.mean_pixel(model_variant), [1, 1, 3]) + processed_image = preprocess_utils.pad_to_bounding_box( + processed_image, 0, 0, target_height, target_width, mean_pixel) + + if label is not None: + label = preprocess_utils.pad_to_bounding_box( + label, 0, 0, target_height, target_width, ignore_label) + + # Randomly crop the image and label. + if is_training and label is not None: + processed_image, label = preprocess_utils.random_crop( + [processed_image, label], crop_height, crop_width) + + processed_image.set_shape([crop_height, crop_width, 3]) + + if label is not None: + label.set_shape([crop_height, crop_width, 1]) + + if is_training: + # Randomly left-right flip the image and label. + processed_image, label, _ = preprocess_utils.flip_dim( + [processed_image, label], _PROB_OF_FLIP, dim=1) + + return original_image, processed_image, label diff --git a/deeplab/models/research/deeplab/local_test.sh b/deeplab/models/research/deeplab/local_test.sh new file mode 100644 index 0000000..e568ead --- /dev/null +++ b/deeplab/models/research/deeplab/local_test.sh @@ -0,0 +1,148 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to run local test on PASCAL VOC 2012. Users could also +# modify from this script for their use case. +# +# Usage: +# # From the tensorflow/models/research/deeplab directory. +# bash ./local_test.sh +# +# + +# Exit immediately if a command exits with a non-zero status. +set -e + +# Move one-level up to tensorflow/models/research directory. +cd .. + +# Update PYTHONPATH. +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim +# set PYTHONPATH=$PYTHONPATH:`../tensorflow/models/research`:`../tensorflow/models/research/slim` + +# Set up the working environment. +CURRENT_DIR=$(pwd) +WORK_DIR="${CURRENT_DIR}/deeplab" + +# Run model_test first to make sure the PYTHONPATH is correctly set. +python3.7 "${WORK_DIR}"/model_test.py + +# Go to datasets folder and download PASCAL VOC 2012 segmentation dataset. +DATASET_DIR="datasets" +cd "${WORK_DIR}/${DATASET_DIR}" +bash download_and_convert_voc2012.sh + +# Go back to original directory. +cd "${CURRENT_DIR}" + +# Set up the working directories. 
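+# The folders below follow the directory structure recommended in
+# g3doc/pascal.md, plus an export folder for the frozen inference graph.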
+PASCAL_FOLDER="pascal_voc_seg" +EXP_FOLDER="exp/train_on_trainval_set" +INIT_FOLDER="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/init_models" +TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/train" +EVAL_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/eval" +VIS_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/vis" +EXPORT_DIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/export" +mkdir -p "${INIT_FOLDER}" +mkdir -p "${TRAIN_LOGDIR}" +mkdir -p "${EVAL_LOGDIR}" +mkdir -p "${VIS_LOGDIR}" +mkdir -p "${EXPORT_DIR}" + +# Copy locally the trained checkpoint as the initial checkpoint. +TF_INIT_ROOT="http://download.tensorflow.org/models" +TF_INIT_CKPT="deeplabv3_pascal_train_aug_2018_01_04.tar.gz" +cd "${INIT_FOLDER}" +wget -nd -c "${TF_INIT_ROOT}/${TF_INIT_CKPT}" +tar -xf "${TF_INIT_CKPT}" +cd "${CURRENT_DIR}" + +PASCAL_DATASET="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/tfrecord" + +# Train 10 iterations. +NUM_ITERATIONS=10 +python3.7 "${WORK_DIR}"/train.py \ + --logtostderr \ + --train_split="trainval" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="513,513" \ + --train_batch_size=4 \ + --training_number_of_steps="${NUM_ITERATIONS}" \ + --fine_tune_batch_norm=true \ + --tf_initial_checkpoint="${INIT_FOLDER}/deeplabv3_pascal_train_aug/model.ckpt" \ + --train_logdir="${TRAIN_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" + +# Run evaluation. This performs eval over the full val split (1449 images) and +# will take a while. +# Using the provided checkpoint, one should expect mIOU=82.20%. +python3.7 "${WORK_DIR}"/eval.py \ + --logtostderr \ + --eval_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --eval_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --eval_logdir="${EVAL_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_evaluations=1 + +# Visualize the results. +python3.7 "${WORK_DIR}"/vis.py \ + --logtostderr \ + --vis_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --vis_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --vis_logdir="${VIS_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_iterations=1 + +# Export the trained checkpoint. +CKPT_PATH="${TRAIN_LOGDIR}/model.ckpt-${NUM_ITERATIONS}" +EXPORT_PATH="${EXPORT_DIR}/frozen_inference_graph.pb" + +python3.7 "${WORK_DIR}"/export_model.py \ + --logtostderr \ + --checkpoint_path="${CKPT_PATH}" \ + --export_path="${EXPORT_PATH}" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --num_classes=21 \ + --crop_size=513 \ + --crop_size=513 \ + --inference_scales=1.0 + +# Run inference with the exported checkpoint. +# Please refer to the provided deeplab_demo.ipynb for an example. diff --git a/deeplab/models/research/deeplab/local_test_mobilenetv2.sh b/deeplab/models/research/deeplab/local_test_mobilenetv2.sh new file mode 100644 index 0000000..c38646f --- /dev/null +++ b/deeplab/models/research/deeplab/local_test_mobilenetv2.sh @@ -0,0 +1,129 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to run local test on PASCAL VOC 2012 using MobileNet-v2. +# Users could also modify from this script for their use case. +# +# Usage: +# # From the tensorflow/models/research/deeplab directory. +# sh ./local_test_mobilenetv2.sh +# +# + +# Exit immediately if a command exits with a non-zero status. +set -e + +# Move one-level up to tensorflow/models/research directory. +cd .. + +# Update PYTHONPATH. +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim + +# Set up the working environment. +CURRENT_DIR=$(pwd) +WORK_DIR="${CURRENT_DIR}/deeplab" + +# Run model_test first to make sure the PYTHONPATH is correctly set. +python "${WORK_DIR}"/model_test.py -v + +# Go to datasets folder and download PASCAL VOC 2012 segmentation dataset. +DATASET_DIR="datasets" +cd "${WORK_DIR}/${DATASET_DIR}" +sh download_and_convert_voc2012.sh + +# Go back to original directory. +cd "${CURRENT_DIR}" + +# Set up the working directories. +PASCAL_FOLDER="pascal_voc_seg" +EXP_FOLDER="exp/train_on_trainval_set_mobilenetv2" +INIT_FOLDER="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/init_models" +TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/train" +EVAL_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/eval" +VIS_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/vis" +EXPORT_DIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/export" +mkdir -p "${INIT_FOLDER}" +mkdir -p "${TRAIN_LOGDIR}" +mkdir -p "${EVAL_LOGDIR}" +mkdir -p "${VIS_LOGDIR}" +mkdir -p "${EXPORT_DIR}" + +# Copy locally the trained checkpoint as the initial checkpoint. +TF_INIT_ROOT="http://download.tensorflow.org/models" +CKPT_NAME="deeplabv3_mnv2_pascal_train_aug" +TF_INIT_CKPT="${CKPT_NAME}_2018_01_29.tar.gz" +cd "${INIT_FOLDER}" +wget -nd -c "${TF_INIT_ROOT}/${TF_INIT_CKPT}" +tar -xf "${TF_INIT_CKPT}" +cd "${CURRENT_DIR}" + +PASCAL_DATASET="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/tfrecord" + +# Train 10 iterations. +NUM_ITERATIONS=10 +python "${WORK_DIR}"/train.py \ + --logtostderr \ + --train_split="trainval" \ + --model_variant="mobilenet_v2" \ + --output_stride=16 \ + --train_crop_size="513,513" \ + --train_batch_size=4 \ + --training_number_of_steps="${NUM_ITERATIONS}" \ + --fine_tune_batch_norm=true \ + --tf_initial_checkpoint="${INIT_FOLDER}/${CKPT_NAME}/model.ckpt-30000" \ + --train_logdir="${TRAIN_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" + +# Run evaluation. This performs eval over the full val split (1449 images) and +# will take a while. +# Using the provided checkpoint, one should expect mIOU=75.34%. +python "${WORK_DIR}"/eval.py \ + --logtostderr \ + --eval_split="val" \ + --model_variant="mobilenet_v2" \ + --eval_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --eval_logdir="${EVAL_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_evaluations=1 + +# Visualize the results. 
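+# vis.py writes colored segmentation maps under "${VIS_LOGDIR}" so the
+# predictions can be inspected directly.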
+python "${WORK_DIR}"/vis.py \ + --logtostderr \ + --vis_split="val" \ + --model_variant="mobilenet_v2" \ + --vis_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --vis_logdir="${VIS_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_iterations=1 + +# Export the trained checkpoint. +CKPT_PATH="${TRAIN_LOGDIR}/model.ckpt-${NUM_ITERATIONS}" +EXPORT_PATH="${EXPORT_DIR}/frozen_inference_graph.pb" + +python "${WORK_DIR}"/export_model.py \ + --logtostderr \ + --checkpoint_path="${CKPT_PATH}" \ + --export_path="${EXPORT_PATH}" \ + --model_variant="mobilenet_v2" \ + --num_classes=21 \ + --crop_size=513 \ + --crop_size=513 \ + --inference_scales=1.0 + +# Run inference with the exported checkpoint. +# Please refer to the provided deeplab_demo.ipynb for an example. diff --git a/deeplab/models/research/deeplab/model.py b/deeplab/models/research/deeplab/model.py new file mode 100644 index 0000000..311aaa1 --- /dev/null +++ b/deeplab/models/research/deeplab/model.py @@ -0,0 +1,911 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Provides DeepLab model definition and helper functions. + +DeepLab is a deep learning system for semantic image segmentation with +the following features: + +(1) Atrous convolution to explicitly control the resolution at which +feature responses are computed within Deep Convolutional Neural Networks. + +(2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at +multiple scales with filters at multiple sampling rates and effective +fields-of-views. + +(3) ASPP module augmented with image-level feature and batch normalization. + +(4) A simple yet effective decoder module to recover the object boundaries. + +See the following papers for more details: + +"Encoder-Decoder with Atrous Separable Convolution for Semantic Image +Segmentation" +Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam. +(https://arxiv.org/abs/1802.02611) + +"Rethinking Atrous Convolution for Semantic Image Segmentation," +Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam +(https://arxiv.org/abs/1706.05587) + +"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, +Atrous Convolution, and Fully Connected CRFs", +Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, +Alan L Yuille (* equal contribution) +(https://arxiv.org/abs/1606.00915) + +"Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected +CRFs" +Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, +Alan L. 
Yuille (* equal contribution) +(https://arxiv.org/abs/1412.7062) +""" +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import dense_prediction_cell +from deeplab.core import feature_extractor +from deeplab.core import utils + +slim = contrib_slim + +LOGITS_SCOPE_NAME = 'logits' +MERGED_LOGITS_SCOPE = 'merged_logits' +IMAGE_POOLING_SCOPE = 'image_pooling' +ASPP_SCOPE = 'aspp' +CONCAT_PROJECTION_SCOPE = 'concat_projection' +DECODER_SCOPE = 'decoder' +META_ARCHITECTURE_SCOPE = 'meta_architecture' + +PROB_SUFFIX = '_prob' + +_resize_bilinear = utils.resize_bilinear +scale_dimension = utils.scale_dimension +split_separable_conv2d = utils.split_separable_conv2d + + +def get_extra_layer_scopes(last_layers_contain_logits_only=False): + """Gets the scopes for extra layers. + + Args: + last_layers_contain_logits_only: Boolean, True if only consider logits as + the last layer (i.e., exclude ASPP module, decoder module and so on) + + Returns: + A list of scopes for extra layers. + """ + if last_layers_contain_logits_only: + return [LOGITS_SCOPE_NAME] + else: + return [ + LOGITS_SCOPE_NAME, + IMAGE_POOLING_SCOPE, + ASPP_SCOPE, + CONCAT_PROJECTION_SCOPE, + DECODER_SCOPE, + META_ARCHITECTURE_SCOPE, + ] + + +def predict_labels_multi_scale(images, + model_options, + eval_scales=(1.0,), + add_flipped_images=False): + """Predicts segmentation labels. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + eval_scales: The scales to resize images for evaluation. + add_flipped_images: Add flipped images for evaluation or not. + + Returns: + A dictionary with keys specifying the output_type (e.g., semantic + prediction) and values storing Tensors representing predictions (argmax + over channels). Each prediction has size [batch, height, width]. + """ + outputs_to_predictions = { + output: [] + for output in model_options.outputs_to_num_classes + } + + for i, image_scale in enumerate(eval_scales): + with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None): + outputs_to_scales_to_logits = multi_scale_logits( + images, + model_options=model_options, + image_pyramid=[image_scale], + is_training=False, + fine_tune_batch_norm=False) + + if add_flipped_images: + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + outputs_to_scales_to_logits_reversed = multi_scale_logits( + tf.reverse_v2(images, [2]), + model_options=model_options, + image_pyramid=[image_scale], + is_training=False, + fine_tune_batch_norm=False) + + for output in sorted(outputs_to_scales_to_logits): + scales_to_logits = outputs_to_scales_to_logits[output] + logits = _resize_bilinear( + scales_to_logits[MERGED_LOGITS_SCOPE], + tf.shape(images)[1:3], + scales_to_logits[MERGED_LOGITS_SCOPE].dtype) + outputs_to_predictions[output].append( + tf.expand_dims(tf.nn.softmax(logits), 4)) + + if add_flipped_images: + scales_to_logits_reversed = ( + outputs_to_scales_to_logits_reversed[output]) + logits_reversed = _resize_bilinear( + tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]), + tf.shape(images)[1:3], + scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype) + outputs_to_predictions[output].append( + tf.expand_dims(tf.nn.softmax(logits_reversed), 4)) + + for output in sorted(outputs_to_predictions): + predictions = outputs_to_predictions[output] + # Compute average prediction across different scales and flipped images. 
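+    # Each entry in `predictions` has shape [batch, height, width,
+    # num_classes, 1]; concatenating along axis 4 stacks one softmax map per
+    # evaluation scale (and per flipped input, if enabled) before averaging.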
+ predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4) + outputs_to_predictions[output] = tf.argmax(predictions, 3) + outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions) + + return outputs_to_predictions + + +def predict_labels(images, model_options, image_pyramid=None): + """Predicts segmentation labels. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + image_pyramid: Input image scales for multi-scale feature extraction. + + Returns: + A dictionary with keys specifying the output_type (e.g., semantic + prediction) and values storing Tensors representing predictions (argmax + over channels). Each prediction has size [batch, height, width]. + """ + outputs_to_scales_to_logits = multi_scale_logits( + images, + model_options=model_options, + image_pyramid=image_pyramid, + is_training=False, + fine_tune_batch_norm=False) + + predictions = {} + for output in sorted(outputs_to_scales_to_logits): + scales_to_logits = outputs_to_scales_to_logits[output] + logits = scales_to_logits[MERGED_LOGITS_SCOPE] + # There are two ways to obtain the final prediction results: (1) bilinear + # upsampling the logits followed by argmax, or (2) argmax followed by + # nearest neighbor upsampling. The second option may introduce the "blocking + # effect" but is computationally efficient. + if model_options.prediction_with_upsampled_logits: + logits = _resize_bilinear(logits, + tf.shape(images)[1:3], + scales_to_logits[MERGED_LOGITS_SCOPE].dtype) + predictions[output] = tf.argmax(logits, 3) + predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits) + else: + argmax_results = tf.argmax(logits, 3) + argmax_results = tf.image.resize_nearest_neighbor( + tf.expand_dims(argmax_results, 3), + tf.shape(images)[1:3], + align_corners=True, + name='resize_prediction') + predictions[output] = tf.squeeze(argmax_results, 3) + predictions[output + PROB_SUFFIX] = tf.image.resize_bilinear( + tf.nn.softmax(logits), + tf.shape(images)[1:3], + align_corners=True, + name='resize_prob') + return predictions + + +def multi_scale_logits(images, + model_options, + image_pyramid, + weight_decay=0.0001, + is_training=False, + fine_tune_batch_norm=False, + nas_training_hyper_parameters=None): + """Gets the logits for multi-scale inputs. + + The returned logits are all downsampled (due to max-pooling layers) + for both training and evaluation. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + image_pyramid: Input image scales for multi-scale feature extraction. + weight_decay: The weight decay for model variables. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training nas models. Its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + + Returns: + outputs_to_scales_to_logits: A map of maps from output_type (e.g., + semantic prediction) to a dictionary of multi-scale logits names to + logits. For each output_type, the dictionary has keys which + correspond to the scales and values which correspond to the logits. + For example, if `scales` equals [1.0, 1.5], then the keys would + include 'merged_logits', 'logits_1.00' and 'logits_1.50'. 
+ + Raises: + ValueError: If model_options doesn't specify crop_size and its + add_image_level_feature = True, since add_image_level_feature requires + crop_size information. + """ + # Setup default values. + if not image_pyramid: + image_pyramid = [1.0] + crop_height = ( + model_options.crop_size[0] + if model_options.crop_size else tf.shape(images)[1]) + crop_width = ( + model_options.crop_size[1] + if model_options.crop_size else tf.shape(images)[2]) + if model_options.image_pooling_crop_size: + image_pooling_crop_height = model_options.image_pooling_crop_size[0] + image_pooling_crop_width = model_options.image_pooling_crop_size[1] + + # Compute the height, width for the output logits. + if model_options.decoder_output_stride: + logits_output_stride = min(model_options.decoder_output_stride) + else: + logits_output_stride = model_options.output_stride + + logits_height = scale_dimension( + crop_height, + max(1.0, max(image_pyramid)) / logits_output_stride) + logits_width = scale_dimension( + crop_width, + max(1.0, max(image_pyramid)) / logits_output_stride) + + # Compute the logits for each scale in the image pyramid. + outputs_to_scales_to_logits = { + k: {} + for k in model_options.outputs_to_num_classes + } + + num_channels = images.get_shape().as_list()[-1] + + for image_scale in image_pyramid: + if image_scale != 1.0: + scaled_height = scale_dimension(crop_height, image_scale) + scaled_width = scale_dimension(crop_width, image_scale) + scaled_crop_size = [scaled_height, scaled_width] + scaled_images = _resize_bilinear(images, scaled_crop_size, images.dtype) + if model_options.crop_size: + scaled_images.set_shape( + [None, scaled_height, scaled_width, num_channels]) + # Adjust image_pooling_crop_size accordingly. + scaled_image_pooling_crop_size = None + if model_options.image_pooling_crop_size: + scaled_image_pooling_crop_size = [ + scale_dimension(image_pooling_crop_height, image_scale), + scale_dimension(image_pooling_crop_width, image_scale)] + else: + scaled_crop_size = model_options.crop_size + scaled_images = images + scaled_image_pooling_crop_size = model_options.image_pooling_crop_size + + updated_options = model_options._replace( + crop_size=scaled_crop_size, + image_pooling_crop_size=scaled_image_pooling_crop_size) + outputs_to_logits = _get_logits( + scaled_images, + updated_options, + weight_decay=weight_decay, + reuse=tf.AUTO_REUSE, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm, + nas_training_hyper_parameters=nas_training_hyper_parameters) + + # Resize the logits to have the same dimension before merging. + for output in sorted(outputs_to_logits): + outputs_to_logits[output] = _resize_bilinear( + outputs_to_logits[output], [logits_height, logits_width], + outputs_to_logits[output].dtype) + + # Return when only one input scale. + if len(image_pyramid) == 1: + for output in sorted(model_options.outputs_to_num_classes): + outputs_to_scales_to_logits[output][ + MERGED_LOGITS_SCOPE] = outputs_to_logits[output] + return outputs_to_scales_to_logits + + # Save logits to the output map. + for output in sorted(model_options.outputs_to_num_classes): + outputs_to_scales_to_logits[output][ + 'logits_%.2f' % image_scale] = outputs_to_logits[output] + + # Merge the logits from all the multi-scale inputs. + for output in sorted(model_options.outputs_to_num_classes): + # Concatenate the multi-scale logits for each output type. 
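+    # Every per-scale logits tensor was already resized to
+    # [logits_height, logits_width] above, so the tensors can be stacked
+    # along a new fifth axis and merged with max or mean below.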
+ all_logits = [ + tf.expand_dims(logits, axis=4) + for logits in outputs_to_scales_to_logits[output].values() + ] + all_logits = tf.concat(all_logits, 4) + merge_fn = ( + tf.reduce_max + if model_options.merge_method == 'max' else tf.reduce_mean) + outputs_to_scales_to_logits[output][MERGED_LOGITS_SCOPE] = merge_fn( + all_logits, axis=4) + + return outputs_to_scales_to_logits + + +def extract_features(images, + model_options, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + nas_training_hyper_parameters=None): + """Extracts features by the particular model_variant. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training nas models. Its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + + Returns: + concat_logits: A tensor of size [batch, feature_height, feature_width, + feature_channels], where feature_height/feature_width are determined by + the images height/width and output_stride. + end_points: A dictionary from components of the network to the corresponding + activation. + """ + features, end_points = feature_extractor.extract_features( + images, + output_stride=model_options.output_stride, + multi_grid=model_options.multi_grid, + model_variant=model_options.model_variant, + depth_multiplier=model_options.depth_multiplier, + divisible_by=model_options.divisible_by, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + preprocessed_images_dtype=model_options.preprocessed_images_dtype, + fine_tune_batch_norm=fine_tune_batch_norm, + nas_architecture_options=model_options.nas_architecture_options, + nas_training_hyper_parameters=nas_training_hyper_parameters, + use_bounded_activation=model_options.use_bounded_activation) + + if not model_options.aspp_with_batch_norm: + return features, end_points + else: + if model_options.dense_prediction_cell_config is not None: + tf.logging.info('Using dense prediction cell config.') + dense_prediction_layer = dense_prediction_cell.DensePredictionCell( + config=model_options.dense_prediction_cell_config, + hparams={ + 'conv_rate_multiplier': 16 // model_options.output_stride, + }) + concat_logits = dense_prediction_layer.build_cell( + features, + output_stride=model_options.output_stride, + crop_size=model_options.crop_size, + image_pooling_crop_size=model_options.image_pooling_crop_size, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm) + return concat_logits, end_points + else: + # The following codes employ the DeepLabv3 ASPP module. Note that we + # could express the ASPP module as one particular dense prediction + # cell architecture. We do not do so but leave the following codes + # for backward compatibility. 
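+      # For reference, a dense prediction cell is configured by a list of
+      # dicts such as {'kernel': 3, 'rate': [1, 6], 'op': 'conv', 'input': -1}
+      # (see model_test.py), whereas the ASPP below is the fixed combination
+      # of a 1x1 conv, one 3x3 atrous conv per rate, and image-level pooling.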
+ batch_norm_params = utils.get_batch_norm_params( + decay=0.9997, + epsilon=1e-5, + scale=True, + is_training=(is_training and fine_tune_batch_norm), + sync_batch_norm_method=model_options.sync_batch_norm_method) + batch_norm = utils.get_batch_norm_fn( + model_options.sync_batch_norm_method) + activation_fn = ( + tf.nn.relu6 if model_options.use_bounded_activation else tf.nn.relu) + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=activation_fn, + normalizer_fn=batch_norm, + padding='SAME', + stride=1, + reuse=reuse): + with slim.arg_scope([batch_norm], **batch_norm_params): + depth = model_options.aspp_convs_filters + branch_logits = [] + + if model_options.add_image_level_feature: + if model_options.crop_size is not None: + image_pooling_crop_size = model_options.image_pooling_crop_size + # If image_pooling_crop_size is not specified, use crop_size. + if image_pooling_crop_size is None: + image_pooling_crop_size = model_options.crop_size + pool_height = scale_dimension( + image_pooling_crop_size[0], + 1. / model_options.output_stride) + pool_width = scale_dimension( + image_pooling_crop_size[1], + 1. / model_options.output_stride) + image_feature = slim.avg_pool2d( + features, [pool_height, pool_width], + model_options.image_pooling_stride, padding='VALID') + resize_height = scale_dimension( + model_options.crop_size[0], + 1. / model_options.output_stride) + resize_width = scale_dimension( + model_options.crop_size[1], + 1. / model_options.output_stride) + else: + # If crop_size is None, we simply do global pooling. + pool_height = tf.shape(features)[1] + pool_width = tf.shape(features)[2] + image_feature = tf.reduce_mean( + features, axis=[1, 2], keepdims=True) + resize_height = pool_height + resize_width = pool_width + image_feature_activation_fn = tf.nn.relu + image_feature_normalizer_fn = batch_norm + if model_options.aspp_with_squeeze_and_excitation: + image_feature_activation_fn = tf.nn.sigmoid + if model_options.image_se_uses_qsigmoid: + image_feature_activation_fn = utils.q_sigmoid + image_feature_normalizer_fn = None + image_feature = slim.conv2d( + image_feature, depth, 1, + activation_fn=image_feature_activation_fn, + normalizer_fn=image_feature_normalizer_fn, + scope=IMAGE_POOLING_SCOPE) + image_feature = _resize_bilinear( + image_feature, + [resize_height, resize_width], + image_feature.dtype) + # Set shape for resize_height/resize_width if they are not Tensor. + if isinstance(resize_height, tf.Tensor): + resize_height = None + if isinstance(resize_width, tf.Tensor): + resize_width = None + image_feature.set_shape([None, resize_height, resize_width, depth]) + if not model_options.aspp_with_squeeze_and_excitation: + branch_logits.append(image_feature) + + # Employ a 1x1 convolution. + branch_logits.append(slim.conv2d(features, depth, 1, + scope=ASPP_SCOPE + str(0))) + + if model_options.atrous_rates: + # Employ 3x3 convolutions with different atrous rates. + for i, rate in enumerate(model_options.atrous_rates, 1): + scope = ASPP_SCOPE + str(i) + if model_options.aspp_with_separable_conv: + aspp_features = split_separable_conv2d( + features, + filters=depth, + rate=rate, + weight_decay=weight_decay, + scope=scope) + else: + aspp_features = slim.conv2d( + features, depth, 3, rate=rate, scope=scope) + branch_logits.append(aspp_features) + + # Merge branch logits. 
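+          # At this point branch_logits holds the (optional) image-level
+          # feature, the 1x1 branch, and one 3x3 atrous branch per rate, all
+          # with `depth` channels, so they concatenate cleanly along channels.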
+ concat_logits = tf.concat(branch_logits, 3) + if model_options.aspp_with_concat_projection: + concat_logits = slim.conv2d( + concat_logits, depth, 1, scope=CONCAT_PROJECTION_SCOPE) + concat_logits = slim.dropout( + concat_logits, + keep_prob=0.9, + is_training=is_training, + scope=CONCAT_PROJECTION_SCOPE + '_dropout') + if (model_options.add_image_level_feature and + model_options.aspp_with_squeeze_and_excitation): + concat_logits *= image_feature + + return concat_logits, end_points + + +def _get_logits(images, + model_options, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + nas_training_hyper_parameters=None): + """Gets the logits by atrous/image spatial pyramid pooling. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training nas models. Its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + + Returns: + outputs_to_logits: A map from output_type to logits. + """ + features, end_points = extract_features( + images, + model_options, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm, + nas_training_hyper_parameters=nas_training_hyper_parameters) + + if model_options.decoder_output_stride: + crop_size = model_options.crop_size + if crop_size is None: + crop_size = [tf.shape(images)[1], tf.shape(images)[2]] + features = refine_by_decoder( + features, + end_points, + crop_size=crop_size, + decoder_output_stride=model_options.decoder_output_stride, + decoder_use_separable_conv=model_options.decoder_use_separable_conv, + decoder_use_sum_merge=model_options.decoder_use_sum_merge, + decoder_filters=model_options.decoder_filters, + decoder_output_is_logits=model_options.decoder_output_is_logits, + model_variant=model_options.model_variant, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm, + use_bounded_activation=model_options.use_bounded_activation) + + outputs_to_logits = {} + for output in sorted(model_options.outputs_to_num_classes): + if model_options.decoder_output_is_logits: + outputs_to_logits[output] = tf.identity(features, + name=output) + else: + outputs_to_logits[output] = get_branch_logits( + features, + model_options.outputs_to_num_classes[output], + model_options.atrous_rates, + aspp_with_batch_norm=model_options.aspp_with_batch_norm, + kernel_size=model_options.logits_kernel_size, + weight_decay=weight_decay, + reuse=reuse, + scope_suffix=output) + + return outputs_to_logits + + +def refine_by_decoder(features, + end_points, + crop_size=None, + decoder_output_stride=None, + decoder_use_separable_conv=False, + decoder_use_sum_merge=False, + decoder_filters=256, + decoder_output_is_logits=False, + model_variant=None, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + use_bounded_activation=False, + sync_batch_norm_method='None'): + """Adds the decoder to obtain sharper segmentation results. 
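+
+  At each stride in decoder_output_stride, low-level backbone features are
+  projected to a small channel depth and fused with the upsampled coarse
+  features, following the DeepLabv3+ decoder design.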
+ + Args: + features: A tensor of size [batch, features_height, features_width, + features_channels]. + end_points: A dictionary from components of the network to the corresponding + activation. + crop_size: A tuple [crop_height, crop_width] specifying whole patch crop + size. + decoder_output_stride: A list of integers specifying the output stride of + low-level features used in the decoder module. + decoder_use_separable_conv: Employ separable convolution for decoder or not. + decoder_use_sum_merge: Boolean, decoder uses simple sum merge or not. + decoder_filters: Integer, decoder filter size. + decoder_output_is_logits: Boolean, using decoder output as logits or not. + model_variant: Model variant for feature extraction. + weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + sync_batch_norm_method: String, method used to sync batch norm. Currently + only support `None` (no sync batch norm) and `tpu` (use tpu code to + sync batch norm). + + Returns: + Decoder output with size [batch, decoder_height, decoder_width, + decoder_channels]. + + Raises: + ValueError: If crop_size is None. + """ + if crop_size is None: + raise ValueError('crop_size must be provided when using decoder.') + batch_norm_params = utils.get_batch_norm_params( + decay=0.9997, + epsilon=1e-5, + scale=True, + is_training=(is_training and fine_tune_batch_norm), + sync_batch_norm_method=sync_batch_norm_method) + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + decoder_depth = decoder_filters + projected_filters = 48 + if decoder_use_sum_merge: + # When using sum merge, the projected filters must be equal to decoder + # filters. + projected_filters = decoder_filters + if decoder_output_is_logits: + # Overwrite the setting when decoder output is logits. + activation_fn = None + normalizer_fn = None + conv2d_kernel = 1 + # Use original conv instead of separable conv. + decoder_use_separable_conv = False + else: + # Default setting when decoder output is not logits. + activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu + normalizer_fn = batch_norm + conv2d_kernel = 3 + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + padding='SAME', + stride=1, + reuse=reuse): + with slim.arg_scope([batch_norm], **batch_norm_params): + with tf.variable_scope(DECODER_SCOPE, DECODER_SCOPE, [features]): + decoder_features = features + decoder_stage = 0 + scope_suffix = '' + for output_stride in decoder_output_stride: + feature_list = feature_extractor.networks_to_feature_maps[ + model_variant][ + feature_extractor.DECODER_END_POINTS][output_stride] + # If only one decoder stage, we do not change the scope name in + # order for backward compactibility. + if decoder_stage: + scope_suffix = '_{}'.format(decoder_stage) + for i, name in enumerate(feature_list): + decoder_features_list = [decoder_features] + # MobileNet and NAS variants use different naming convention. 
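+            # e.g. MobileNet/NAS end points are keyed by the bare layer name,
+            # while other backbones (e.g. Xception) are keyed under the
+            # backbone's name scope, hence the prefixing below.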
+ if ('mobilenet' in model_variant or + model_variant.startswith('mnas') or + model_variant.startswith('nas')): + feature_name = name + else: + feature_name = '{}/{}'.format( + feature_extractor.name_scope[model_variant], name) + decoder_features_list.append( + slim.conv2d( + end_points[feature_name], + projected_filters, + 1, + scope='feature_projection' + str(i) + scope_suffix)) + # Determine the output size. + decoder_height = scale_dimension(crop_size[0], 1.0 / output_stride) + decoder_width = scale_dimension(crop_size[1], 1.0 / output_stride) + # Resize to decoder_height/decoder_width. + for j, feature in enumerate(decoder_features_list): + decoder_features_list[j] = _resize_bilinear( + feature, [decoder_height, decoder_width], feature.dtype) + h = (None if isinstance(decoder_height, tf.Tensor) + else decoder_height) + w = (None if isinstance(decoder_width, tf.Tensor) + else decoder_width) + decoder_features_list[j].set_shape([None, h, w, None]) + if decoder_use_sum_merge: + decoder_features = _decoder_with_sum_merge( + decoder_features_list, + decoder_depth, + conv2d_kernel=conv2d_kernel, + decoder_use_separable_conv=decoder_use_separable_conv, + weight_decay=weight_decay, + scope_suffix=scope_suffix) + else: + if not decoder_use_separable_conv: + scope_suffix = str(i) + scope_suffix + decoder_features = _decoder_with_concat_merge( + decoder_features_list, + decoder_depth, + decoder_use_separable_conv=decoder_use_separable_conv, + weight_decay=weight_decay, + scope_suffix=scope_suffix) + decoder_stage += 1 + return decoder_features + + +def _decoder_with_sum_merge(decoder_features_list, + decoder_depth, + conv2d_kernel=3, + decoder_use_separable_conv=True, + weight_decay=0.0001, + scope_suffix=''): + """Decoder with sum to merge features. + + Args: + decoder_features_list: A list of decoder features. + decoder_depth: Integer, the filters used in the convolution. + conv2d_kernel: Integer, the convolution kernel size. + decoder_use_separable_conv: Boolean, use separable conv or not. + weight_decay: Weight decay for the model variables. + scope_suffix: String, used in the scope suffix. + + Returns: + decoder features merged with sum. + + Raises: + RuntimeError: If decoder_features_list have length not equal to 2. + """ + if len(decoder_features_list) != 2: + raise RuntimeError('Expect decoder_features has length 2.') + # Only apply one convolution when decoder use sum merge. + if decoder_use_separable_conv: + decoder_features = split_separable_conv2d( + decoder_features_list[0], + filters=decoder_depth, + rate=1, + weight_decay=weight_decay, + scope='decoder_split_sep_conv0'+scope_suffix) + decoder_features_list[1] + else: + decoder_features = slim.conv2d( + decoder_features_list[0], + decoder_depth, + conv2d_kernel, + scope='decoder_conv0'+scope_suffix) + decoder_features_list[1] + return decoder_features + + +def _decoder_with_concat_merge(decoder_features_list, + decoder_depth, + decoder_use_separable_conv=True, + weight_decay=0.0001, + scope_suffix=''): + """Decoder with concatenation to merge features. + + This decoder method applies two convolutions to smooth the features obtained + by concatenating the input decoder_features_list. + + This decoder module is proposed in the DeepLabv3+ paper. + + Args: + decoder_features_list: A list of decoder features. + decoder_depth: Integer, the filters used in the convolution. + decoder_use_separable_conv: Boolean, use separable conv or not. + weight_decay: Weight decay for the model variables. 
+ scope_suffix: String, used in the scope suffix. + + Returns: + decoder features merged with concatenation. + """ + if decoder_use_separable_conv: + decoder_features = split_separable_conv2d( + tf.concat(decoder_features_list, 3), + filters=decoder_depth, + rate=1, + weight_decay=weight_decay, + scope='decoder_conv0'+scope_suffix) + decoder_features = split_separable_conv2d( + decoder_features, + filters=decoder_depth, + rate=1, + weight_decay=weight_decay, + scope='decoder_conv1'+scope_suffix) + else: + num_convs = 2 + decoder_features = slim.repeat( + tf.concat(decoder_features_list, 3), + num_convs, + slim.conv2d, + decoder_depth, + 3, + scope='decoder_conv'+scope_suffix) + return decoder_features + + +def get_branch_logits(features, + num_classes, + atrous_rates=None, + aspp_with_batch_norm=False, + kernel_size=1, + weight_decay=0.0001, + reuse=None, + scope_suffix=''): + """Gets the logits from each model's branch. + + The underlying model is branched out in the last layer when atrous + spatial pyramid pooling is employed, and all branches are sum-merged + to form the final logits. + + Args: + features: A float tensor of shape [batch, height, width, channels]. + num_classes: Number of classes to predict. + atrous_rates: A list of atrous convolution rates for last layer. + aspp_with_batch_norm: Use batch normalization layers for ASPP. + kernel_size: Kernel size for convolution. + weight_decay: Weight decay for the model variables. + reuse: Reuse model variables or not. + scope_suffix: Scope suffix for the model variables. + + Returns: + Merged logits with shape [batch, height, width, num_classes]. + + Raises: + ValueError: Upon invalid input kernel_size value. + """ + # When using batch normalization with ASPP, ASPP has been applied before + # in extract_features, and thus we simply apply 1x1 convolution here. + if aspp_with_batch_norm or atrous_rates is None: + if kernel_size != 1: + raise ValueError('Kernel size must be 1 when atrous_rates is None or ' + 'using aspp_with_batch_norm. Gets %d.' % kernel_size) + atrous_rates = [1] + + with slim.arg_scope( + [slim.conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + weights_initializer=tf.truncated_normal_initializer(stddev=0.01), + reuse=reuse): + with tf.variable_scope(LOGITS_SCOPE_NAME, LOGITS_SCOPE_NAME, [features]): + branch_logits = [] + for i, rate in enumerate(atrous_rates): + scope = scope_suffix + if i: + scope += '_%d' % i + + branch_logits.append( + slim.conv2d( + features, + num_classes, + kernel_size=kernel_size, + rate=rate, + activation_fn=None, + normalizer_fn=None, + scope=scope)) + + return tf.add_n(branch_logits) diff --git a/deeplab/models/research/deeplab/model_test.py b/deeplab/models/research/deeplab/model_test.py new file mode 100644 index 0000000..d8413d7 --- /dev/null +++ b/deeplab/models/research/deeplab/model_test.py @@ -0,0 +1,148 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for DeepLab model and some helper functions.""" + +import tensorflow as tf + +from deeplab import common +from deeplab import model + + +class DeeplabModelTest(tf.test.TestCase): + + def testWrongDeepLabVariant(self): + model_options = common.ModelOptions([])._replace( + model_variant='no_such_variant') + with self.assertRaises(ValueError): + model._get_logits(images=[], model_options=model_options) + + def testBuildDeepLabv2(self): + batch_size = 2 + crop_size = [41, 41] + + # Test with two image_pyramids. + image_pyramids = [[1], [0.5, 1]] + + # Test two model variants. + model_variants = ['xception_65', 'mobilenet_v2'] + + # Test with two output_types. + outputs_to_num_classes = {'semantic': 3, + 'direction': 2} + + expected_endpoints = [['merged_logits'], + ['merged_logits', + 'logits_0.50', + 'logits_1.00']] + expected_num_logits = [1, 3] + + for model_variant in model_variants: + model_options = common.ModelOptions(outputs_to_num_classes)._replace( + add_image_level_feature=False, + aspp_with_batch_norm=False, + aspp_with_separable_conv=False, + model_variant=model_variant) + + for i, image_pyramid in enumerate(image_pyramids): + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g): + inputs = tf.random_uniform( + (batch_size, crop_size[0], crop_size[1], 3)) + outputs_to_scales_to_logits = model.multi_scale_logits( + inputs, model_options, image_pyramid=image_pyramid) + + # Check computed results for each output type. + for output in outputs_to_num_classes: + scales_to_logits = outputs_to_scales_to_logits[output] + self.assertListEqual(sorted(scales_to_logits.keys()), + sorted(expected_endpoints[i])) + + # Expected number of logits = len(image_pyramid) + 1, since the + # last logits is merged from all the scales. + self.assertEqual(len(scales_to_logits), expected_num_logits[i]) + + def testForwardpassDeepLabv3plus(self): + crop_size = [33, 33] + outputs_to_num_classes = {'semantic': 3} + + model_options = common.ModelOptions( + outputs_to_num_classes, + crop_size, + output_stride=16 + )._replace( + add_image_level_feature=True, + aspp_with_batch_norm=True, + logits_kernel_size=1, + decoder_output_stride=[4], + model_variant='mobilenet_v2') # Employ MobileNetv2 for fast test. + + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + inputs = tf.random_uniform( + (1, crop_size[0], crop_size[1], 3)) + outputs_to_scales_to_logits = model.multi_scale_logits( + inputs, + model_options, + image_pyramid=[1.0]) + + sess.run(tf.global_variables_initializer()) + outputs_to_scales_to_logits = sess.run(outputs_to_scales_to_logits) + + # Check computed results for each output type. + for output in outputs_to_num_classes: + scales_to_logits = outputs_to_scales_to_logits[output] + # Expect only one output. 
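+          # (the merged logits), since image_pyramid holds a single scale.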
+ self.assertEqual(len(scales_to_logits), 1) + for logits in scales_to_logits.values(): + self.assertTrue(logits.any()) + + def testBuildDeepLabWithDensePredictionCell(self): + batch_size = 1 + crop_size = [33, 33] + outputs_to_num_classes = {'semantic': 2} + expected_endpoints = ['merged_logits'] + dense_prediction_cell_config = [ + {'kernel': 3, 'rate': [1, 6], 'op': 'conv', 'input': -1}, + {'kernel': 3, 'rate': [18, 15], 'op': 'conv', 'input': 0}, + ] + model_options = common.ModelOptions( + outputs_to_num_classes, + crop_size, + output_stride=16)._replace( + aspp_with_batch_norm=True, + model_variant='mobilenet_v2', + dense_prediction_cell_config=dense_prediction_cell_config) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g): + inputs = tf.random_uniform( + (batch_size, crop_size[0], crop_size[1], 3)) + outputs_to_scales_to_model_results = model.multi_scale_logits( + inputs, + model_options, + image_pyramid=[1.0]) + for output in outputs_to_num_classes: + scales_to_model_results = outputs_to_scales_to_model_results[output] + self.assertListEqual( + list(scales_to_model_results), expected_endpoints) + self.assertEqual(len(scales_to_model_results), 1) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/setup.txt b/deeplab/models/research/deeplab/setup.txt new file mode 100644 index 0000000..06a68e3 --- /dev/null +++ b/deeplab/models/research/deeplab/setup.txt @@ -0,0 +1,78 @@ +python version 2.7 +tensorflow version 1.14.0 + +git clone "https://github.com/tensorflow/models.git" + +Folder Structure ++models + +deeplab + +datasets + +PQR (our custom dataset, make this dir) + +dataset + +ImageSets + - train.txt + - trainval.txt + - val.txt + +JPEGImages + - img_1.jpg + - img_2.jpg + +SegmentationClass + - img_1.jpg (notice same name as images in JPEGImages) + - img_2.jpg + +SegmentationClassRaw (don't make this directory, label_pqr.py will make it) + +exp + +train_on_trainval_set + +init_models + +deeplabv3_pascal_train_aug (download tar.gz file, put in init_models directory + and open it and this directory should appear) + +tfrecord + +Directory Explanations +-ImageSets +-> make 3 txt files + - train.txt (with names of training images in each row without image extension) + - val.txt (with names of validation images in each row without image extension) + - trainval.txt (with names of both training and validation images in each row without image extension) +-JPEGImages +-> all images +-SegmentationClass +-> segmentation masks with color +-SegmentationClassRaw +-> segmentation masks with indexed color, run label_pqr.py to get this +-exp +-> all training/evaluation/visualization files will go in here +-deeplabv3_pascal_train_aug: pretrained weights download +-> download this, put it in this folder, unpack it, done +-> http://download.tensorflow.org/models/deeplabv3_pascal_train_aug_2018_01_04.tar.gz + +1: pip3 install --user gast==0.2.2 +2: pip3 install tf_slim +3: From tensorflow/models/research/ +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim +4: From tensorflow/models/research/deeplab/datasets +python3.7 datasets/label_pqr.py +5: From tensorflow/models/research/deeplab/datasets +python3.7 ./build_new_pqr_data.py --image_folder="./PQR/dataset/JPEGImages" --semantic_segmentation_folder="./PQR/dataset/SegmentationClassRaw" --list_folder="./PQR/dataset/ImageSets" --image_format="jpg" --output_dir="./PQR/tfrecord" +- note: replace paths like "./PQR/dataset/JPEGImages" with the complete path in case the script can't find it +6: From 
tensorflow/models/research/deeplab
+bash train-pqr.sh
+7: From tensorflow/models/research/deeplab
+python3.7 inference.py
+- note: change the class labels and the input image path at the very bottom of this script before running it
+
+Notes:
+- to add/change classes
+  - go to train-pqr.sh and change --num_classes=2
+  - go to models/research/deeplab/datasets/data_generator.py and change:
+    _PQR_SEG_INFORMATION = DatasetDescriptor(
+        splits_to_sizes={
+            'train': 3,     # number of images listed in train.txt
+            'trainval': 5,  # number of images listed in trainval.txt
+            'val': 2,       # number of images listed in val.txt
+        },
+        num_classes=2,    # number of classes in your dataset
+        ignore_label=255, # label value ignored during training (e.g., white object borders)
+    )
+  - go to models/research/deeplab/inference.py and update the class names at the very bottom
+  - go to datasets/label_pqr.py and add color:class-index pairs to the palette
+  - put the images in ImageSets, JPEGImages, and SegmentationClass as described above
\ No newline at end of file
diff --git a/deeplab/models/research/deeplab/testing/info.md b/deeplab/models/research/deeplab/testing/info.md
new file mode 100644
index 0000000..b84d2ad
--- /dev/null
+++ b/deeplab/models/research/deeplab/testing/info.md
@@ -0,0 +1,6 @@
+This directory contains testing data.
+
+# pascal_voc_seg
+This folder contains data specific to the pascal_voc_seg dataset. val-00000-of-00001.tfrecord contains
+three randomly generated images with the format defined in
+tensorflow/models/research/deeplab/datasets/build_voc2012_data.py.
diff --git a/deeplab/models/research/deeplab/testing/pascal_voc_seg/val-00000-of-00001.tfrecord b/deeplab/models/research/deeplab/testing/pascal_voc_seg/val-00000-of-00001.tfrecord
new file mode 100644
index 0000000..e81455b
Binary files /dev/null and b/deeplab/models/research/deeplab/testing/pascal_voc_seg/val-00000-of-00001.tfrecord differ
diff --git a/deeplab/models/research/deeplab/train-pqr.sh b/deeplab/models/research/deeplab/train-pqr.sh
new file mode 100644
index 0000000..2326370
--- /dev/null
+++ b/deeplab/models/research/deeplab/train-pqr.sh
@@ -0,0 +1,95 @@
+cd ..
+# Set up the working environment.
+CURRENT_DIR=$(pwd)
+WORK_DIR="${CURRENT_DIR}/deeplab"
+DATASET_DIR="datasets"
+
+# Set up the working directories.
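+# All experiment artifacts (train checkpoints, eval events, visualizations and
+# the exported frozen graph) are kept under PQR/exp/train_on_trainval_set.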
+PQR_FOLDER="PQR"
+EXP_FOLDER="exp/train_on_trainval_set"
+# INIT_FOLDER="${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/${EXP_FOLDER}/init_models"
+INIT_FOLDER="${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/init_models"
+TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/${EXP_FOLDER}/train"
+DATASET="${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/tfrecord"
+EVAL_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/${EXP_FOLDER}/eval"
+VIS_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/${EXP_FOLDER}/vis"
+EXPORT_DIR="${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/${EXP_FOLDER}/export"
+
+mkdir -p "${WORK_DIR}/${DATASET_DIR}/${PQR_FOLDER}/exp"
+mkdir -p "${TRAIN_LOGDIR}"
+mkdir -p "${EVAL_LOGDIR}"
+mkdir -p "${VIS_LOGDIR}"
+mkdir -p "${EXPORT_DIR}"
+
+NUM_ITERATIONS=5
+python3.7 "${WORK_DIR}"/train.py \
+  --logtostderr \
+  --train_split="trainval" \
+  --model_variant="xception_65" \
+  --atrous_rates=6 \
+  --atrous_rates=12 \
+  --atrous_rates=18 \
+  --output_stride=16 \
+  --decoder_output_stride=4 \
+  --train_crop_size="448,448" \
+  --train_batch_size=4 \
+  --training_number_of_steps="${NUM_ITERATIONS}" \
+  --fine_tune_batch_norm=true \
+  --tf_initial_checkpoint="${INIT_FOLDER}/deeplabv3_pascal_train_aug/model.ckpt" \
+  --train_logdir="${TRAIN_LOGDIR}" \
+  --dataset_dir="${DATASET}" \
+  --dataset="pqr" \
+  --initialize_last_layer=False
+
+python3.7 "${WORK_DIR}"/eval.py \
+  --logtostderr \
+  --eval_split="val" \
+  --model_variant="xception_65" \
+  --atrous_rates=6 \
+  --atrous_rates=12 \
+  --atrous_rates=18 \
+  --output_stride=16 \
+  --decoder_output_stride=4 \
+  --eval_crop_size="448,448" \
+  --checkpoint_dir="${TRAIN_LOGDIR}" \
+  --eval_logdir="${EVAL_LOGDIR}" \
+  --dataset_dir="${DATASET}" \
+  --max_number_of_evaluations=1 \
+  --dataset="pqr"
+
+# Visualize the results.
+python3.7 "${WORK_DIR}"/vis.py \
+  --logtostderr \
+  --vis_split="val" \
+  --model_variant="xception_65" \
+  --atrous_rates=6 \
+  --atrous_rates=12 \
+  --atrous_rates=18 \
+  --output_stride=16 \
+  --decoder_output_stride=4 \
+  --vis_crop_size="448,448" \
+  --checkpoint_dir="${TRAIN_LOGDIR}" \
+  --vis_logdir="${VIS_LOGDIR}" \
+  --dataset_dir="${DATASET}" \
+  --max_number_of_iterations=1 \
+  --dataset="pqr"
+
+# Export the trained checkpoint.
+CKPT_PATH="${TRAIN_LOGDIR}/model.ckpt-${NUM_ITERATIONS}"
+EXPORT_PATH="${EXPORT_DIR}/frozen_inference_graph.pb"
+
+python3.7 "${WORK_DIR}"/export_model.py \
+  --logtostderr \
+  --checkpoint_path="${CKPT_PATH}" \
+  --export_path="${EXPORT_PATH}" \
+  --model_variant="xception_65" \
+  --atrous_rates=6 \
+  --atrous_rates=12 \
+  --atrous_rates=18 \
+  --output_stride=16 \
+  --decoder_output_stride=4 \
+  --num_classes=2 \
+  --crop_size=448 \
+  --crop_size=448 \
+  --inference_scales=1.0
+
diff --git a/deeplab/models/research/deeplab/train.py b/deeplab/models/research/deeplab/train.py
new file mode 100644
index 0000000..fbe060d
--- /dev/null
+++ b/deeplab/models/research/deeplab/train.py
@@ -0,0 +1,464 @@
+# Lint as: python2, python3
+# Copyright 2018 The TensorFlow Authors All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Training script for the DeepLab model. + +See model.py for more details and usage. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import six +import tensorflow as tf +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.contrib import tfprof as contrib_tfprof +from deeplab import common +from deeplab import model +from deeplab.datasets import data_generator +from deeplab.utils import train_utils +from deployment import model_deploy + +slim = tf.contrib.slim +flags = tf.app.flags +FLAGS = flags.FLAGS + +# Settings for multi-GPUs/multi-replicas training. + +flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy.') + +flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.') + +flags.DEFINE_integer('num_replicas', 1, 'Number of worker replicas.') + +flags.DEFINE_integer('startup_delay_steps', 15, + 'Number of training steps between replicas startup.') + +flags.DEFINE_integer( + 'num_ps_tasks', 0, + 'The number of parameter servers. If the value is 0, then ' + 'the parameters are handled locally by the worker.') + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +flags.DEFINE_integer('task', 0, 'The task ID.') + +# Settings for logging. + +flags.DEFINE_string('train_logdir', None, + 'Where the checkpoint and logs are stored.') + +flags.DEFINE_integer('log_steps', 10, + 'Display logging information at every log_steps.') + +flags.DEFINE_integer('save_interval_secs', 1200, + 'How often, in seconds, we save the model to disk.') + +flags.DEFINE_integer('save_summaries_secs', 600, + 'How often, in seconds, we compute the summaries.') + +flags.DEFINE_boolean( + 'save_summaries_images', False, + 'Save sample inputs, labels, and semantic predictions as ' + 'images to summary.') + +# Settings for profiling. + +flags.DEFINE_string('profile_logdir', None, + 'Where the profile files are stored.') + +# Settings for training strategy. + +flags.DEFINE_enum('optimizer', 'momentum', ['momentum', 'adam'], + 'Which optimizer to use.') + + +# Momentum optimizer flags + +flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'], + 'Learning rate policy for training.') + +# Use 0.007 when training on PASCAL augmented training set, train_aug. When +# fine-tuning on PASCAL trainval set, use learning rate=0.0001. 
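+# train-pqr.sh does not override this flag, so the 0.0001 default applies
+# when fine-tuning on the small PQR dataset.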
+flags.DEFINE_float('base_learning_rate', .0001, + 'The base learning rate for model training.') + +flags.DEFINE_float('decay_steps', 0.0, + 'Decay steps for polynomial learning rate schedule.') + +flags.DEFINE_float('end_learning_rate', 0.0, + 'End learning rate for polynomial learning rate schedule.') + +flags.DEFINE_float('learning_rate_decay_factor', 0.1, + 'The rate to decay the base learning rate.') + +flags.DEFINE_integer('learning_rate_decay_step', 2000, + 'Decay the base learning rate at a fixed step.') + +flags.DEFINE_float('learning_power', 0.9, + 'The power value used in the poly learning policy.') + +flags.DEFINE_integer('training_number_of_steps', 30000, + 'The number of steps used for training') + +flags.DEFINE_float('momentum', 0.9, 'The momentum value to use') + +# Adam optimizer flags +flags.DEFINE_float('adam_learning_rate', 0.001, + 'Learning rate for the adam optimizer.') +flags.DEFINE_float('adam_epsilon', 1e-08, 'Adam optimizer epsilon.') + +# When fine_tune_batch_norm=True, use at least batch size larger than 12 +# (batch size more than 16 is better). Otherwise, one could use smaller batch +# size and set fine_tune_batch_norm=False. +flags.DEFINE_integer('train_batch_size', 8, + 'The number of images in each batch during training.') + +# For weight_decay, use 0.00004 for MobileNet-V2 or Xcpetion model variants. +# Use 0.0001 for ResNet model variants. +flags.DEFINE_float('weight_decay', 0.00004, + 'The value of the weight decay for training.') + +flags.DEFINE_list('train_crop_size', '513,513', + 'Image crop size [height, width] during training.') + +flags.DEFINE_float( + 'last_layer_gradient_multiplier', 1.0, + 'The gradient multiplier for last layers, which is used to ' + 'boost the gradient of last layers if the value > 1.') + +flags.DEFINE_boolean('upsample_logits', True, + 'Upsample logits during training.') + +# Hyper-parameters for NAS training strategy. + +flags.DEFINE_float( + 'drop_path_keep_prob', 1.0, + 'Probability to keep each path in the NAS cell when training.') + +# Settings for fine-tuning the network. + +flags.DEFINE_string('tf_initial_checkpoint', None, + 'The initial checkpoint in tensorflow format.') + +# Set to False if one does not want to re-use the trained classifier weights. +flags.DEFINE_boolean('initialize_last_layer', True, + 'Initialize the last layer.') + +flags.DEFINE_boolean('last_layers_contain_logits_only', False, + 'Only consider logits as last layers or not.') + +flags.DEFINE_integer('slow_start_step', 0, + 'Training model with small learning rate for few steps.') + +flags.DEFINE_float('slow_start_learning_rate', 1e-4, + 'Learning rate employed during slow start.') + +# Set to True if one wants to fine-tune the batch norm parameters in DeepLabv3. +# Set to False and use small batch size to save GPU memory. +flags.DEFINE_boolean('fine_tune_batch_norm', True, + 'Fine tune the batch norm parameters or not.') + +flags.DEFINE_float('min_scale_factor', 0.5, + 'Mininum scale factor for data augmentation.') + +flags.DEFINE_float('max_scale_factor', 2., + 'Maximum scale factor for data augmentation.') + +flags.DEFINE_float('scale_factor_step_size', 0.25, + 'Scale factor step size for data augmentation.') + +# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or +# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note +# one could use different atrous_rates/output_stride during training/evaluation. 
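+# train-pqr.sh follows the second setting: atrous rates 6, 12 and 18 with
+# output_stride 16 for the xception_65 variant.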
+flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 16, + 'The ratio of input to output spatial resolution.') + +# Hard example mining related flags. +flags.DEFINE_integer( + 'hard_example_mining_step', 0, + 'The training step in which exact hard example mining kicks off. Note we ' + 'gradually reduce the mining percent to the specified ' + 'top_k_percent_pixels. For example, if hard_example_mining_step=100K and ' + 'top_k_percent_pixels=0.25, then mining percent will gradually reduce from ' + '100% to 25% until 100K steps after which we only mine top 25% pixels.') + +flags.DEFINE_float( + 'top_k_percent_pixels', 1.0, + 'The top k percent pixels (in terms of the loss values) used to compute ' + 'loss during training. This is useful for hard pixel mining.') + +# Quantization setting. +flags.DEFINE_integer( + 'quantize_delay_step', -1, + 'Steps to start quantized training. If < 0, will not quantize model.') + +# Dataset settings. +flags.DEFINE_string('dataset', 'pascal_voc_seg', + 'Name of the segmentation dataset.') + +flags.DEFINE_string('train_split', 'train', + 'Which split of the dataset to be used for training') + +flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.') + + +def _build_deeplab(iterator, outputs_to_num_classes, ignore_label): + """Builds a clone of DeepLab. + + Args: + iterator: An iterator of type tf.data.Iterator for images and labels. + outputs_to_num_classes: A map from output type to the number of classes. For + example, for the task of semantic segmentation with 21 semantic classes, + we would have outputs_to_num_classes['semantic'] = 21. + ignore_label: Ignore label. + """ + samples = iterator.get_next() + + # Add name to input and label nodes so we can add to summary. + samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name=common.IMAGE) + samples[common.LABEL] = tf.identity(samples[common.LABEL], name=common.LABEL) + + model_options = common.ModelOptions( + outputs_to_num_classes=outputs_to_num_classes, + crop_size=[int(sz) for sz in FLAGS.train_crop_size], + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + outputs_to_scales_to_logits = model.multi_scale_logits( + samples[common.IMAGE], + model_options=model_options, + image_pyramid=FLAGS.image_pyramid, + weight_decay=FLAGS.weight_decay, + is_training=True, + fine_tune_batch_norm=FLAGS.fine_tune_batch_norm, + nas_training_hyper_parameters={ + 'drop_path_keep_prob': FLAGS.drop_path_keep_prob, + 'total_training_steps': FLAGS.training_number_of_steps, + }) + + # Add name to graph node so we can add to summary. + output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE] + output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity( + output_type_dict[model.MERGED_LOGITS_SCOPE], name=common.OUTPUT_TYPE) + + for output, num_classes in six.iteritems(outputs_to_num_classes): + train_utils.add_softmax_cross_entropy_loss_for_each_scale( + outputs_to_scales_to_logits[output], + samples[common.LABEL], + num_classes, + ignore_label, + loss_weight=model_options.label_weights, + upsample_logits=FLAGS.upsample_logits, + hard_example_mining_step=FLAGS.hard_example_mining_step, + top_k_percent_pixels=FLAGS.top_k_percent_pixels, + scope=output) + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + # Set up deployment (i.e., multi-GPUs and/or multi-replicas). 
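+  # With the default flags (num_clones=1, num_replicas=1, num_ps_tasks=0) this
+  # collapses to plain single-worker, single-GPU training.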
+ config = model_deploy.DeploymentConfig( + num_clones=FLAGS.num_clones, + clone_on_cpu=FLAGS.clone_on_cpu, + replica_id=FLAGS.task, + num_replicas=FLAGS.num_replicas, + num_ps_tasks=FLAGS.num_ps_tasks) + + # Split the batch across GPUs. + assert FLAGS.train_batch_size % config.num_clones == 0, ( + 'Training batch size not divisble by number of clones (GPUs).') + + clone_batch_size = FLAGS.train_batch_size // config.num_clones + + tf.gfile.MakeDirs(FLAGS.train_logdir) + tf.logging.info('Training on %s set', FLAGS.train_split) + + with tf.Graph().as_default() as graph: + with tf.device(config.inputs_device()): + dataset = data_generator.Dataset( + dataset_name=FLAGS.dataset, + split_name=FLAGS.train_split, + dataset_dir=FLAGS.dataset_dir, + batch_size=clone_batch_size, + crop_size=[int(sz) for sz in FLAGS.train_crop_size], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + min_scale_factor=FLAGS.min_scale_factor, + max_scale_factor=FLAGS.max_scale_factor, + scale_factor_step_size=FLAGS.scale_factor_step_size, + model_variant=FLAGS.model_variant, + num_readers=4, + is_training=True, + should_shuffle=True, + should_repeat=True) + + # Create the global step on the device storing the variables. + with tf.device(config.variables_device()): + global_step = tf.train.get_or_create_global_step() + + # Define the model and create clones. + model_fn = _build_deeplab + model_args = (dataset.get_one_shot_iterator(), { + common.OUTPUT_TYPE: dataset.num_of_classes + }, dataset.ignore_label) + clones = model_deploy.create_clones(config, model_fn, args=model_args) + + # Gather update_ops from the first clone. These contain, for example, + # the updates for the batch_norm variables created by model_fn. + first_clone_scope = config.clone_scope(0) + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) + + # Gather initial summaries. + summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) + + # Add summaries for model variables. + for model_var in tf.model_variables(): + summaries.add(tf.summary.histogram(model_var.op.name, model_var)) + + # Add summaries for images, labels, semantic predictions + if FLAGS.save_summaries_images: + summary_image = graph.get_tensor_by_name( + ('%s/%s:0' % (first_clone_scope, common.IMAGE)).strip('/')) + summaries.add( + tf.summary.image('samples/%s' % common.IMAGE, summary_image)) + + first_clone_label = graph.get_tensor_by_name( + ('%s/%s:0' % (first_clone_scope, common.LABEL)).strip('/')) + # Scale up summary image pixel values for better visualization. + pixel_scaling = max(1, 255 // dataset.num_of_classes) + summary_label = tf.cast(first_clone_label * pixel_scaling, tf.uint8) + summaries.add( + tf.summary.image('samples/%s' % common.LABEL, summary_label)) + + first_clone_output = graph.get_tensor_by_name( + ('%s/%s:0' % (first_clone_scope, common.OUTPUT_TYPE)).strip('/')) + predictions = tf.expand_dims(tf.argmax(first_clone_output, 3), -1) + + summary_predictions = tf.cast(predictions * pixel_scaling, tf.uint8) + summaries.add( + tf.summary.image( + 'samples/%s' % common.OUTPUT_TYPE, summary_predictions)) + + # Add summaries for losses. + for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope): + summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss)) + + # Build the optimizer based on the device specification. 
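+    # The learning rate follows the 'poly' policy by default; see
+    # train_utils.get_model_learning_rate for how the slow start and end
+    # learning rate settings feed into the schedule.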
+ with tf.device(config.optimizer_device()): + learning_rate = train_utils.get_model_learning_rate( + FLAGS.learning_policy, + FLAGS.base_learning_rate, + FLAGS.learning_rate_decay_step, + FLAGS.learning_rate_decay_factor, + FLAGS.training_number_of_steps, + FLAGS.learning_power, + FLAGS.slow_start_step, + FLAGS.slow_start_learning_rate, + decay_steps=FLAGS.decay_steps, + end_learning_rate=FLAGS.end_learning_rate) + + summaries.add(tf.summary.scalar('learning_rate', learning_rate)) + + if FLAGS.optimizer == 'momentum': + optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum) + elif FLAGS.optimizer == 'adam': + optimizer = tf.train.AdamOptimizer( + learning_rate=FLAGS.adam_learning_rate, epsilon=FLAGS.adam_epsilon) + else: + raise ValueError('Unknown optimizer') + + if FLAGS.quantize_delay_step >= 0: + if FLAGS.num_clones > 1: + raise ValueError('Quantization doesn\'t support multi-clone yet.') + contrib_quantize.create_training_graph( + quant_delay=FLAGS.quantize_delay_step) + + startup_delay_steps = FLAGS.task * FLAGS.startup_delay_steps + + with tf.device(config.variables_device()): + total_loss, grads_and_vars = model_deploy.optimize_clones( + clones, optimizer) + total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.') + summaries.add(tf.summary.scalar('total_loss', total_loss)) + + # Modify the gradients for biases and last layer variables. + last_layers = model.get_extra_layer_scopes( + FLAGS.last_layers_contain_logits_only) + grad_mult = train_utils.get_model_gradient_multipliers( + last_layers, FLAGS.last_layer_gradient_multiplier) + if grad_mult: + grads_and_vars = slim.learning.multiply_gradients( + grads_and_vars, grad_mult) + + # Create gradient update op. + grad_updates = optimizer.apply_gradients( + grads_and_vars, global_step=global_step) + update_ops.append(grad_updates) + update_op = tf.group(*update_ops) + with tf.control_dependencies([update_op]): + train_tensor = tf.identity(total_loss, name='train_op') + + # Add the summaries from the first clone. These contain the summaries + # created by model_fn and either optimize_clones() or _gather_clone_loss(). + summaries |= set( + tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope)) + + # Merge all summaries together. + summary_op = tf.summary.merge(list(summaries)) + + # Soft placement allows placing on CPU ops without GPU implementation. + session_config = tf.ConfigProto( + allow_soft_placement=True, log_device_placement=False) + + # Start the training. 
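+    # slim.learning.train drives the loop below; when --profile_logdir is
+    # set, a tfprof ProfileContext additionally writes profiling data there.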
+ profile_dir = FLAGS.profile_logdir + if profile_dir is not None: + tf.gfile.MakeDirs(profile_dir) + + with contrib_tfprof.ProfileContext( + enabled=profile_dir is not None, profile_dir=profile_dir): + init_fn = None + if FLAGS.tf_initial_checkpoint: + init_fn = train_utils.get_model_init_fn( + FLAGS.train_logdir, + FLAGS.tf_initial_checkpoint, + FLAGS.initialize_last_layer, + last_layers, + ignore_missing_vars=True) + + slim.learning.train( + train_tensor, + logdir=FLAGS.train_logdir, + log_every_n_steps=FLAGS.log_steps, + master=FLAGS.master, + number_of_steps=FLAGS.training_number_of_steps, + is_chief=(FLAGS.task == 0), + session_config=session_config, + startup_delay_steps=startup_delay_steps, + init_fn=init_fn, + summary_op=summary_op, + save_summaries_secs=FLAGS.save_summaries_secs, + save_interval_secs=FLAGS.save_interval_secs) + + +if __name__ == '__main__': + flags.mark_flag_as_required('train_logdir') + flags.mark_flag_as_required('dataset_dir') + tf.app.run() diff --git a/deeplab/models/research/deeplab/utils/__init__.py b/deeplab/models/research/deeplab/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deeplab/models/research/deeplab/utils/get_dataset_colormap.py b/deeplab/models/research/deeplab/utils/get_dataset_colormap.py new file mode 100644 index 0000000..c0502e3 --- /dev/null +++ b/deeplab/models/research/deeplab/utils/get_dataset_colormap.py @@ -0,0 +1,416 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Visualizes the segmentation results via specified color map. + +Visualizes the semantic segmentation results by the color map +defined by the different datasets. Supported colormaps are: + +* ADE20K (http://groups.csail.mit.edu/vision/datasets/ADE20K/). + +* Cityscapes dataset (https://www.cityscapes-dataset.com). + +* Mapillary Vistas (https://research.mapillary.com). + +* PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/). +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from six.moves import range + +# Dataset names. +_ADE20K = 'ade20k' +_CITYSCAPES = 'cityscapes' +_MAPILLARY_VISTAS = 'mapillary_vistas' +_PASCAL = 'pascal' + +# Max number of entries in the colormap for each dataset. +_DATASET_MAX_ENTRIES = { + _ADE20K: 151, + _CITYSCAPES: 256, + _MAPILLARY_VISTAS: 66, + _PASCAL: 512, +} + + +def create_ade20k_label_colormap(): + """Creates a label colormap used in ADE20K segmentation benchmark. + + Returns: + A colormap for visualizing segmentation results. 
+ """ + return np.asarray([ + [0, 0, 0], + [120, 120, 120], + [180, 120, 120], + [6, 230, 230], + [80, 50, 50], + [4, 200, 3], + [120, 120, 80], + [140, 140, 140], + [204, 5, 255], + [230, 230, 230], + [4, 250, 7], + [224, 5, 255], + [235, 255, 7], + [150, 5, 61], + [120, 120, 70], + [8, 255, 51], + [255, 6, 82], + [143, 255, 140], + [204, 255, 4], + [255, 51, 7], + [204, 70, 3], + [0, 102, 200], + [61, 230, 250], + [255, 6, 51], + [11, 102, 255], + [255, 7, 71], + [255, 9, 224], + [9, 7, 230], + [220, 220, 220], + [255, 9, 92], + [112, 9, 255], + [8, 255, 214], + [7, 255, 224], + [255, 184, 6], + [10, 255, 71], + [255, 41, 10], + [7, 255, 255], + [224, 255, 8], + [102, 8, 255], + [255, 61, 6], + [255, 194, 7], + [255, 122, 8], + [0, 255, 20], + [255, 8, 41], + [255, 5, 153], + [6, 51, 255], + [235, 12, 255], + [160, 150, 20], + [0, 163, 255], + [140, 140, 140], + [250, 10, 15], + [20, 255, 0], + [31, 255, 0], + [255, 31, 0], + [255, 224, 0], + [153, 255, 0], + [0, 0, 255], + [255, 71, 0], + [0, 235, 255], + [0, 173, 255], + [31, 0, 255], + [11, 200, 200], + [255, 82, 0], + [0, 255, 245], + [0, 61, 255], + [0, 255, 112], + [0, 255, 133], + [255, 0, 0], + [255, 163, 0], + [255, 102, 0], + [194, 255, 0], + [0, 143, 255], + [51, 255, 0], + [0, 82, 255], + [0, 255, 41], + [0, 255, 173], + [10, 0, 255], + [173, 255, 0], + [0, 255, 153], + [255, 92, 0], + [255, 0, 255], + [255, 0, 245], + [255, 0, 102], + [255, 173, 0], + [255, 0, 20], + [255, 184, 184], + [0, 31, 255], + [0, 255, 61], + [0, 71, 255], + [255, 0, 204], + [0, 255, 194], + [0, 255, 82], + [0, 10, 255], + [0, 112, 255], + [51, 0, 255], + [0, 194, 255], + [0, 122, 255], + [0, 255, 163], + [255, 153, 0], + [0, 255, 10], + [255, 112, 0], + [143, 255, 0], + [82, 0, 255], + [163, 255, 0], + [255, 235, 0], + [8, 184, 170], + [133, 0, 255], + [0, 255, 92], + [184, 0, 255], + [255, 0, 31], + [0, 184, 255], + [0, 214, 255], + [255, 0, 112], + [92, 255, 0], + [0, 224, 255], + [112, 224, 255], + [70, 184, 160], + [163, 0, 255], + [153, 0, 255], + [71, 255, 0], + [255, 0, 163], + [255, 204, 0], + [255, 0, 143], + [0, 255, 235], + [133, 255, 0], + [255, 0, 235], + [245, 0, 255], + [255, 0, 122], + [255, 245, 0], + [10, 190, 212], + [214, 255, 0], + [0, 204, 255], + [20, 0, 255], + [255, 255, 0], + [0, 153, 255], + [0, 41, 255], + [0, 255, 204], + [41, 0, 255], + [41, 255, 0], + [173, 0, 255], + [0, 245, 255], + [71, 0, 255], + [122, 0, 255], + [0, 255, 184], + [0, 92, 255], + [184, 255, 0], + [0, 133, 255], + [255, 214, 0], + [25, 194, 194], + [102, 255, 0], + [92, 0, 255], + ]) + + +def create_cityscapes_label_colormap(): + """Creates a label colormap used in CITYSCAPES segmentation benchmark. + + Returns: + A colormap for visualizing segmentation results. + """ + colormap = np.zeros((256, 3), dtype=np.uint8) + colormap[0] = [128, 64, 128] + colormap[1] = [244, 35, 232] + colormap[2] = [70, 70, 70] + colormap[3] = [102, 102, 156] + colormap[4] = [190, 153, 153] + colormap[5] = [153, 153, 153] + colormap[6] = [250, 170, 30] + colormap[7] = [220, 220, 0] + colormap[8] = [107, 142, 35] + colormap[9] = [152, 251, 152] + colormap[10] = [70, 130, 180] + colormap[11] = [220, 20, 60] + colormap[12] = [255, 0, 0] + colormap[13] = [0, 0, 142] + colormap[14] = [0, 0, 70] + colormap[15] = [0, 60, 100] + colormap[16] = [0, 80, 100] + colormap[17] = [0, 0, 230] + colormap[18] = [119, 11, 32] + return colormap + + +def create_mapillary_vistas_label_colormap(): + """Creates a label colormap used in Mapillary Vistas segmentation benchmark. 
+ + Returns: + A colormap for visualizing segmentation results. + """ + return np.asarray([ + [165, 42, 42], + [0, 192, 0], + [196, 196, 196], + [190, 153, 153], + [180, 165, 180], + [102, 102, 156], + [102, 102, 156], + [128, 64, 255], + [140, 140, 200], + [170, 170, 170], + [250, 170, 160], + [96, 96, 96], + [230, 150, 140], + [128, 64, 128], + [110, 110, 110], + [244, 35, 232], + [150, 100, 100], + [70, 70, 70], + [150, 120, 90], + [220, 20, 60], + [255, 0, 0], + [255, 0, 0], + [255, 0, 0], + [200, 128, 128], + [255, 255, 255], + [64, 170, 64], + [128, 64, 64], + [70, 130, 180], + [255, 255, 255], + [152, 251, 152], + [107, 142, 35], + [0, 170, 30], + [255, 255, 128], + [250, 0, 30], + [0, 0, 0], + [220, 220, 220], + [170, 170, 170], + [222, 40, 40], + [100, 170, 30], + [40, 40, 40], + [33, 33, 33], + [170, 170, 170], + [0, 0, 142], + [170, 170, 170], + [210, 170, 100], + [153, 153, 153], + [128, 128, 128], + [0, 0, 142], + [250, 170, 30], + [192, 192, 192], + [220, 220, 0], + [180, 165, 180], + [119, 11, 32], + [0, 0, 142], + [0, 60, 100], + [0, 0, 142], + [0, 0, 90], + [0, 0, 230], + [0, 80, 100], + [128, 64, 64], + [0, 0, 110], + [0, 0, 70], + [0, 0, 192], + [32, 32, 32], + [0, 0, 0], + [0, 0, 0], + ]) + + +def create_pascal_label_colormap(): + """Creates a label colormap used in PASCAL VOC segmentation benchmark. + + Returns: + A colormap for visualizing segmentation results. + """ + colormap = np.zeros((_DATASET_MAX_ENTRIES[_PASCAL], 3), dtype=int) + ind = np.arange(_DATASET_MAX_ENTRIES[_PASCAL], dtype=int) + + for shift in reversed(list(range(8))): + for channel in range(3): + colormap[:, channel] |= bit_get(ind, channel) << shift + ind >>= 3 + + return colormap + + +def get_ade20k_name(): + return _ADE20K + + +def get_cityscapes_name(): + return _CITYSCAPES + + +def get_mapillary_vistas_name(): + return _MAPILLARY_VISTAS + + +def get_pascal_name(): + return _PASCAL + + +def bit_get(val, idx): + """Gets the bit value. + + Args: + val: Input value, int or numpy int array. + idx: Which bit of the input val. + + Returns: + The "idx"-th bit of input val. + """ + return (val >> idx) & 1 + + +def create_label_colormap(dataset=_PASCAL): + """Creates a label colormap for the specified dataset. + + Args: + dataset: The colormap used in the dataset. + + Returns: + A numpy array of the dataset colormap. + + Raises: + ValueError: If the dataset is not supported. + """ + if dataset == _ADE20K: + return create_ade20k_label_colormap() + elif dataset == _CITYSCAPES: + return create_cityscapes_label_colormap() + elif dataset == _MAPILLARY_VISTAS: + return create_mapillary_vistas_label_colormap() + elif dataset == _PASCAL: + return create_pascal_label_colormap() + else: + raise ValueError('Unsupported dataset.') + + +def label_to_color_image(label, dataset=_PASCAL): + """Adds color defined by the dataset colormap to the label. + + Args: + label: A 2D array with integer type, storing the segmentation label. + dataset: The colormap used in the dataset. + + Returns: + result: A 2D array with floating type. The element of the array + is the color indexed by the corresponding element in the input label + to the dataset color map. + + Raises: + ValueError: If label is not of rank 2 or its value is larger than color + map maximum entry. + """ + if label.ndim != 2: + raise ValueError('Expect 2-D input label. 
Got {}'.format(label.shape)) + + if np.max(label) >= _DATASET_MAX_ENTRIES[dataset]: + raise ValueError( + 'label value too large: {} >= {}.'.format( + np.max(label), _DATASET_MAX_ENTRIES[dataset])) + + colormap = create_label_colormap(dataset) + return colormap[label] + + +def get_dataset_colormap_max_entries(dataset): + return _DATASET_MAX_ENTRIES[dataset] diff --git a/deeplab/models/research/deeplab/utils/get_dataset_colormap_test.py b/deeplab/models/research/deeplab/utils/get_dataset_colormap_test.py new file mode 100644 index 0000000..89adb2c --- /dev/null +++ b/deeplab/models/research/deeplab/utils/get_dataset_colormap_test.py @@ -0,0 +1,97 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for get_dataset_colormap.py.""" + +import numpy as np +import tensorflow as tf + +from deeplab.utils import get_dataset_colormap + + +class VisualizationUtilTest(tf.test.TestCase): + + def testBitGet(self): + """Test that if the returned bit value is correct.""" + self.assertEqual(1, get_dataset_colormap.bit_get(9, 0)) + self.assertEqual(0, get_dataset_colormap.bit_get(9, 1)) + self.assertEqual(0, get_dataset_colormap.bit_get(9, 2)) + self.assertEqual(1, get_dataset_colormap.bit_get(9, 3)) + + def testPASCALLabelColorMapValue(self): + """Test the getd color map value.""" + colormap = get_dataset_colormap.create_pascal_label_colormap() + + # Only test a few sampled entries in the color map. 
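Those sampled colour values are not arbitrary: create_pascal_label_colormap above builds the PASCAL palette by interleaving the bits of the label index across the three channels, and bit_get is the helper that extracts one bit at a time. A minimal standalone sketch of the same scheme (NumPy only, independent of the module under test) that reproduces two of the sampled entries:

    import numpy as np

    def pascal_colormap(num_entries=512):
        # Spread the bits of each label index across R, G, B, filling every
        # channel from its most significant bit (shift 7) downwards.
        colormap = np.zeros((num_entries, 3), dtype=int)
        ind = np.arange(num_entries, dtype=int)
        for shift in reversed(range(8)):
            for channel in range(3):
                colormap[:, channel] |= ((ind >> channel) & 1) << shift
            ind >>= 3
        return colormap

    cmap = pascal_colormap()
    assert (cmap[5] == [128, 0, 128]).all()     # label 5 = 0b101 -> high bit set on R and B
    assert (cmap[23] == [128, 192, 128]).all()

Because only three low-order bits are consumed per pass, neighbouring class indices still receive visually distinct colours even though the palette is generated procedurally.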
+ self.assertTrue(np.array_equal([128., 0., 128.], colormap[5, :])) + self.assertTrue(np.array_equal([128., 192., 128.], colormap[23, :])) + self.assertTrue(np.array_equal([128., 0., 192.], colormap[37, :])) + self.assertTrue(np.array_equal([224., 192., 192.], colormap[127, :])) + self.assertTrue(np.array_equal([192., 160., 192.], colormap[175, :])) + + def testLabelToPASCALColorImage(self): + """Test the value of the converted label value.""" + label = np.array([[0, 16, 16], [52, 7, 52]]) + expected_result = np.array([ + [[0, 0, 0], [0, 64, 0], [0, 64, 0]], + [[0, 64, 192], [128, 128, 128], [0, 64, 192]] + ]) + colored_label = get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_pascal_name()) + self.assertTrue(np.array_equal(expected_result, colored_label)) + + def testUnExpectedLabelValueForLabelToPASCALColorImage(self): + """Raise ValueError when input value exceeds range.""" + label = np.array([[120], [600]]) + with self.assertRaises(ValueError): + get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_pascal_name()) + + def testUnExpectedLabelDimensionForLabelToPASCALColorImage(self): + """Raise ValueError if input dimension is not correct.""" + label = np.array([120]) + with self.assertRaises(ValueError): + get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_pascal_name()) + + def testGetColormapForUnsupportedDataset(self): + with self.assertRaises(ValueError): + get_dataset_colormap.create_label_colormap('unsupported_dataset') + + def testUnExpectedLabelDimensionForLabelToADE20KColorImage(self): + label = np.array([250]) + with self.assertRaises(ValueError): + get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_ade20k_name()) + + def testFirstColorInADE20KColorMap(self): + label = np.array([[1, 3], [10, 20]]) + expected_result = np.array([ + [[120, 120, 120], [6, 230, 230]], + [[4, 250, 7], [204, 70, 3]] + ]) + colored_label = get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_ade20k_name()) + self.assertTrue(np.array_equal(colored_label, expected_result)) + + def testMapillaryVistasColorMapValue(self): + colormap = get_dataset_colormap.create_mapillary_vistas_label_colormap() + self.assertTrue(np.array_equal([190, 153, 153], colormap[3, :])) + self.assertTrue(np.array_equal([102, 102, 156], colormap[6, :])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/deeplab/models/research/deeplab/utils/save_annotation.py b/deeplab/models/research/deeplab/utils/save_annotation.py new file mode 100644 index 0000000..2444df7 --- /dev/null +++ b/deeplab/models/research/deeplab/utils/save_annotation.py @@ -0,0 +1,66 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Saves an annotation as one png image. 
+ +This script saves an annotation as one png image, and has the option to add +colormap to the png image for better visualization. +""" + +import numpy as np +import PIL.Image as img +import tensorflow as tf + +from deeplab.utils import get_dataset_colormap + + +def save_annotation(label, + save_dir, + filename, + add_colormap=True, + normalize_to_unit_values=False, + scale_values=False, + colormap_type=get_dataset_colormap.get_pascal_name()): + """Saves the given label to image on disk. + + Args: + label: The numpy array to be saved. The data will be converted + to uint8 and saved as png image. + save_dir: String, the directory to which the results will be saved. + filename: String, the image filename. + add_colormap: Boolean, add color map to the label or not. + normalize_to_unit_values: Boolean, normalize the input values to [0, 1]. + scale_values: Boolean, scale the input values to [0, 255] for visualization. + colormap_type: String, colormap type for visualization. + """ + # Add colormap for visualizing the prediction. + if add_colormap: + colored_label = get_dataset_colormap.label_to_color_image( + label, colormap_type) + else: + colored_label = label + if normalize_to_unit_values: + min_value = np.amin(colored_label) + max_value = np.amax(colored_label) + range_value = max_value - min_value + if range_value != 0: + colored_label = (colored_label - min_value) / range_value + + if scale_values: + colored_label = 255. * colored_label + + pil_image = img.fromarray(colored_label.astype(dtype=np.uint8)) + with tf.gfile.Open('%s/%s.png' % (save_dir, filename), mode='w') as f: + pil_image.save(f, 'PNG') diff --git a/deeplab/models/research/deeplab/utils/train_utils.py b/deeplab/models/research/deeplab/utils/train_utils.py new file mode 100644 index 0000000..14bbd6e --- /dev/null +++ b/deeplab/models/research/deeplab/utils/train_utils.py @@ -0,0 +1,372 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for training.""" + +import six +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework + +from deeplab.core import preprocess_utils +from deeplab.core import utils + + +def _div_maybe_zero(total_loss, num_present): + """Normalizes the total loss with the number of present pixels.""" + return tf.to_float(num_present > 0) * tf.math.divide( + total_loss, + tf.maximum(1e-5, num_present)) + + +def add_softmax_cross_entropy_loss_for_each_scale(scales_to_logits, + labels, + num_classes, + ignore_label, + loss_weight=1.0, + upsample_logits=True, + hard_example_mining_step=0, + top_k_percent_pixels=1.0, + gt_is_matting_map=False, + scope=None): + """Adds softmax cross entropy loss for logits of each scale. + + Args: + scales_to_logits: A map from logits names for different scales to logits. + The logits have shape [batch, logits_height, logits_width, num_classes]. 
+ labels: Groundtruth labels with shape [batch, image_height, image_width, 1]. + num_classes: Integer, number of target classes. + ignore_label: Integer, label to ignore. + loss_weight: A float or a list of loss weights. If it is a float, it means + all the labels have the same weight. If it is a list of weights, then each + element in the list represents the weight for the label of its index, for + example, loss_weight = [0.1, 0.5] means the weight for label 0 is 0.1 and + the weight for label 1 is 0.5. + upsample_logits: Boolean, upsample logits or not. + hard_example_mining_step: An integer, the training step in which the hard + exampling mining kicks off. Note that we gradually reduce the mining + percent to the top_k_percent_pixels. For example, if + hard_example_mining_step = 100K and top_k_percent_pixels = 0.25, then + mining percent will gradually reduce from 100% to 25% until 100K steps + after which we only mine top 25% pixels. + top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its value + < 1.0, only compute the loss for the top k percent pixels (e.g., the top + 20% pixels). This is useful for hard pixel mining. + gt_is_matting_map: If true, the groundtruth is a matting map of confidence + score. If false, the groundtruth is an integer valued class mask. + scope: String, the scope for the loss. + + Raises: + ValueError: Label or logits is None, or groundtruth is matting map while + label is not floating value. + """ + if labels is None: + raise ValueError('No label for softmax cross entropy loss.') + + # If input groundtruth is a matting map of confidence, check if the input + # labels are floating point values. + if gt_is_matting_map and not labels.dtype.is_floating: + raise ValueError('Labels must be floats if groundtruth is a matting map.') + + for scale, logits in six.iteritems(scales_to_logits): + loss_scope = None + if scope: + loss_scope = '%s_%s' % (scope, scale) + + if upsample_logits: + # Label is not downsampled, and instead we upsample logits. + logits = tf.image.resize_bilinear( + logits, + preprocess_utils.resolve_shape(labels, 4)[1:3], + align_corners=True) + scaled_labels = labels + else: + # Label is downsampled to the same size as logits. + # When gt_is_matting_map = true, label downsampling with nearest neighbor + # method may introduce artifacts. However, to avoid ignore_label from + # being interpolated with other labels, we still perform nearest neighbor + # interpolation. + # TODO(huizhongc): Change to bilinear interpolation by processing padded + # and non-padded label separately. + if gt_is_matting_map: + tf.logging.warning( + 'Label downsampling with nearest neighbor may introduce artifacts.') + + scaled_labels = tf.image.resize_nearest_neighbor( + labels, + preprocess_utils.resolve_shape(logits, 4)[1:3], + align_corners=True) + + scaled_labels = tf.reshape(scaled_labels, shape=[-1]) + weights = utils.get_label_weight_mask( + scaled_labels, ignore_label, num_classes, label_weights=loss_weight) + # Dimension of keep_mask is equal to the total number of pixels. + keep_mask = tf.cast( + tf.not_equal(scaled_labels, ignore_label), dtype=tf.float32) + + train_labels = None + logits = tf.reshape(logits, shape=[-1, num_classes]) + + if gt_is_matting_map: + # When the groundtruth is integer label mask, we can assign class + # dependent label weights to the loss. When the groundtruth is image + # matting confidence, we do not apply class-dependent label weight (i.e., + # label_weight = 1.0). 
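The hard_example_mining_step note above describes an annealed schedule: training starts by using every pixel and linearly shrinks the mined fraction until only top_k_percent_pixels remain. A small pure-Python sketch of that schedule (the 100K / 0.25 numbers are the docstring's own example):

    def mining_percent(global_step, hard_example_mining_step, top_k_percent_pixels):
        # With no annealing step configured, mine the top-k fraction immediately.
        if hard_example_mining_step == 0:
            return top_k_percent_pixels
        # Linearly interpolate from 100% of pixels down to top_k_percent_pixels.
        ratio = min(1.0, global_step / float(hard_example_mining_step))
        return ratio * top_k_percent_pixels + (1.0 - ratio)

    # hard_example_mining_step=100000, top_k_percent_pixels=0.25:
    #   step      0 -> 1.000  (all pixels contribute to the loss)
    #   step  50000 -> 0.625
    #   step 100000 -> 0.250  (only the hardest quarter of pixels)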
+ if loss_weight != 1.0: + raise ValueError( + 'loss_weight must equal to 1 if groundtruth is matting map.') + + # Assign label value 0 to ignore pixels. The exact label value of ignore + # pixel does not matter, because those ignore_value pixel losses will be + # multiplied to 0 weight. + train_labels = scaled_labels * keep_mask + + train_labels = tf.expand_dims(train_labels, 1) + train_labels = tf.concat([1 - train_labels, train_labels], axis=1) + else: + train_labels = tf.one_hot( + scaled_labels, num_classes, on_value=1.0, off_value=0.0) + + default_loss_scope = ('softmax_all_pixel_loss' + if top_k_percent_pixels == 1.0 else + 'softmax_hard_example_mining') + with tf.name_scope(loss_scope, default_loss_scope, + [logits, train_labels, weights]): + # Compute the loss for all pixels. + pixel_losses = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=tf.stop_gradient( + train_labels, name='train_labels_stop_gradient'), + logits=logits, + name='pixel_losses') + weighted_pixel_losses = tf.multiply(pixel_losses, weights) + + if top_k_percent_pixels == 1.0: + total_loss = tf.reduce_sum(weighted_pixel_losses) + num_present = tf.reduce_sum(keep_mask) + loss = _div_maybe_zero(total_loss, num_present) + tf.losses.add_loss(loss) + else: + num_pixels = tf.to_float(tf.shape(logits)[0]) + # Compute the top_k_percent pixels based on current training step. + if hard_example_mining_step == 0: + # Directly focus on the top_k pixels. + top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels) + else: + # Gradually reduce the mining percent to top_k_percent_pixels. + global_step = tf.to_float(tf.train.get_or_create_global_step()) + ratio = tf.minimum(1.0, global_step / hard_example_mining_step) + top_k_pixels = tf.to_int32( + (ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels) + top_k_losses, _ = tf.nn.top_k(weighted_pixel_losses, + k=top_k_pixels, + sorted=True, + name='top_k_percent_pixels') + total_loss = tf.reduce_sum(top_k_losses) + num_present = tf.reduce_sum( + tf.to_float(tf.not_equal(top_k_losses, 0.0))) + loss = _div_maybe_zero(total_loss, num_present) + tf.losses.add_loss(loss) + + +def get_model_init_fn(train_logdir, + tf_initial_checkpoint, + initialize_last_layer, + last_layers, + ignore_missing_vars=False): + """Gets the function initializing model variables from a checkpoint. + + Args: + train_logdir: Log directory for training. + tf_initial_checkpoint: TensorFlow checkpoint for initialization. + initialize_last_layer: Initialize last layer or not. + last_layers: Last layers of the model. + ignore_missing_vars: Ignore missing variables in the checkpoint. + + Returns: + Initialization function. + """ + if tf_initial_checkpoint is None: + tf.logging.info('Not initializing the model from a checkpoint.') + return None + + if tf.train.latest_checkpoint(train_logdir): + tf.logging.info('Ignoring initialization; other checkpoint exists') + return None + + tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint) + + # Variables that will not be restored. 
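get_model_init_fn above skips restoring when train_logdir already contains a checkpoint; otherwise it restores from tf_initial_checkpoint while excluding 'global_step' and, when initialize_last_layer is False, the last-layer scopes (the exclusion list built just below). A rough pure-Python sketch of that filtering; the variable names are hypothetical, and contrib_framework.get_variables_to_restore matches excluded scopes by name, approximated here with a prefix test:

    def names_to_restore(all_variable_names, last_layers, initialize_last_layer):
        # The global step is never restored; last-layer scopes are skipped so a
        # new head (e.g. a different number of classes) starts from scratch.
        exclude = ['global_step']
        if not initialize_last_layer:
            exclude.extend(last_layers)
        return [name for name in all_variable_names
                if not any(name.startswith(scope) for scope in exclude)]

    # names_to_restore(
    #     ['xception_65/entry_flow/conv1_1/weights',
    #      'logits/semantic/weights',
    #      'global_step'],
    #     last_layers=['logits'], initialize_last_layer=False)
    # -> ['xception_65/entry_flow/conv1_1/weights']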
+ exclude_list = ['global_step'] + if not initialize_last_layer: + exclude_list.extend(last_layers) + + variables_to_restore = contrib_framework.get_variables_to_restore( + exclude=exclude_list) + + if variables_to_restore: + init_op, init_feed_dict = contrib_framework.assign_from_checkpoint( + tf_initial_checkpoint, + variables_to_restore, + ignore_missing_vars=ignore_missing_vars) + global_step = tf.train.get_or_create_global_step() + + def restore_fn(sess): + sess.run(init_op, init_feed_dict) + sess.run([global_step]) + + return restore_fn + + return None + + +def get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier): + """Gets the gradient multipliers. + + The gradient multipliers will adjust the learning rates for model + variables. For the task of semantic segmentation, the models are + usually fine-tuned from the models trained on the task of image + classification. To fine-tune the models, we usually set larger (e.g., + 10 times larger) learning rate for the parameters of last layer. + + Args: + last_layers: Scopes of last layers. + last_layer_gradient_multiplier: The gradient multiplier for last layers. + + Returns: + The gradient multiplier map with variables as key, and multipliers as value. + """ + gradient_multipliers = {} + + for var in tf.model_variables(): + # Double the learning rate for biases. + if 'biases' in var.op.name: + gradient_multipliers[var.op.name] = 2. + + # Use larger learning rate for last layer variables. + for layer in last_layers: + if layer in var.op.name and 'biases' in var.op.name: + gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier + break + elif layer in var.op.name: + gradient_multipliers[var.op.name] = last_layer_gradient_multiplier + break + + return gradient_multipliers + + +def get_model_learning_rate(learning_policy, + base_learning_rate, + learning_rate_decay_step, + learning_rate_decay_factor, + training_number_of_steps, + learning_power, + slow_start_step, + slow_start_learning_rate, + slow_start_burnin_type='none', + decay_steps=0.0, + end_learning_rate=0.0, + boundaries=None, + boundary_learning_rates=None): + """Gets model's learning rate. + + Computes the model's learning rate for different learning policy. + Right now, only "step" and "poly" are supported. + (1) The learning policy for "step" is computed as follows: + current_learning_rate = base_learning_rate * + learning_rate_decay_factor ^ (global_step / learning_rate_decay_step) + See tf.train.exponential_decay for details. + (2) The learning policy for "poly" is computed as follows: + current_learning_rate = base_learning_rate * + (1 - global_step / training_number_of_steps) ^ learning_power + + Args: + learning_policy: Learning rate policy for training. + base_learning_rate: The base learning rate for model training. + learning_rate_decay_step: Decay the base learning rate at a fixed step. + learning_rate_decay_factor: The rate to decay the base learning rate. + training_number_of_steps: Number of steps for training. + learning_power: Power used for 'poly' learning policy. + slow_start_step: Training model with small learning rate for the first + few steps. + slow_start_learning_rate: The learning rate employed during slow start. + slow_start_burnin_type: The burnin type for the slow start stage. Can be + `none` which means no burnin or `linear` which means the learning rate + increases linearly from slow_start_learning_rate and reaches + base_learning_rate after slow_start_steps. 
+ decay_steps: Float, `decay_steps` for polynomial learning rate. + end_learning_rate: Float, `end_learning_rate` for polynomial learning rate. + boundaries: A list of `Tensor`s or `int`s or `float`s with strictly + increasing entries. + boundary_learning_rates: A list of `Tensor`s or `float`s or `int`s that + specifies the values for the intervals defined by `boundaries`. It should + have one more element than `boundaries`, and all elements should have the + same type. + + Returns: + Learning rate for the specified learning policy. + + Raises: + ValueError: If learning policy or slow start burnin type is not recognized. + ValueError: If `boundaries` and `boundary_learning_rates` are not set for + multi_steps learning rate decay. + """ + global_step = tf.train.get_or_create_global_step() + adjusted_global_step = tf.maximum(global_step - slow_start_step, 0) + if decay_steps == 0.0: + tf.logging.info('Setting decay_steps to total training steps.') + decay_steps = training_number_of_steps - slow_start_step + if learning_policy == 'step': + learning_rate = tf.train.exponential_decay( + base_learning_rate, + adjusted_global_step, + learning_rate_decay_step, + learning_rate_decay_factor, + staircase=True) + elif learning_policy == 'poly': + learning_rate = tf.train.polynomial_decay( + base_learning_rate, + adjusted_global_step, + decay_steps=decay_steps, + end_learning_rate=end_learning_rate, + power=learning_power) + elif learning_policy == 'cosine': + learning_rate = tf.train.cosine_decay( + base_learning_rate, + adjusted_global_step, + training_number_of_steps - slow_start_step) + elif learning_policy == 'multi_steps': + if boundaries is None or boundary_learning_rates is None: + raise ValueError('Must set `boundaries` and `boundary_learning_rates` ' + 'for multi_steps learning rate decay.') + learning_rate = tf.train.piecewise_constant_decay( + adjusted_global_step, + boundaries, + boundary_learning_rates) + else: + raise ValueError('Unknown learning policy.') + + adjusted_slow_start_learning_rate = slow_start_learning_rate + if slow_start_burnin_type == 'linear': + # Do linear burnin. Increase linearly from slow_start_learning_rate and + # reach base_learning_rate after (global_step >= slow_start_steps). + adjusted_slow_start_learning_rate = ( + slow_start_learning_rate + + (base_learning_rate - slow_start_learning_rate) * + tf.to_float(global_step) / slow_start_step) + elif slow_start_burnin_type != 'none': + raise ValueError('Unknown burnin type.') + + # Employ small learning rate at the first few steps for warm start. + return tf.where(global_step < slow_start_step, + adjusted_slow_start_learning_rate, learning_rate) diff --git a/deeplab/models/research/deeplab/vis.py b/deeplab/models/research/deeplab/vis.py new file mode 100644 index 0000000..20808d3 --- /dev/null +++ b/deeplab/models/research/deeplab/vis.py @@ -0,0 +1,327 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
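The get_model_learning_rate docstring above spells out the 'step' and 'poly' formulas implemented just before this point; a compact pure-Python sketch of both, including the slow-start override for the first slow_start_step steps (the numeric defaults are illustrative, not taken from the flags):

    def learning_rate(step, policy='poly', base_lr=0.007, training_steps=30000,
                      power=0.9, decay_step=2000, decay_factor=0.1,
                      slow_start_step=0, slow_start_lr=1e-4):
        # Warm start: a small constant learning rate for the first few steps.
        if step < slow_start_step:
            return slow_start_lr
        adjusted = step - slow_start_step
        if policy == 'step':
            # Staircase exponential decay (cf. tf.train.exponential_decay).
            return base_lr * decay_factor ** (adjusted // decay_step)
        if policy == 'poly':
            # Polynomial decay towards 0 over the remaining steps
            # (cf. tf.train.polynomial_decay with end_learning_rate=0).
            decay_steps = training_steps - slow_start_step
            frac = min(adjusted, decay_steps) / float(decay_steps)
            return base_lr * (1.0 - frac) ** power
        raise ValueError('Unknown learning policy.')

    # e.g. learning_rate(0) == 0.007 and learning_rate(30000) == 0.0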
+# ============================================================================== + +"""Segmentation results visualization on a given set of images. + +See model.py for more details and usage. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os.path +import time +import numpy as np +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.contrib import training as contrib_training +from deeplab import common +from deeplab import model +from deeplab.datasets import data_generator +from deeplab.utils import save_annotation + +flags = tf.app.flags + +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +# Settings for log directories. + +flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.') + +flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.') + +# Settings for visualizing the model. + +flags.DEFINE_integer('vis_batch_size', 1, + 'The number of images in each batch during evaluation.') + +flags.DEFINE_list('vis_crop_size', '513,513', + 'Crop size [height, width] for visualization.') + +flags.DEFINE_integer('eval_interval_secs', 60 * 5, + 'How often (in seconds) to run evaluation.') + +# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or +# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note +# one could use different atrous_rates/output_stride during training/evaluation. +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 16, + 'The ratio of input to output spatial resolution.') + +# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test. +flags.DEFINE_multi_float('eval_scales', [1.0], + 'The scales to resize images for evaluation.') + +# Change to True for adding flipped images during test. +flags.DEFINE_bool('add_flipped_images', False, + 'Add flipped images for evaluation or not.') + +flags.DEFINE_integer( + 'quantize_delay_step', -1, + 'Steps to start quantized training. If < 0, will not quantize model.') + +# Dataset settings. + +flags.DEFINE_string('dataset', 'pascal_voc_seg', + 'Name of the segmentation dataset.') + +flags.DEFINE_string('vis_split', 'val', + 'Which split of the dataset used for visualizing results') + +flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.') + +flags.DEFINE_enum('colormap_type', 'pascal', ['pascal', 'cityscapes', 'ade20k'], + 'Visualization colormap type.') + +flags.DEFINE_boolean('also_save_raw_predictions', False, + 'Also save raw predictions.') + +flags.DEFINE_integer('max_number_of_iterations', 0, + 'Maximum number of visualization iterations. Will loop ' + 'indefinitely upon nonpositive values.') + +# The folder where semantic segmentation predictions are saved. +_SEMANTIC_PREDICTION_SAVE_FOLDER = 'segmentation_results' + +# The folder where raw semantic segmentation predictions are saved. +_RAW_SEMANTIC_PREDICTION_SAVE_FOLDER = 'raw_segmentation_results' + +# The format to save image. +_IMAGE_FORMAT = '%06d_image' + +# The format to save prediction +_PREDICTION_FORMAT = '%06d_prediction' + +# To evaluate Cityscapes results on the evaluation server, the labels used +# during training should be mapped to the labels for evaluation. 
+_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33] + + +def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id): + """Converts the predicted label for evaluation. + + There are cases where the training labels are not equal to the evaluation + labels. This function is used to perform the conversion so that we could + evaluate the results on the evaluation server. + + Args: + prediction: Semantic segmentation prediction. + train_id_to_eval_id: A list mapping from train id to evaluation id. + + Returns: + Semantic segmentation prediction whose labels have been changed. + """ + converted_prediction = prediction.copy() + for train_id, eval_id in enumerate(train_id_to_eval_id): + converted_prediction[prediction == train_id] = eval_id + + return converted_prediction + + +def _process_batch(sess, original_images, semantic_predictions, image_names, + image_heights, image_widths, image_id_offset, save_dir, + raw_save_dir, train_id_to_eval_id=None): + """Evaluates one single batch qualitatively. + + Args: + sess: TensorFlow session. + original_images: One batch of original images. + semantic_predictions: One batch of semantic segmentation predictions. + image_names: Image names. + image_heights: Image heights. + image_widths: Image widths. + image_id_offset: Image id offset for indexing images. + save_dir: The directory where the predictions will be saved. + raw_save_dir: The directory where the raw predictions will be saved. + train_id_to_eval_id: A list mapping from train id to eval id. + """ + (original_images, + semantic_predictions, + image_names, + image_heights, + image_widths) = sess.run([original_images, semantic_predictions, + image_names, image_heights, image_widths]) + + num_image = semantic_predictions.shape[0] + for i in range(num_image): + image_height = np.squeeze(image_heights[i]) + image_width = np.squeeze(image_widths[i]) + original_image = np.squeeze(original_images[i]) + semantic_prediction = np.squeeze(semantic_predictions[i]) + crop_semantic_prediction = semantic_prediction[:image_height, :image_width] + + # Save image. + save_annotation.save_annotation( + original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i), + add_colormap=False) + + # Save prediction. + save_annotation.save_annotation( + crop_semantic_prediction, save_dir, + _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True, + colormap_type=FLAGS.colormap_type) + + if FLAGS.also_save_raw_predictions: + image_filename = os.path.basename(image_names[i]) + + if train_id_to_eval_id is not None: + crop_semantic_prediction = _convert_train_id_to_eval_id( + crop_semantic_prediction, + train_id_to_eval_id) + save_annotation.save_annotation( + crop_semantic_prediction, raw_save_dir, image_filename, + add_colormap=False) + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + + # Get dataset-dependent information. 
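_convert_train_id_to_eval_id above walks the _CITYSCAPES_TRAIN_ID_TO_EVAL_ID table and rewrites each predicted train id to the id the Cityscapes evaluation server expects. A tiny NumPy check of the same remapping on a made-up prediction:

    import numpy as np

    train_id_to_eval_id = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
                           23, 24, 25, 26, 27, 28, 31, 32, 33]

    prediction = np.array([[0, 1], [18, 5]])          # train ids
    converted = prediction.copy()
    for train_id, eval_id in enumerate(train_id_to_eval_id):
        converted[prediction == train_id] = eval_id
    assert (converted == [[7, 8], [33, 17]]).all()    # eval ids

    # Since every train id indexes the table directly, the same result can be
    # obtained in one step with np.asarray(train_id_to_eval_id)[prediction].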
+ dataset = data_generator.Dataset( + dataset_name=FLAGS.dataset, + split_name=FLAGS.vis_split, + dataset_dir=FLAGS.dataset_dir, + batch_size=FLAGS.vis_batch_size, + crop_size=[int(sz) for sz in FLAGS.vis_crop_size], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + model_variant=FLAGS.model_variant, + is_training=False, + should_shuffle=False, + should_repeat=False) + + train_id_to_eval_id = None + if dataset.dataset_name == data_generator.get_cityscapes_dataset_name(): + tf.logging.info('Cityscapes requires converting train_id to eval_id.') + train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID + + # Prepare for visualization. + tf.gfile.MakeDirs(FLAGS.vis_logdir) + save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER) + tf.gfile.MakeDirs(save_dir) + raw_save_dir = os.path.join( + FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER) + tf.gfile.MakeDirs(raw_save_dir) + + tf.logging.info('Visualizing on %s set', FLAGS.vis_split) + + with tf.Graph().as_default(): + samples = dataset.get_one_shot_iterator().get_next() + + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes}, + crop_size=[int(sz) for sz in FLAGS.vis_crop_size], + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + if tuple(FLAGS.eval_scales) == (1.0,): + tf.logging.info('Performing single-scale test.') + predictions = model.predict_labels( + samples[common.IMAGE], + model_options=model_options, + image_pyramid=FLAGS.image_pyramid) + else: + tf.logging.info('Performing multi-scale test.') + if FLAGS.quantize_delay_step >= 0: + raise ValueError( + 'Quantize mode is not supported with multi-scale test.') + predictions = model.predict_labels_multi_scale( + samples[common.IMAGE], + model_options=model_options, + eval_scales=FLAGS.eval_scales, + add_flipped_images=FLAGS.add_flipped_images) + predictions = predictions[common.OUTPUT_TYPE] + + if FLAGS.min_resize_value and FLAGS.max_resize_value: + # Only support batch_size = 1, since we assume the dimensions of original + # image after tf.squeeze is [height, width, 3]. + assert FLAGS.vis_batch_size == 1 + + # Reverse the resizing and padding operations performed in preprocessing. + # First, we slice the valid regions (i.e., remove padded region) and then + # we resize the predictions back. 
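The comment above summarises how eval-time preprocessing is undone before saving predictions: slice away the padded border, then resize back to the original resolution with nearest-neighbor so no interpolated label values appear. A simplified NumPy/PIL sketch of those two steps on a toy array (the sizes are made up; vis.py takes them from the input sample):

    import numpy as np
    import PIL.Image as img

    padded_prediction = np.zeros((513, 513), dtype=np.uint8)  # output on the padded crop
    valid_height, valid_width = 400, 300                      # extent before padding
    original_height, original_width = 800, 600                # size before resizing

    # 1. Drop the padded border, keeping only the valid region.
    valid = padded_prediction[:valid_height, :valid_width]

    # 2. Nearest-neighbor resize back to the original size; unlike bilinear,
    #    this cannot create label values that were never predicted.
    restored = np.array(img.fromarray(valid).resize(
        (original_width, original_height), img.NEAREST))
    assert restored.shape == (original_height, original_width)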
+ original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE]) + original_image_shape = tf.shape(original_image) + predictions = tf.slice( + predictions, + [0, 0, 0], + [1, original_image_shape[0], original_image_shape[1]]) + resized_shape = tf.to_int32([tf.squeeze(samples[common.HEIGHT]), + tf.squeeze(samples[common.WIDTH])]) + predictions = tf.squeeze( + tf.image.resize_images(tf.expand_dims(predictions, 3), + resized_shape, + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True), 3) + + tf.train.get_or_create_global_step() + if FLAGS.quantize_delay_step >= 0: + contrib_quantize.create_eval_graph() + + num_iteration = 0 + max_num_iteration = FLAGS.max_number_of_iterations + + checkpoints_iterator = contrib_training.checkpoints_iterator( + FLAGS.checkpoint_dir, min_interval_secs=FLAGS.eval_interval_secs) + for checkpoint_path in checkpoints_iterator: + num_iteration += 1 + tf.logging.info( + 'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', + time.gmtime())) + tf.logging.info('Visualizing with model %s', checkpoint_path) + + scaffold = tf.train.Scaffold(init_op=tf.global_variables_initializer()) + session_creator = tf.train.ChiefSessionCreator( + scaffold=scaffold, + master=FLAGS.master, + checkpoint_filename_with_path=checkpoint_path) + with tf.train.MonitoredSession( + session_creator=session_creator, hooks=None) as sess: + batch = 0 + image_id_offset = 0 + + while not sess.should_stop(): + tf.logging.info('Visualizing batch %d', batch + 1) + _process_batch(sess=sess, + original_images=samples[common.ORIGINAL_IMAGE], + semantic_predictions=predictions, + image_names=samples[common.IMAGE_NAME], + image_heights=samples[common.HEIGHT], + image_widths=samples[common.WIDTH], + image_id_offset=image_id_offset, + save_dir=save_dir, + raw_save_dir=raw_save_dir, + train_id_to_eval_id=train_id_to_eval_id) + image_id_offset += FLAGS.vis_batch_size + batch += 1 + + tf.logging.info( + 'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', + time.gmtime())) + if max_num_iteration > 0 and num_iteration >= max_num_iteration: + break + +if __name__ == '__main__': + flags.mark_flag_as_required('checkpoint_dir') + flags.mark_flag_as_required('vis_logdir') + flags.mark_flag_as_required('dataset_dir') + tf.app.run() diff --git a/shapes/Circle.mtl b/shapes/Circle.mtl new file mode 100644 index 0000000..3b4b004 --- /dev/null +++ b/shapes/Circle.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.007 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 1.000000 1.000000 1.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/shapes/Circle.obj b/shapes/Circle.obj new file mode 100644 index 0000000..47d0969 --- /dev/null +++ b/shapes/Circle.obj @@ -0,0 +1,125 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib Circle.mtl +o Circle +v -0.001828 0.000000 -0.013312 +v 0.000883 0.000000 -0.013405 +v -0.000004 0.000000 -0.013434 +v 0.001762 0.000000 -0.013318 +v 0.002630 0.000000 -0.013174 +v -0.003577 0.000000 -0.012954 +v 0.003484 0.000000 -0.012974 +v 0.004321 0.000000 -0.012719 +v -0.005236 0.000000 -0.012378 +v 0.005139 0.000000 -0.012411 +v 0.005936 0.000000 -0.012051 +v -0.006788 0.000000 -0.011600 +v 0.006708 0.000000 -0.011639 +v 0.007453 0.000000 -0.011177 +v -0.008218 0.000000 -0.010634 +v 0.008168 0.000000 -0.010665 +v 0.008852 0.000000 -0.010105 +v -0.009509 0.000000 -0.009498 +v 0.009500 0.000000 -0.009498 +v 0.010107 0.000000 -0.008850 +v -0.010645 
0.000000 -0.008208 +v 0.010667 0.000000 -0.008167 +v -0.011610 0.000000 -0.006779 +v 0.011179 0.000000 -0.007452 +v 0.011641 0.000000 -0.006707 +v -0.012389 0.000000 -0.005227 +v 0.012053 0.000000 -0.005935 +v 0.012414 0.000000 -0.005139 +v -0.012965 0.000000 -0.003568 +v 0.012722 0.000000 -0.004321 +v 0.012976 0.000000 -0.003484 +v -0.013323 0.000000 -0.001820 +v 0.013176 0.000000 -0.002630 +v 0.013320 0.000000 -0.001762 +v -0.013445 -0.000000 0.000004 +v 0.013408 0.000000 -0.000884 +v 0.013437 -0.000000 0.000004 +v -0.013323 -0.000000 0.001827 +v 0.013314 -0.000000 0.001827 +v -0.012965 -0.000000 0.003576 +v 0.012957 -0.000000 0.003576 +v -0.012389 -0.000000 0.005235 +v 0.012381 -0.000000 0.005235 +v -0.011610 -0.000000 0.006786 +v 0.011602 -0.000000 0.006786 +v -0.010645 -0.000000 0.008216 +v 0.010636 -0.000000 0.008216 +v -0.009509 -0.000000 0.009506 +v 0.009500 -0.000000 0.009506 +v -0.008218 -0.000000 0.010642 +v 0.008209 -0.000000 0.010642 +v -0.006788 -0.000000 0.011607 +v 0.006780 -0.000000 0.011607 +v -0.005236 -0.000000 0.012386 +v 0.005228 -0.000000 0.012386 +v -0.003577 -0.000000 0.012962 +v 0.003569 -0.000000 0.012962 +v -0.001828 -0.000000 0.013320 +v 0.001820 -0.000000 0.013320 +v -0.000004 -0.000000 0.013442 +vn -0.0000 1.0000 0.0000 +usemtl SVGMat.007 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 1//1 5//1 4//1 +f 6//1 5//1 1//1 +f 6//1 7//1 5//1 +f 6//1 8//1 7//1 +f 9//1 8//1 6//1 +f 9//1 10//1 8//1 +f 9//1 11//1 10//1 +f 12//1 11//1 9//1 +f 12//1 13//1 11//1 +f 12//1 14//1 13//1 +f 15//1 14//1 12//1 +f 15//1 16//1 14//1 +f 15//1 17//1 16//1 +f 18//1 17//1 15//1 +f 18//1 19//1 17//1 +f 18//1 20//1 19//1 +f 21//1 20//1 18//1 +f 21//1 22//1 20//1 +f 23//1 22//1 21//1 +f 23//1 24//1 22//1 +f 23//1 25//1 24//1 +f 26//1 25//1 23//1 +f 26//1 27//1 25//1 +f 26//1 28//1 27//1 +f 29//1 28//1 26//1 +f 29//1 30//1 28//1 +f 29//1 31//1 30//1 +f 32//1 31//1 29//1 +f 32//1 33//1 31//1 +f 32//1 34//1 33//1 +f 35//1 34//1 32//1 +f 35//1 36//1 34//1 +f 35//1 37//1 36//1 +f 38//1 37//1 35//1 +f 38//1 39//1 37//1 +f 40//1 39//1 38//1 +f 40//1 41//1 39//1 +f 42//1 41//1 40//1 +f 42//1 43//1 41//1 +f 44//1 43//1 42//1 +f 44//1 45//1 43//1 +f 46//1 45//1 44//1 +f 46//1 47//1 45//1 +f 48//1 47//1 46//1 +f 48//1 49//1 47//1 +f 50//1 49//1 48//1 +f 50//1 51//1 49//1 +f 52//1 51//1 50//1 +f 52//1 53//1 51//1 +f 54//1 53//1 52//1 +f 54//1 55//1 53//1 +f 56//1 55//1 54//1 +f 56//1 57//1 55//1 +f 58//1 57//1 56//1 +f 58//1 59//1 57//1 +f 60//1 59//1 58//1 diff --git a/shapes/Half_Circle.mtl b/shapes/Half_Circle.mtl new file mode 100644 index 0000000..ca15ca0 --- /dev/null +++ b/shapes/Half_Circle.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.001 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 1.000000 1.000000 1.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/shapes/Half_Circle.obj b/shapes/Half_Circle.obj new file mode 100644 index 0000000..3234aca --- /dev/null +++ b/shapes/Half_Circle.obj @@ -0,0 +1,80 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib Half_Circle.mtl +o Half_Circle +v -0.001822 0.000000 -0.006587 +v 0.001822 0.000000 -0.006587 +v 0.000000 0.000000 -0.006710 +v -0.003570 0.000000 -0.006231 +v 0.003570 0.000000 -0.006231 +v -0.005227 0.000000 -0.005656 +v 0.005227 0.000000 -0.005656 +v -0.006778 0.000000 -0.004879 +v 0.006778 0.000000 -0.004879 +v -0.008207 0.000000 -0.003914 +v 0.008207 0.000000 -0.003914 +v -0.009497 0.000000 -0.002781 +v 0.009498 
0.000000 -0.002781 +v -0.010634 0.000000 -0.001492 +v 0.010634 0.000000 -0.001492 +v -0.011600 0.000000 -0.000065 +v 0.011600 0.000000 -0.000065 +v -0.012379 -0.000000 0.001484 +v 0.012379 -0.000000 0.001485 +v -0.012957 -0.000000 0.003141 +v 0.012957 -0.000000 0.003141 +v -0.013316 -0.000000 0.004889 +v 0.013316 -0.000000 0.004889 +v -0.013441 -0.000000 0.006710 +v 0.013441 -0.000000 0.006710 +v -0.012912 -0.000000 0.006710 +v -0.011450 -0.000000 0.006710 +v -0.009241 -0.000000 0.006710 +v -0.006472 -0.000000 0.006710 +v -0.003329 -0.000000 0.006710 +v 0.000000 -0.000000 0.006710 +v 0.003329 -0.000000 0.006710 +v 0.006472 -0.000000 0.006710 +v 0.009241 -0.000000 0.006710 +v 0.011450 -0.000000 0.006710 +v 0.012912 -0.000000 0.006710 +vn 0.0000 1.0000 0.0000 +vn 0.0000 1.0000 -0.0001 +vn 0.0000 0.0000 -1.0000 +vn 0.0000 0.0000 1.0000 +usemtl SVGMat.001 +s 1 +f 1//1 2//1 3//1 +f 4//1 2//1 1//1 +f 4//1 5//1 2//1 +f 6//1 5//1 4//1 +f 6//1 7//1 5//1 +f 8//1 7//1 6//1 +f 8//1 9//1 7//1 +f 10//1 9//1 8//1 +f 10//1 11//1 9//1 +f 12//1 11//1 10//1 +f 12//1 13//1 11//1 +f 14//1 13//1 12//1 +f 14//1 15//1 13//1 +f 16//1 15//1 14//1 +f 16//1 17//1 15//1 +f 18//1 17//1 16//1 +f 18//1 19//1 17//1 +f 20//1 19//1 18//1 +f 20//1 21//1 19//1 +f 22//1 21//1 20//1 +f 22//1 23//1 21//1 +f 24//1 23//1 22//1 +f 24//1 25//2 23//1 +f 26//1 25//2 24//1 +f 27//3 25//4 26//3 +f 28//4 25//4 27//3 +f 29//3 25//4 28//4 +f 30//4 25//4 29//3 +f 31//4 25//4 30//4 +f 32//4 25//4 31//4 +f 33//4 25//4 32//4 +f 34//4 25//4 33//4 +f 35//4 25//4 34//4 +f 36//4 25//4 35//4 diff --git a/shapes/Heart.mtl b/shapes/Heart.mtl new file mode 100644 index 0000000..17a6972 --- /dev/null +++ b/shapes/Heart.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.012 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 1.000000 1.000000 1.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/shapes/Heart.obj b/shapes/Heart.obj new file mode 100644 index 0000000..853dce4 --- /dev/null +++ b/shapes/Heart.obj @@ -0,0 +1,53 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib Heart.mtl +o Heart +v 0.003850 0.000000 -0.012279 +v 0.008587 0.000000 -0.013150 +v 0.006222 0.000000 -0.013228 +v -0.008587 0.000000 -0.013150 +v -0.003850 0.000000 -0.012279 +v -0.006222 0.000000 -0.013228 +v 0.010720 0.000000 -0.012140 +v -0.010720 0.000000 -0.012140 +v 0.001701 0.000000 -0.010211 +v -0.001701 0.000000 -0.010211 +v 0.012391 0.000000 -0.010289 +v -0.012391 0.000000 -0.010289 +v 0.013374 0.000000 -0.007691 +v -0.013374 0.000000 -0.007691 +v 0.000000 0.000000 -0.006929 +v 0.013441 0.000000 -0.004440 +v -0.013441 0.000000 -0.004440 +v 0.012365 0.000000 -0.000630 +v -0.012365 0.000000 -0.000630 +v -0.009917 -0.000000 0.003647 +v 0.009917 -0.000000 0.003647 +v -0.005872 -0.000000 0.008298 +v 0.005872 -0.000000 0.008298 +v 0.000000 -0.000000 0.013228 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.012 +s 1 +f 1//1 2//1 3//1 +f 4//1 5//1 6//1 +f 1//1 7//1 2//1 +f 8//1 5//1 4//1 +f 9//1 7//1 1//1 +f 8//1 10//1 5//1 +f 9//1 11//1 7//1 +f 12//1 10//1 8//1 +f 9//1 13//1 11//1 +f 14//1 10//1 12//1 +f 15//1 13//1 9//1 +f 14//1 15//1 10//1 +f 15//1 16//1 13//1 +f 17//1 15//1 14//1 +f 17//1 16//1 15//1 +f 17//1 18//1 16//1 +f 19//1 18//1 17//1 +f 20//1 18//1 19//1 +f 20//1 21//1 18//1 +f 22//1 21//1 20//1 +f 22//1 23//1 21//1 +f 24//1 23//1 22//1 diff --git a/shapes/Plus.mtl b/shapes/Plus.mtl new file mode 100644 index 0000000..de372bb --- /dev/null +++ b/shapes/Plus.mtl @@ 
-0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.016 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 1.000000 1.000000 1.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/shapes/Plus.obj b/shapes/Plus.obj new file mode 100644 index 0000000..cd74ed2 --- /dev/null +++ b/shapes/Plus.obj @@ -0,0 +1,31 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib Plus.mtl +o Plus +v -0.006324 0.000000 -0.006322 +v 0.006324 0.000000 -0.013438 +v -0.006324 0.000000 -0.013438 +v 0.006324 0.000000 -0.006322 +v -0.013441 -0.000000 0.006322 +v -0.013441 0.000000 -0.006322 +v 0.013441 0.000000 -0.006322 +v 0.013441 -0.000000 0.006322 +v -0.006324 -0.000000 0.006322 +v -0.006324 -0.000000 0.013438 +v 0.006324 -0.000000 0.006322 +v 0.006324 -0.000000 0.013438 +vn 0.0000 1.0000 0.0000 +vn 0.0000 0.0000 1.0000 +vn 0.0000 0.0000 -1.0000 +usemtl SVGMat.016 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 +f 5//1 1//1 6//1 +f 5//1 4//1 1//1 +f 5//1 7//1 4//1 +f 5//1 8//1 7//1 +f 9//2 8//2 5//2 +f 10//1 11//1 9//1 +f 11//3 8//3 9//3 +f 10//1 12//1 11//1 diff --git a/shapes/shapes-1.svg b/shapes/shapes-1.svg new file mode 100644 index 0000000..a71b967 --- /dev/null +++ b/shapes/shapes-1.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +I + + +J + + +K + + +L + + +M + + +N + + + + diff --git a/shapes/shapes-10.svg b/shapes/shapes-10.svg new file mode 100644 index 0000000..cbb67ba --- /dev/null +++ b/shapes/shapes-10.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +E + + +F + + +G + + +H + + +I + + +J + + + + diff --git a/shapes/shapes-11.svg b/shapes/shapes-11.svg new file mode 100644 index 0000000..d7d48b8 --- /dev/null +++ b/shapes/shapes-11.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +K + + +L + + +M + + +N + + +O + + +P + + + + diff --git a/shapes/shapes-12.svg b/shapes/shapes-12.svg new file mode 100644 index 0000000..5c90a1e --- /dev/null +++ b/shapes/shapes-12.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +Q + + +R + + +S + + +T + + +U + + +V + + + + diff --git a/shapes/shapes-13.svg b/shapes/shapes-13.svg new file mode 100644 index 0000000..184c393 --- /dev/null +++ b/shapes/shapes-13.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +W + + +X + + +Y + + +Z + + +A + + +B + + + + diff --git a/shapes/shapes-14.svg b/shapes/shapes-14.svg new file mode 100644 index 0000000..139f84c --- /dev/null +++ b/shapes/shapes-14.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +C + + +D + + +E + + +F + + +G + + +H + + + + diff --git a/shapes/shapes-15.svg b/shapes/shapes-15.svg new file mode 100644 index 0000000..ad7ec40 --- /dev/null +++ b/shapes/shapes-15.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +A + + +B + + +C + + +D + + +E + + +F + + + + diff --git a/shapes/shapes-2.svg b/shapes/shapes-2.svg new file mode 100644 index 0000000..13f3383 --- /dev/null +++ b/shapes/shapes-2.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +O + + +P + + +Q + + +R + + +S + + +T + + + + diff --git a/shapes/shapes-3.svg b/shapes/shapes-3.svg new file mode 100644 index 0000000..8bf9f49 --- /dev/null +++ b/shapes/shapes-3.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +U + + +V + + +W + + +X + + +Y + + +Z + + + + diff --git a/shapes/shapes-4.svg b/shapes/shapes-4.svg new file mode 100644 index 0000000..218208a --- /dev/null +++ b/shapes/shapes-4.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +B + + +C + + +D + + +E + + +F + + +G + + + + diff --git a/shapes/shapes-5.svg b/shapes/shapes-5.svg new file mode 100644 index 0000000..8877ee2 --- /dev/null +++ 
b/shapes/shapes-5.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +H + + +I + + +J + + +K + + +L + + +M + + + + diff --git a/shapes/shapes-6.svg b/shapes/shapes-6.svg new file mode 100644 index 0000000..5400483 --- /dev/null +++ b/shapes/shapes-6.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +G + + +H + + +I + + +J + + +K + + +L + + + + diff --git a/shapes/shapes-7.svg b/shapes/shapes-7.svg new file mode 100644 index 0000000..1a0a256 --- /dev/null +++ b/shapes/shapes-7.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +M + + +N + + +O + + +P + + +Q + + +R + + + + diff --git a/shapes/shapes-8.svg b/shapes/shapes-8.svg new file mode 100644 index 0000000..882b3a5 --- /dev/null +++ b/shapes/shapes-8.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +S + + +T + + +U + + +V + + +W + + +X + + + + diff --git a/shapes/shapes-9.svg b/shapes/shapes-9.svg new file mode 100644 index 0000000..00df825 --- /dev/null +++ b/shapes/shapes-9.svg @@ -0,0 +1,32 @@ + + + + + + + + + + + + +Y + + +Z + + +A + + +B + + +C + + +D + + + + diff --git a/shapes/square.mtl b/shapes/square.mtl new file mode 100644 index 0000000..2a9c57b --- /dev/null +++ b/shapes/square.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.090 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 1.000000 1.000000 1.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/shapes/square.obj b/shapes/square.obj new file mode 100644 index 0000000..9cf0df6 --- /dev/null +++ b/shapes/square.obj @@ -0,0 +1,13 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib square.mtl +o Square +v -0.152400 -0.000000 0.152366 +v 0.152400 0.000000 -0.152366 +v -0.152400 0.000000 -0.152366 +v 0.152400 -0.000000 0.152366 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.090 +s 1 +f 1//1 2//1 3//1 +f 1//1 4//1 2//1 diff --git a/shapes/triangle.mtl b/shapes/triangle.mtl new file mode 100644 index 0000000..13962f2 --- /dev/null +++ b/shapes/triangle.mtl @@ -0,0 +1,12 @@ +# Blender MTL File: 'None' +# Material Count: 1 + +newmtl SVGMat.089 +Ns 323.999994 +Ka 1.000000 1.000000 1.000000 +Kd 1.000000 1.000000 1.000000 +Ks 0.500000 0.500000 0.500000 +Ke 0.000000 0.000000 0.000000 +Ni 1.000000 +d 1.000000 +illum 2 diff --git a/shapes/triangle.obj b/shapes/triangle.obj new file mode 100644 index 0000000..4adfa27 --- /dev/null +++ b/shapes/triangle.obj @@ -0,0 +1,11 @@ +# Blender v2.81 (sub 16) OBJ File: '' +# www.blender.org +mtllib triangle.mtl +o Triangle +v -0.152400 -0.000000 0.101585 +v 0.152400 -0.000000 0.101585 +v 0.000000 0.000000 -0.203170 +vn 0.0000 1.0000 0.0000 +usemtl SVGMat.089 +s 1 +f 1//1 2//1 3//1
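The shapes/*.obj meshes added above are plain Wavefront OBJ files: "v x y z" lines list vertices, "vn" lines list normals, and "f a//n b//n c//n" lines reference 1-based vertex//normal index pairs. A minimal parser sketch covering just those record types, used here to load triangle.obj and print the footprint of the shape in the XZ plane (the shapes are flat, with Y effectively constant):

    import numpy as np

    def load_obj(path):
        """Parses the 'v' and 'f' records of a simple Wavefront OBJ file."""
        vertices, faces = [], []
        with open(path) as f:
            for line in f:
                parts = line.split()
                if not parts:
                    continue
                if parts[0] == 'v':
                    vertices.append([float(x) for x in parts[1:4]])
                elif parts[0] == 'f':
                    # Keep only the vertex index of each 'vertex//normal' pair,
                    # converting from 1-based to 0-based indexing.
                    faces.append([int(p.split('/')[0]) - 1 for p in parts[1:4]])
        return np.array(vertices), np.array(faces)

    verts, faces = load_obj('shapes/triangle.obj')
    xz = verts[:, [0, 2]]
    print('triangles:', len(faces), 'xz bounding box:', xz.min(axis=0), xz.max(axis=0))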