From 36cb05b7b211d4c5d99586dd49d3195de16e4485 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 23:28:33 +0200 Subject: [PATCH 001/277] Continue on Docker arm64 failure (#9430) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c89d0ada3219..67ef565474a4 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,6 +30,7 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . platforms: linux/arm64 From 65afaa78beaa3d68d457e9c49109dc6327003962 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 23:53:36 +0200 Subject: [PATCH 002/277] Continue on Docker failure (all backends) (#9432) Continue on Docker failure (all) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 67ef565474a4..f9eec3bd839e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -12,6 +12,7 @@ jobs: if: github.repository == 'ultralytics/yolov5' name: Push Docker image to Docker Hub runs-on: ubuntu-latest + continue-on-error: true steps: - name: Checkout repo uses: actions/checkout@v3 @@ -30,7 +31,6 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 - continue-on-error: true with: context: . platforms: linux/arm64 From abea53ea5b7d4eba6b58535d31e17336912d0d1f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:10:10 +0200 Subject: [PATCH 003/277] Continue on Docker fail (all backends) fix (#9433) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f9eec3bd839e..1d0bd30b22cb 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -12,7 +12,6 @@ jobs: if: github.repository == 'ultralytics/yolov5' name: Push Docker image to Docker Hub runs-on: ubuntu-latest - continue-on-error: true steps: - name: Checkout repo uses: actions/checkout@v3 @@ -31,6 +30,7 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . platforms: linux/arm64 @@ -40,6 +40,7 @@ jobs: - name: Build and push CPU image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . file: utils/docker/Dockerfile-cpu @@ -48,6 +49,7 @@ jobs: - name: Build and push GPU image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . 
file: utils/docker/Dockerfile From f9869f7ffdbce757f260d28a6b799c5fa50263ee Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 16 Sep 2022 03:42:46 +0530 Subject: [PATCH 004/277] YOLOv5 segmentation model support (#9052) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix duplicate plots.py * Fix check_font() * # torch.use_deterministic_algorithms(True) * update doc detect->predict * Resolve precommit for segment/train and segment/val * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit for utils/segment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit min_wh * Resolve precommit utils/segment/plots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit utils/segment/general * Align NMS-seg closer to NMS * restore deterministic init_seeds code * remove easydict dependency * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * restore output_to_target mask * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * cleanup * Remove unused ImageFont import * Unified NMS * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * DetectMultiBackend compatibility * segment/predict.py update * update plot colors * fix bbox shifted * sort bbox by confidence * enable overlap by default * Merge detect/segment output_to_target() function * Start segmentation CI * fix plots * Update ci-testing.yml * fix training whitespace * optimize process mask functions (can we merge both?) 
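The process-mask functions mentioned in the notes above assemble per-instance masks from the Proto() prototypes and the 32 mask coefficients each detection carries. A minimal sketch of that idea, assuming the 32-prototype layout used in this patch; the helpers actually added here (crop_mask and process_mask in utils/segment/general.py) are the reference implementations and differ in detail:

import torch
import torch.nn.functional as F

def crop_mask(masks, boxes):
    # zero out mask pixels outside each xyxy box; masks (n,h,w), boxes (n,4)
    n, h, w = masks.shape
    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # each (n,1,1)
    cols = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # (1,1,w)
    rows = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # (1,h,1)
    return masks * ((cols >= x1) * (cols < x2) * (rows >= y1) * (rows < y2))

def process_mask_sketch(protos, coeffs, bboxes, shape, upsample=False):
    # protos (32,mh,mw) from Proto(), coeffs (n,32) from the Segment head,
    # bboxes (n,4) xyxy in network-input pixels, shape = (ih, iw) input size
    c, mh, mw = protos.shape
    ih, iw = shape
    masks = (coeffs @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # (n,mh,mw)
    boxes = bboxes.clone()
    boxes[:, [0, 2]] *= mw / iw  # rescale boxes to prototype resolution
    boxes[:, [1, 3]] *= mh / ih
    masks = crop_mask(masks, boxes)
    if upsample:
        masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]
    return masks.gt_(0.5)  # binary masks at network-input (or prototype) resolution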
* Update predict/detect * Update plot_images * Update plot_images_and_masks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Add train to CI * fix precommit * fix precommit CI * fix precommit pycocotools * fix val float issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix masks float float issues * suppress errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix no-predictions plotting bug * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add CSV Logger * fix val len(plot_masks) * speed up evaluation * fix process_mask * fix plots * update segment/utils build_targets * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * optimize utils/segment/general crop() * optimize utils/segment/general crop() 2 * minor updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * torch.where revert * downsample only if different shape * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * loss cleanup * loss cleanup 2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * loss cleanup 3 * update project names * Rename -seg yamls from _underscore to -dash * prepare for yolov5n-seg.pt * precommit space fix * add coco128-seg.yaml * update coco128-seg comments * cleanup val.py * Major val.py cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * precommit fix * precommit fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * optional pycocotools * remove CI pip install pycocotools (auto-installed now) * seg yaml fix * optimize mask_iou() and masks_iou() * threaded fix * Major train.py update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Major segments/val/process_batch() update * yolov5/val updates from segment * process_batch numpy/tensor fix * opt-in to pycocotools with --save-json * threaded pycocotools ops for 2x speed increase * Avoid permute contiguous if possible * Add max_det=300 argument to both val.py and segment/val.py * fix onnx_dynamic * speed up pycocotools ops * faster process_mask(upsample=True) for predict * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * eliminate permutations for process_mask(upsample=True) * eliminate permute-contiguous in crop(), use native dimension order * cleanup comment * Add Proto() module * fix class count * fix anchor order * broadcast mask_gti in loss for speed * Cleanup seg loss * faster indexing * faster indexing fix * faster indexing fix2 * revert faster indexing * fix validation plotting * Loss cleanup and mxyxy simplification * Loss cleanup and mxyxy simplification 2 * revert validation plotting * replace missing tanh * Eliminate last permutation * delete unneeded .float() * Remove MaskIOULoss and crop(if HWC) * Final v6.3 SegmentationModel architecture updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add support for TF export * remove debugger trace * add call * update * update * Merge master * Merge master * [pre-commit.ci] auto fixes 
from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * Restore CI * Update dataloaders.py * Fix TF/TFLite export for segmentation model * Merge master * Cleanup predict.py mask plotting * cleanup scale_masks() * rename scale_masks to scale_image * cleanup/optimize plot_masks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add Annotator.masks() * Annotator.masks() fix * Update plots.py * Annotator mask optimization * Rename crop() to crop_mask() * Do not crop in predict.py * crop always * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Merge master * Add vid-stride from master PR * Update seg model outputs * Update seg model outputs * Add segmentation benchmarks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add segmentation benchmarks * Add segmentation benchmarks * Add segmentation benchmarks * Fix DetectMultiBackend for OpenVINO * update Annotator.masks * fix val plot * revert val plot * clean up * revert pil * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix CI error * fix predict log * remove upsample * update interpolate * fix validation plot logging * Annotator.masks() cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove segmentation_model definition * Restore 0.99999 decimals Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: Laughing-q <1185102784@qq.com> Co-authored-by: Jiacong Fang --- .github/workflows/ci-testing.yml | 22 +- utils/benchmarks.py => benchmarks.py | 20 +- data/coco128-seg.yaml | 101 ++++ detect.py | 4 +- models/common.py | 18 +- models/segment/yolov5l-seg.yaml | 48 ++ models/segment/yolov5m-seg.yaml | 48 ++ models/segment/yolov5n-seg.yaml | 48 ++ models/segment/yolov5s-seg.yaml | 48 ++ models/segment/yolov5x-seg.yaml | 48 ++ models/tf.py | 36 +- models/yolo.py | 58 ++- segment/predict.py | 266 +++++++++++ segment/train.py | 676 +++++++++++++++++++++++++++ segment/val.py | 471 +++++++++++++++++++ utils/dataloaders.py | 1 + utils/general.py | 45 +- utils/metrics.py | 10 +- utils/plots.py | 71 ++- utils/segment/__init__.py | 0 utils/segment/augmentations.py | 104 +++++ utils/segment/dataloaders.py | 330 +++++++++++++ utils/segment/general.py | 120 +++++ utils/segment/loss.py | 186 ++++++++ utils/segment/metrics.py | 210 +++++++++ utils/segment/plots.py | 143 ++++++ val.py | 30 +- 27 files changed, 3091 insertions(+), 71 deletions(-) rename utils/benchmarks.py => benchmarks.py (87%) create mode 100644 data/coco128-seg.yaml create mode 100644 models/segment/yolov5l-seg.yaml create mode 100644 models/segment/yolov5m-seg.yaml create mode 100644 models/segment/yolov5n-seg.yaml create mode 100644 models/segment/yolov5s-seg.yaml create mode 100644 models/segment/yolov5x-seg.yaml create mode 100644 segment/predict.py create mode 100644 segment/train.py create mode 100644 segment/val.py mode change 100755 => 100644 utils/dataloaders.py mode change 100755 => 100644 utils/general.py create mode 100644 utils/segment/__init__.py create mode 100644 utils/segment/augmentations.py create mode 100644 utils/segment/dataloaders.py create mode 100644 utils/segment/general.py create mode 100644 utils/segment/loss.py create mode 100644 utils/segment/metrics.py create mode 
100644 utils/segment/plots.py diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index a83f997cbfc2..537ba96e7225 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -15,6 +15,7 @@ jobs: Benchmarks: runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: os: [ ubuntu-latest ] python-version: [ '3.9' ] # requires python<=3.9 @@ -37,9 +38,12 @@ jobs: python --version pip --version pip list - - name: Run benchmarks + - name: Benchmark DetectionModel + run: | + python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 + - name: Benchmark SegmentationModel run: | - python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 + python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 Tests: timeout-minutes: 60 @@ -126,6 +130,20 @@ jobs: model(im) # warmup, build grids for trace torch.jit.trace(model, [im]) EOF + - name: Test segmentation + shell: bash # for Windows compatibility + run: | + m=${{ matrix.model }}-seg # official weights + b=runs/train-seg/exp/weights/best # best.pt checkpoint + python segment/train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu # train + python segment/train.py --imgsz 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device cpu # train + for d in cpu; do # devices + for w in $m $b; do # weights + python segment/val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val + python segment/predict.py --imgsz 64 --weights $w.pt --device $d # predict + python export.py --weights $w.pt --img 64 --include torchscript --device $d # export + done + done - name: Test classification shell: bash # for Windows compatibility run: | diff --git a/utils/benchmarks.py b/benchmarks.py similarity index 87% rename from utils/benchmarks.py rename to benchmarks.py index 9d5c7f2965d5..58e083c95d55 100644 --- a/utils/benchmarks.py +++ b/benchmarks.py @@ -34,16 +34,19 @@ import pandas as pd FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory +ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH # ROOT = ROOT.relative_to(Path.cwd()) # relative import export -import val +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from segment.val import run as val_seg from utils import notebook_init from utils.general import LOGGER, check_yaml, file_size, print_args from utils.torch_utils import select_device +from val import run as val_det def run( @@ -59,6 +62,7 @@ def run( ): y, t = [], time.time() device = select_device(device) + model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. 
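    # Note: the checkpoint type resolved above selects the validator used for each export format below.
    # SegmentationModel weights are scored with segment/val.py (mask mAP50-95, result[0][7]);
    # all other models use val.py (box mAP50-95, result[0][3]).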
for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported @@ -76,10 +80,14 @@ def run( assert suffix in str(w), 'export failed' # Validate - result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) - metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) - speeds = result[2] # times (preprocess, inference, postprocess) - y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference + if model_type == SegmentationModel: + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) + else: # DetectionModel: + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) + speed = result[2][1] # times (preprocess, inference, postprocess) + y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml new file mode 100644 index 000000000000..5e81910cc456 --- /dev/null +++ b/data/coco128-seg.yaml @@ -0,0 +1,101 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128-seg ← downloads here (7 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
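For orientation, this dataset yaml is consumed the same way as the detection yamls: path is the dataset root, train/val point at image folders or list files relative to it, and names maps class indices to strings. The matching segmentation labels are not shown in this hunk; by the usual YOLOv5 -seg convention (an assumption, not spelled out in this patch) they are per-image txt files with one instance per row, a class index followed by a normalized polygon. A small illustrative loader, with the yaml path assumed relative to the repo root:

import yaml
from pathlib import Path

cfg = yaml.safe_load(Path('data/coco128-seg.yaml').read_text())
root = Path(cfg['path'])                                  # ../datasets/coco128-seg
train_images, val_images = root / cfg['train'], root / cfg['val']
names = cfg['names']                                      # {0: 'person', 1: 'bicycle', ...}
print(f'{len(names)} classes, train: {train_images}, val: {val_images}')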
+path: ../datasets/coco128-seg # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128-seg.zip diff --git a/detect.py b/detect.py index a69606a3dff9..310d169281bf 100644 --- a/detect.py +++ b/detect.py @@ -149,8 +149,8 @@ def run( det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() # Print results - for c in det[:, -1].unique(): - n = (det[:, -1] == c).sum() # detections per class + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results diff --git a/models/common.py b/models/common.py index 8b7dbbfa95fe..0d90ff4f8827 100644 --- a/models/common.py +++ b/models/common.py @@ -375,7 +375,6 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, if batch_dim.is_static: batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 - output_layer = next(iter(executable_network.outputs)) stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -491,7 +490,7 @@ def forward(self, im, augment=False, visualize=False): y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 - y = self.executable_network([im])[self.output_layer] + y = list(self.executable_network([im]).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) @@ -786,8 +785,21 @@ def __str__(self): return '' +class Proto(nn.Module): + # YOLOv5 mask Proto module for segmentation models + def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks + super().__init__() + self.cv1 = Conv(c1, c_, k=3) + self.upsample = nn.Upsample(scale_factor=2, mode='nearest') + self.cv2 = Conv(c_, c_, k=3) + self.cv3 = Conv(c_, c2) + + def forward(self, x): + return self.cv3(self.cv2(self.upsample(self.cv1(x)))) + + class Classify(nn.Module): - # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() c_ = 1280 # efficientnet_b0 size diff --git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml new file mode 100644 index 000000000000..4782de11dd2d --- /dev/null +++ b/models/segment/yolov5l-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml new file mode 100644 index 000000000000..f73d1992ac19 --- /dev/null +++ b/models/segment/yolov5m-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] \ No newline at end of file diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml new file mode 100644 index 
000000000000..c28225ab4a50 --- /dev/null +++ b/models/segment/yolov5n-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml new file mode 100644 index 000000000000..7cbdb36b425c --- /dev/null +++ b/models/segment/yolov5s-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.5 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] \ No newline at end of file diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml new file mode 100644 index 000000000000..5d0c4524a99c --- /dev/null +++ b/models/segment/yolov5x-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - 
[116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/tf.py b/models/tf.py index ecb0d4d79c78..8cce147059d3 100644 --- a/models/tf.py +++ b/models/tf.py @@ -30,7 +30,7 @@ from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, DWConvTranspose2d, Focus, autopad) from models.experimental import MixConv2d, attempt_load -from models.yolo import Detect +from models.yolo import Detect, Segment from utils.activations import SiLU from utils.general import LOGGER, make_divisible, print_args @@ -320,6 +320,36 @@ def _make_grid(nx=20, ny=20): return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) +class TFSegment(TFDetect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None): + super().__init__(nc, anchors, ch, imgsz, w) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv + self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos + self.detect = TFDetect.call + + def call(self, x): + p = self.proto(x[0]) + x = self.detect(self, x) + return (x, p) if self.training else ((x[0], p),) + + +class TFProto(keras.layers.Layer): + + def __init__(self, c1, c_=256, c2=32, w=None): + super().__init__() + self.cv1 = TFConv(c1, c_, k=3, w=w.cv1) + self.upsample = TFUpsample(None, scale_factor=2, mode='nearest') + self.cv2 = TFConv(c_, c_, k=3, w=w.cv2) + self.cv3 = TFConv(c_, c2, w=w.cv3) + + def call(self, inputs): + return self.cv3(self.cv2(self.upsample(self.cv1(inputs)))) + + class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' @@ -377,10 +407,12 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) args = [ch[f]] elif m is Concat: c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) - elif m is Detect: + elif m in [Detect, Segment]: args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) args.append(imgsz) else: c2 = ch[f] diff --git a/models/yolo.py b/models/yolo.py index fa05fcf9a8d9..a0702a7c0257 100644 --- a/models/yolo.py 
+++ b/models/yolo.py @@ -36,6 +36,7 @@ class Detect(nn.Module): + # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode @@ -63,15 +64,16 @@ def forward(self, x): if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - y = x[i].sigmoid() + y = x[i].clone() + y[..., :5 + self.nc].sigmoid_() if self.inplace: y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy, wh, etc = y.split((2, 2, self.no - 4), 4) # tensor_split((2, 4, 5), 4) if torch 1.8.0 xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf), 4) + y = torch.cat((xy, wh, etc), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) @@ -87,6 +89,23 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version return grid, anchor_grid +class Segment(Detect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): + super().__init__(nc, anchors, ch, inplace) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.proto = Proto(ch[0], self.npr, self.nm) # protos + self.detect = Detect.forward + + def forward(self, x): + p = self.proto(x[0]) + x = self.detect(self, x) + return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) + + class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): @@ -135,7 +154,7 @@ def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) m = self.model[-1] # Detect() - if isinstance(m, Detect): + if isinstance(m, (Detect, Segment)): m.stride = fn(m.stride) m.grid = list(map(fn, m.grid)) if isinstance(m.anchor_grid, list): @@ -169,11 +188,12 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i # Build strides, anchors m = self.model[-1] # Detect() - if isinstance(m, Detect): + if isinstance(m, (Detect, Segment)): s = 256 # 2x min stride m.inplace = self.inplace - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.empty(1, ch, s, s))]) # forward - check_anchor_order(m) # must be in pixel-space (not grid-space) + forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) + m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride self._initialize_biases() # only run once @@ -235,15 +255,21 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
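The constants in the bias offsets a few lines below follow the prior-probability initialization idea from the focal-loss paper referenced above: seed the objectness logits so that roughly 8 objects are expected per 640x640 image at each output stride, and seed every class logit with a small uniform prior. A quick numeric check for the stride-8 layer with nc=80 (illustrative values only):

import math

s, nc = 8, 80
obj_bias = math.log(8 / (640 / s) ** 2)    # 8 objects spread over 80*80 = 6400 cells, about -6.68
cls_bias = math.log(0.6 / (nc - 0.99999))  # roughly log(0.6 / 79), about -4.88
print(f'{obj_bias:.2f} {cls_bias:.2f}')    # sigmoid(-6.68) is about 0.00125 = 8 / 6400

Starting from these small priors keeps the untrained head from firing everywhere, which avoids a large objectness and class loss spike in the first iterations.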
m = self.model[-1] # Detect() module for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1).detach() # conv.bias(255) to (3,85) - b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility +class SegmentationModel(DetectionModel): + # YOLOv5 segmentation model + def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): + super().__init__(cfg, ch, nc, anchors) + + class ClassificationModel(BaseModel): # YOLOv5 classification model def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index @@ -284,24 +310,28 @@ def parse_model(d, ch): # model_dict, input_channels(3) args[j] = eval(a) if isinstance(a, str) else a # eval strings n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x): + if m in { + Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]: + if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) - elif m is Detect: + # TODO: channel, gw, gd + elif m in {Detect, Segment}: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: diff --git a/segment/predict.py b/segment/predict.py new file mode 100644 index 000000000000..ba4cf2905255 --- /dev/null +++ b/segment/predict.py @@ -0,0 +1,266 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. 
+ +Usage - sources: + $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch + yolov5s-seg.torchscript # TorchScript + yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-seg.xml # OpenVINO + yolov5s-seg.engine # TensorRT + yolov5s-seg.mlmodel # CoreML (macOS-only) + yolov5s-seg_saved_model # TensorFlow SavedModel + yolov5s-seg.pb # TensorFlow GraphDef + yolov5s-seg.tflite # TensorFlow Lite + yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-seg_paddle_model # PaddlePaddle +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.segment.general import process_mask +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-seg', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride + retina_masks=False, +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + if webcam: + view_img = check_imshow() + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred, proto = model(im, augment=augment, visualize=visualize)[:2] + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain 
whwh + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Mask plotting + annotator.masks(masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=None if retina_masks else im[i]) + + # Write results + for *xyxy, conf, cls in reversed(det[:, :6]): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + if cv2.waitKey(1) == ord('q'): # 1 millisecond + exit() + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', 
'--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/segment/train.py b/segment/train.py new file mode 100644 index 000000000000..bda379176151 --- /dev/null +++ b/segment/train.py @@ -0,0 +1,676 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 segment model on a segment dataset +Models and datasets download automatically from the latest YOLOv5 release. 
+ +Usage - Single-GPU training: + $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended) + $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch + +Usage - Multi-GPU DDP training: + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 + +Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +""" + +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import torch.nn.functional as F + +import segment.val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, + check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, + init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, + print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import plot_evolve, plot_labels +from utils.segment.dataloaders import create_dataloader +from utils.segment.loss import ComputeLoss +from utils.segment.metrics import KEYS, fitness +from utils.segment.plots import plot_images_and_masks, plot_results_with_masks +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio + # callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not 
evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + logger = GenericLogger(opt=opt, console_logger=LOGGER) + # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + # if loggers.clearml: + # data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML + # if loggers.wandb: + # data_dict = loggers.wandb.data_dict + # if resume: + # weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + # + # # Register actions + # for k in methods(loggers): + # callbacks.register_action(k, callback=getattr(loggers, k)) + + # Config + plots = not evolve and not opt.noplots # create plots + overlap = not opt.no_overlap + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + logger.update_params({"batch_size": batch_size}) + # loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + ) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + prefix=colorstr('val: '))[0] + + if not resume: + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + if plots: + plot_labels(labels, names, save_dir) + # callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + hyp['box'] *= 3 / nl # scale to layers + hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model, overlap=overlap) # init loss class + # callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + # callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(4, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 8) % + ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ + # callbacks.run('on_train_batch_start') + ni = i + nb * epoch # 
number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float()) + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 6) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + # if callbacks.stop_training: + # return + + # Mosaic plots + if plots: + if ni < 3: + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + if ni == 10: + files = sorted(save_dir.glob('train*.jpg')) + logger.log_images(files, "Mosaics", epoch) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + # callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + # 
callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + # Log val metrics and media + metrics_dict = dict(zip(KEYS, log_vals)) + logger.log_metrics(metrics_dict, epoch) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'opt': vars(opt), + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + logger.log_model(w / f'epoch{epoch}.pt') + del ckpt + # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) # val best model with plots + if is_coco: + # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) + logger.log_metrics(metrics_dict, epoch) + + # callbacks.run('on_train_end', last, best, epoch, results) + # on train end callback using genericLogger + logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) + if not opt.evolve: + logger.log_model(best, epoch) + if plots: + plot_results_with_masks(file=save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + logger.log_images(files, "Results", epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 
'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
+    parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
+    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
+    parser.add_argument('--rect', action='store_true', help='rectangular training')
+    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
+    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
+    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+    parser.add_argument('--noplots', action='store_true', help='save no plot files')
+    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
+    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
+    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--quad', action='store_true', help='quad dataloader')
+    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
+    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
+    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
+    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
+    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+    # Instance Segmentation Args
+    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to save memory')
+    parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP')
+
+    # Weights & Biases arguments
+    # parser.add_argument('--entity', default=None, help='W&B: Entity')
+    # parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
+    # 
parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + # parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + check_git_status() + check_requirements() + + # Resume + if opt.resume and not opt.evolve: # resume from specified or most recent last.pt + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': 
(2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
+            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
+            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
+            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
+            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
+            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
+            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
+            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
+            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
+            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
+            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
+            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
+
+        with open(opt.hyp, errors='ignore') as f:
+            hyp = yaml.safe_load(f)  # load hyps dict
+            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
+                hyp['anchors'] = 3
+        if opt.noautoanchor:
+            del hyp['anchors'], meta['anchors']
+        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
+        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
+        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
+        if opt.bucket:
+            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists
+
+        for _ in range(opt.evolve):  # generations to evolve
+            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
+                # Select parent(s)
+                parent = 'single'  # parent selection method: 'single' or 'weighted'
+                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
+                n = min(5, len(x))  # number of previous results to consider
+                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
+                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
+                if parent == 'single' or len(x) == 1:
+                    # x = x[random.randint(0, n - 1)]  # random selection
+                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
+                elif parent == 'weighted':
+                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
+
+                # Mutate
+                mp, s = 0.8, 0.2  # mutation probability, sigma
+                npr = np.random
+                npr.seed(int(time.time()))
+                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
+                ng = len(meta)
+                v = np.ones(ng)
+                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
+                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
+                    hyp[k] = float(x[i + 7] * v[i])  # mutate
+
+            # Constrain to limits
+            for k, v in meta.items():
+                hyp[k] = max(hyp[k], v[1])  # lower limit
+                hyp[k] = min(hyp[k], v[2])  # upper limit
+                hyp[k] = round(hyp[k], 5)  # significant digits
+
+            # Train mutation
+            results = train(hyp.copy(), opt, device, callbacks)
+            callbacks = Callbacks()
+            # Write mutation results
+            print_mutation(results, hyp.copy(), save_dir, opt.bucket)
+
+        # Plot results
+        plot_evolve(evolve_csv)
+        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
+                    f"Results saved to {colorstr('bold', save_dir)}\n"
+                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
+
+
+def run(**kwargs):
+    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
+    opt = parse_opt(True)
+    for k, v in kwargs.items():
+        
setattr(opt, k, v)
+    main(opt)
+    return opt
+
+
+if __name__ == "__main__":
+    opt = parse_opt()
+    main(opt)
diff --git a/segment/val.py b/segment/val.py
new file mode 100644
index 000000000000..138aa00aaed3
--- /dev/null
+++ b/segment/val.py
@@ -0,0 +1,471 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Validate a trained YOLOv5 segmentation model on a segmentation dataset
+
+Usage:
+    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
+    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments
+
+Usage - formats:
+    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
+                                      yolov5s-seg.torchscript        # TorchScript
+                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                      yolov5s-seg.xml                # OpenVINO
+                                      yolov5s-seg.engine             # TensorRT
+                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
+                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
+                                      yolov5s-seg.pb                 # TensorFlow GraphDef
+                                      yolov5s-seg.tflite             # TensorFlow Lite
+                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
+                                      yolov5s-seg_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import json
+import os
+import sys
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+
+import numpy as np
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+import torch.nn.functional as F
+
+from models.common import DetectMultiBackend
+from models.yolo import SegmentationModel
+from utils.callbacks import Callbacks
+from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml,
+                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
+                           scale_coords, xywh2xyxy, xyxy2xywh)
+from utils.metrics import ConfusionMatrix, box_iou
+from utils.plots import output_to_target, plot_val_study
+from utils.segment.dataloaders import create_dataloader
+from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image
+from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
+from utils.segment.plots import plot_images_and_masks
+from utils.torch_utils import de_parallel, select_device, smart_inference_mode
+
+
+def save_one_txt(predn, save_conf, shape, file):
+    # Save one txt result
+    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
+    for *xyxy, conf, cls in predn.tolist():
+        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+        with open(file, 'a') as f:
+            f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+
+def save_one_json(predn, jdict, path, class_map, pred_masks):
+    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+    from pycocotools.mask import encode
+
+    def single_encode(x):
+        rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
+        rle["counts"] = rle["counts"].decode("utf-8")
+        return rle
+
+    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
+    box = xyxy2xywh(predn[:, :4])  # xywh
+    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+    pred_masks = np.transpose(pred_masks, (2, 0, 1))
+    with ThreadPool(NUM_THREADS) as pool:
+        rles = pool.map(single_encode, pred_masks)
+    for i, (p, b) in enumerate(zip(predn.tolist(), 
box.tolist())): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + +def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val-seg', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + overlap=False, + mask_downsample_ratio=1, + compute_loss=None, + callbacks=Callbacks(), +): + if save_json: + check_requirements(['pycocotools']) + process = process_mask_upsample # more accurate + else: + process = process_mask # faster + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + nm = de_parallel(model).model[-1].nm # number of masks + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad = 0.0 if task in ('speed', 'benchmark') else 0.5 + rect = False if task == 'benchmark' else pt # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '), + overlap_mask=overlap, + mask_downsample_ratio=mask_downsample_ratio)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", + "mAP50", "mAP50-95)") + dt = Profile(), Profile(), Profile() + metrics = Metrics() + loss = torch.zeros(4, device=device) + jdict, stats = [], [] + # callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): + # callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + masks = masks.to(device) + masks = masks.float() + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) + + # Loss + if compute_loss: + loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det, + nm=nm) + + # Metrics + plot_masks = [] # masks for plotting + for si, (pred, proto) in enumerate(zip(preds, protos)): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Masks + midx = [si] if overlap else targets[:, 0] == si + gt_masks = masks[midx] + pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct_bboxes = process_batch(predn, labelsn, 
iouv) + correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if plots and batch_i < 3: + plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + pred_masks = scale_image(im[si].shape[1:], + pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) + save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary + # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + if len(plot_masks): + plot_masks = torch.cat(plot_masks, dim=0) + plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) + plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, + save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + # callbacks.run('on_val_batch_end') + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) + metrics.update(results) + nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format + LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + if nt.sum() == 0: + LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(metrics.ap_class_index): + LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + # callbacks.run('on_val_end') + + mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + results = [] + for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) + map_bbox, map50_bbox, map_mask, map50_mask = results + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask + return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + # opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + if opt.save_hybrid: + LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = True # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/utils/dataloaders.py b/utils/dataloaders.py old mode 100755 new mode 100644 index d8ef11fd94b4..c04be853c580 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -484,6 +484,7 @@ def __init__(self, self.im_files = [self.im_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] self.shapes = s[irect] # wh ar = ar[irect] diff --git a/utils/general.py b/utils/general.py old mode 100755 new mode 100644 index f5fb2c93a3d5..8633511f89f5 --- a/utils/general.py +++ b/utils/general.py @@ -798,15 +798,18 @@ def clip_coords(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 -def non_max_suppression(prediction, - conf_thres=0.25, - iou_thres=0.45, - classes=None, - agnostic=False, - multi_label=False, - labels=(), - max_det=300): - """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nm=0, # number of masks +): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] @@ -816,7 +819,7 @@ def non_max_suppression(prediction, prediction = prediction[0] # select only inference output bs = prediction.shape[0] # batch size - nc = prediction.shape[2] - 5 # number of classes + nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates # Checks @@ -827,13 +830,14 @@ def non_max_suppression(prediction, # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 0.3 + 0.03 * bs # seconds to quit after + time_limit = 0.5 + 0.05 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() - output = [torch.zeros((0, 6), device=prediction.device)] * bs + mi = 5 + nc # mask start index + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height @@ -842,7 +846,7 @@ def non_max_suppression(prediction, # Cat apriori labels if autolabelling if labels and len(labels[xi]): lb = labels[xi] - v = torch.zeros((len(lb), nc + 5), device=x.device) + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) v[:, :4] = lb[:, 1:5] # box v[:, 4] = 1.0 # conf v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls @@ -855,16 +859,17 @@ def non_max_suppression(prediction, # Compute conf x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - # Box (center x, center y, width, 
height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) + # Box/Mask + box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2) + mask = x[:, mi:] # zero columns if no masks # Detections matrix nx6 (xyxy, conf, cls) if multi_label: - i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1) else: # best class only - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + conf, j = x[:, 5:mi].max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] # Filter by class if classes is not None: @@ -880,6 +885,8 @@ def non_max_suppression(prediction, continue elif n > max_nms: # excess boxes x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + else: + x = x[x[:, 4].argsort(descending=True)] # sort by confidence # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes diff --git a/utils/metrics.py b/utils/metrics.py index ee7d33982cfc..001813cbcd65 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -28,7 +28,7 @@ def smooth(y, f=0.05): return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments @@ -83,10 +83,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = dict(enumerate(names)) # to dict if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') i = smooth(f1.mean(0), 0.1).argmax() # max F1 index p, r, f1 = p[:, i], r[:, i], f1[:, i] diff --git a/utils/plots.py b/utils/plots.py index 0530d0abdf48..d8d5b225a774 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -23,6 +23,7 @@ from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, is_ascii, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness +from utils.segment.general import scale_image # Settings RANK = int(os.getenv('RANK', -1)) @@ -113,6 +114,52 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 thickness=tf, lineType=cv2.LINE_AA) + def masks(self, masks, colors, im_gpu=None, alpha=0.5): + """Plot masks at once. 
+ Args: + masks (tensor): predicted masks on cuda, shape: [n, h, w] + colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] + im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] + alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque + """ + if self.pil: + # convert to numpy first + self.im = np.asarray(self.im).copy() + if im_gpu is None: + # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) + if len(masks) == 0: + return + if isinstance(masks, torch.Tensor): + masks = torch.as_tensor(masks, dtype=torch.uint8) + masks = masks.permute(1, 2, 0).contiguous() + masks = masks.cpu().numpy() + # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) + masks = scale_image(masks.shape[:2], masks, self.im.shape) + masks = np.asarray(masks, dtype=np.float32) + colors = np.asarray(colors, dtype=np.float32) # shape(n,3) + s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together + masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) + self.im[:] = masks * alpha + self.im * (1 - s * alpha) + else: + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) + if self.pil: + # convert im back to PIL and update draw + self.fromarray(self.im) + def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) @@ -124,6 +171,11 @@ def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): xy[1] += 1 - h self.draw.text(xy, text, fill=txt_color, font=self.font) + def fromarray(self, im): + # Update self.im from a numpy array + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + def result(self): # Return annotated image as array return np.asarray(self.im) @@ -180,26 +232,31 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def output_to_target(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] +def output_to_target(output, max_det=300): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting targets = [] for i, o in enumerate(output): - targets.extend([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf] for *box, conf, cls in o.cpu().numpy()) - return np.array(targets) + box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) + return torch.cat(targets, 0).numpy() @threaded -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): +def plot_images(images, targets, paths=None, fname='images.jpg', names=None): # Plot image grid with labels if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if 
isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py new file mode 100644 index 000000000000..169addedf0f5 --- /dev/null +++ b/utils/segment/augmentations.py @@ -0,0 +1,104 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np + +from ..augmentations import box_candidates +from ..general import resample_segments, segment2box + + +def mixup(im, labels, segments, im2, labels2, segments2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + segments = np.concatenate((segments, segments2), 0) + return im, labels, segments + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) + T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform 
label coordinates + n = len(targets) + new_segments = [] + if n: + new = np.zeros((n, 4)) + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + new_segments.append(xy) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) + targets = targets[i] + targets[:, 1:5] = new[i] + new_segments = np.array(new_segments)[i] + + return im, targets, new_segments diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py new file mode 100644 index 000000000000..f6fe642d077f --- /dev/null +++ b/utils/segment/dataloaders.py @@ -0,0 +1,330 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders +""" + +import os +import random + +import cv2 +import numpy as np +import torch +from torch.utils.data import DataLoader, distributed + +from ..augmentations import augment_hsv, copy_paste, letterbox +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker +from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn +from ..torch_utils import torch_distributed_zero_first +from .augmentations import mixup, random_perspective + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabelsAndMasks( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix, + downsample_ratio=mask_downsample_ratio, + overlap=overlap_mask) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + # generator = torch.Generator() + # generator.manual_seed(0) + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, + worker_init_fn=seed_worker, + # generator=generator, + ), dataset + + +class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0, + prefix="", + downsample_ratio=1, + overlap=False, + ): + super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, + stride, pad, prefix) + 
self.downsample_ratio = downsample_ratio + self.overlap = overlap + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + masks = [] + if mosaic: + # Load mosaic + img, labels, segments = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp["mixup"]: + img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy + segments = self.segments[index].copy() + if len(segments): + for i_s in range(len(segments)): + segments[i_s] = xyn2xy( + segments[i_s], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1], + ) + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels, segments = random_perspective( + img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"], + return_seg=True, + ) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) + if self.overlap: + masks, sorted_idx = polygons2masks_overlap(img.shape[:2], + segments, + downsample_ratio=self.downsample_ratio) + masks = masks[None] # (640, 640) -> (1, 640, 640) + labels = labels[sorted_idx] + else: + masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) + + masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // + self.downsample_ratio, img.shape[1] // + self.downsample_ratio)) + # TODO: albumentations support + if self.augment: + # Albumentations + # there are some augmentation that won't change boxes and masks, + # so just be it for now. + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + + # Flip up-down + if random.random() < hyp["flipud"]: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + masks = torch.flip(masks, dims=[1]) + + # Flip left-right + if random.random() < hyp["fliplr"]: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + masks = torch.flip(masks, dims=[2]) + + # Cutouts # labels = cutout(img, labels, p=0.5) + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + + # 3 additional image indices + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels, segments = self.labels[index].copy(), self.segments[index].copy() + + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp["degrees"], + translate=self.hyp["translate"], + scale=self.hyp["scale"], + shear=self.hyp["shear"], + perspective=self.hyp["perspective"], + border=self.mosaic_border) # border to remove + return img4, labels4, segments4 + + @staticmethod + def collate_fn(batch): + img, label, path, shapes, masks = zip(*batch) # transposed + batched_masks = torch.cat(masks, 0) + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks + + +def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (np.ndarray): [N, M], N is the number of polygons, + M is the number of points(Be divided by 2). + """ + mask = np.zeros(img_size, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) + # NOTE: fillPoly firstly then resize is trying the keep the same way + # of loss calculation when mask-ratio=1. + mask = cv2.resize(mask, (nw, nh)) + return mask + + +def polygons2masks(img_size, polygons, color, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. 
+ polygons (list[np.ndarray]): each polygon is [N, M], + N is the number of polygons, + M is the number of points(Be divided by 2). + """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) + + +def polygons2masks_overlap(img_size, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), dtype=np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask( + img_size, + [segments[si].reshape(-1)], + downsample_ratio=downsample_ratio, + color=1, + ) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index diff --git a/utils/segment/general.py b/utils/segment/general.py new file mode 100644 index 000000000000..36547ed0889c --- /dev/null +++ b/utils/segment/general.py @@ -0,0 +1,120 @@ +import cv2 +import torch +import torch.nn.functional as F + + +def crop_mask(masks, boxes): + """ + "Crop" predicted masks by zeroing out everything not in the predicted bbox. + Vectorized by Chong (thanks Chong). + + Args: + - masks should be a size [h, w, n] tensor of masks + - boxes should be a size [n, 4] tensor of bbox coords in relative point form + """ + + n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) + r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) + c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) + + +def process_mask_upsample(protos, masks_in, bboxes, shape): + """ + Crop after upsample. + proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def process_mask(protos, masks_in, bboxes, shape, upsample=False): + """ + Crop before upsample. 
+ proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + ih, iw = shape + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + + downsampled_bboxes = bboxes.clone() + downsampled_bboxes[:, 0] *= mw / iw + downsampled_bboxes[:, 2] *= mw / iw + downsampled_bboxes[:, 3] *= mh / ih + downsampled_bboxes[:, 1] *= mh / ih + + masks = crop_mask(masks, downsampled_bboxes) # CHW + if upsample: + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + return masks.gt_(0.5) + + +def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): + """ + img1_shape: model input shape, [h, w] + img0_shape: origin pic shape, [h, w, 3] + masks: [h, w, num] + """ + # Rescale coordinates (xyxy) from im1_shape to im0_shape + if ratio_pad is None: # calculate from im0_shape + gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new + pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding + else: + pad = ratio_pad[1] + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) + + if len(masks.shape) < 2: + raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') + masks = masks[top:bottom, left:right] + # masks = masks.permute(2, 0, 1).contiguous() + # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] + # masks = masks.permute(1, 2, 0).contiguous() + masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) + + if len(masks.shape) == 2: + masks = masks[:, :, None] + return masks + + +def mask_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [M, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, [N, M] + """ + intersection = torch.matmul(mask1, mask2.t()).clamp(0) + union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [N, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, (N, ) + """ + intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) + union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection + return intersection / (union + eps) diff --git a/utils/segment/loss.py b/utils/segment/loss.py new file mode 100644 index 000000000000..b45b2c27e0a0 --- /dev/null +++ b/utils/segment/loss.py @@ -0,0 +1,186 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..general import xywh2xyxy +from ..loss import FocalLoss, smooth_BCE +from ..metrics import bbox_iou +from ..torch_utils import de_parallel +from .general import crop_mask + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False, overlap=False): + self.sort_obj_iou = False + self.overlap = overlap + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + self.device = device + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + 
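
A hand-built toy example (not part of the patch) of the index-encoded masks that polygons2masks_overlap() above produces and that the segment loss and plotting code later decode. The real function rasterizes polygons with cv2.fillPoly and sorts instances by area; here the array is filled by hand just to show the encoding, where pixel value i+1 marks instance i and 0 is background, and the `== index` decoding back to per-instance binary masks.

```python
import numpy as np
import torch

overlap = np.zeros((4, 4), dtype=np.uint8)   # one uint8 mask for the whole image
overlap[0:3, 0:3] = 1                        # instance 0 (largest area) -> value 1
overlap[2:4, 2:4] = 2                        # instance 1 -> value 2 (wins where it overlaps)
overlap[3, 0] = 3                            # instance 2 (single pixel) -> value 3

masks = torch.from_numpy(overlap)
index = torch.arange(3).view(-1, 1, 1) + 1   # instance ids 1..3, shape (3, 1, 1)
binary = (masks[None] == index).float()      # (3, 4, 4), one binary mask per instance
print(binary.sum(dim=(1, 2)))                # tensor([8., 4., 1.]) pixels per instance
```
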
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.nm = m.nm # number of masks + self.anchors = m.anchors + self.device = device + + def __call__(self, preds, targets, masks): # predictions, targets, model + p, proto = preds + bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + lcls = torch.zeros(1, device=self.device) + lbox = torch.zeros(1, device=self.device) + lobj = torch.zeros(1, device=self.device) + lseg = torch.zeros(1, device=self.device) + tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions + + # Box regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Mask regression + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized + mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) + for bi in b.unique(): + j = b == bi # matching index + if self.overlap: + mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) + else: + mask_gti = masks[tidxs[i]][j] + lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] + lseg *= self.hyp["box"] / bs + + loss = lbox + lobj + lcls + lseg + return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for 
one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] + gain = torch.ones(8, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + if self.overlap: + batch = p[0].shape[0] + ti = [] + for i in range(batch): + num = (targets[:, 0] == i).sum() # find number of targets of each image + ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) + ti = torch.cat(ti, 1) # (na, nt) + else: + ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + tidxs.append(tidx) + xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized + + return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py new file mode 100644 index 000000000000..b09ce23fb9e3 --- /dev/null +++ b/utils/segment/metrics.py @@ -0,0 +1,210 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import numpy as np + +from ..metrics import ap_per_class + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] + return (x[:, :8] * w).sum(1) + + +def ap_per_class_box_and_mask( + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir=".", + names=(), +): + """ + Args: + tp_b: tp of boxes. + tp_m: tp of masks. + other arguments see `func: ap_per_class`. 
+ """ + results_boxes = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Box")[2:] + results_masks = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Mask")[2:] + + results = { + "boxes": { + "p": results_boxes[0], + "r": results_boxes[1], + "ap": results_boxes[3], + "f1": results_boxes[2], + "ap_class": results_boxes[4]}, + "masks": { + "p": results_masks[0], + "r": results_masks[1], + "ap": results_masks[3], + "f1": results_masks[2], + "ap_class": results_masks[4]}} + return results + + +class Metric: + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + + @property + def ap50(self): + """AP@0.5 of all classes. + Return: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Return: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Return: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Return: + float. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Return: + float. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Return: + float. + """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return (self.mp, self.mr, self.map50, self.map) + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) + + def get_maps(self, nc): + maps = np.zeros(nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + p, r, all_ap, f1, ap_class_index = results + self.p = p + self.r = r + self.all_ap = all_ap + self.f1 = f1 + self.ap_class_index = ap_class_index + + +class Metrics: + """Metric for boxes and masks.""" + + def __init__(self) -> None: + self.metric_box = Metric() + self.metric_mask = Metric() + + def update(self, results): + """ + Args: + results: Dict{'boxes': Dict{}, 'masks': Dict{}} + """ + self.metric_box.update(list(results["boxes"].values())) + self.metric_mask.update(list(results["masks"].values())) + + def mean_results(self): + return self.metric_box.mean_results() + self.metric_mask.mean_results() + + def class_result(self, i): + return self.metric_box.class_result(i) + self.metric_mask.class_result(i) + + def get_maps(self, nc): + return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.metric_box.ap_class_index + + +KEYS = [ + "train/box_loss", + "train/seg_loss", # train loss + "train/obj_loss", + "train/cls_loss", + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP_0.5(B)", + "metrics/mAP_0.5:0.95(B)", # metrics + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP_0.5(M)", + "metrics/mAP_0.5:0.95(M)", # metrics + "val/box_loss", + "val/seg_loss", # val loss + "val/obj_loss", + "val/cls_loss", + 
"x/lr0", + "x/lr1", + "x/lr2",] + +BEST_KEYS = [ + "best/epoch", + "best/precision(B)", + "best/recall(B)", + "best/mAP_0.5(B)", + "best/mAP_0.5:0.95(B)", + "best/precision(M)", + "best/recall(M)", + "best/mAP_0.5(M)", + "best/mAP_0.5:0.95(M)",] diff --git a/utils/segment/plots.py b/utils/segment/plots.py new file mode 100644 index 000000000000..e882c14390f0 --- /dev/null +++ b/utils/segment/plots.py @@ -0,0 +1,143 @@ +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch + +from .. import threaded +from ..general import xywh2xyxy +from ..plots import Annotator, colors + + +@threaded +def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + idx = targets[:, 0] == i + ti = targets[idx] # image targets + + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if masks.max() > 1.0: # mean that masks are overlap + image_masks = masks[[i]] # (1, 640, 640) + nl = len(ti) + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + else: + image_masks = masks[idx] + + im = 
np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(np.bool) + else: + mask = image_masks[j].astype(np.bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + annotator.fromarray(im) + annotator.im.save(fname) # save + + +def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." + for f in files: + try: + data = pd.read_csv(f) + index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + + 0.1 * data.values[:, 11]) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + if best: + # best + ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + else: + # last + ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print(f"Warning: Plotting error for {f}: {e}") + ax[1].legend() + fig.savefig(save_dir / "results.png", dpi=200) + plt.close() diff --git a/val.py b/val.py index 4b0bdddae3b1..6a0f18e28392 100644 --- a/val.py +++ b/val.py @@ -71,12 +71,12 @@ def save_one_json(predn, jdict, path, class_map): def process_batch(detections, labels, iouv): """ - Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. + Return correct prediction matrix Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 Returns: - correct (Array[N, 10]), for 10 IoU levels + correct (array[N, 10]), for 10 IoU levels """ correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) iou = box_iou(labels[:, 1:], detections[:, :4]) @@ -102,6 +102,7 @@ def run( imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) @@ -187,7 +188,7 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] @@ -205,7 +206,7 @@ def run( # Inference with dt[1]: - out, train_out = model(im) if compute_loss else (model(im, augment=augment), None) + preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None) # Loss if compute_loss: @@ -215,10 +216,16 @@ def run( targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det) # Metrics - for si, pred in enumerate(out): + for si, pred in enumerate(preds): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] @@ -258,9 +265,9 @@ def run( # Plot images if plots and batch_i < 3: plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels - plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred - callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, out) + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy @@ -332,11 +339,12 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') From 58ad5ca5ce6b4fb3da6420bcc7b11a09e20674fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:21:13 +0200 Subject: [PATCH 005/277] Fix val.py zero-TP bug (#9431) Resolves https://github.com/ultralytics/yolov5/issues/9400 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- val.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/val.py b/val.py index 6a0f18e28392..e003d2144b7f 100644 --- a/val.py +++ b/val.py @@ -189,7 +189,8 @@ def run( names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') - dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') From a1e5f9a97de2a3ace012315208c686744ced2782 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:55:21 +0200 Subject: [PATCH 006/277] New model.yaml `activation:` field (#9371) * New model.yaml `activation:` field Add optional model yaml activation field to define model-wide activations, i.e.: ```yaml activation: nn.LeakyReLU(0.1) # activation with arguments activation: nn.SiLU() # activation with no arguments ``` Signed-off-by: Glenn Jocher * Update yolo.py Signed-off-by: Glenn Jocher * Add example models * l to m models * update * Add yolov5s-LeakyReLU.yaml * Update yolov5s-LeakyReLU.yaml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 8 +++-- models/hub/yolov5s-LeakyReLU.yaml | 49 +++++++++++++++++++++++++++++++ models/yolo.py | 6 +++- 3 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 models/hub/yolov5s-LeakyReLU.yaml diff --git a/models/common.py b/models/common.py index 0d90ff4f8827..debbc2d03f60 100644 --- a/models/common.py +++ b/models/common.py @@ -39,11 +39,13 @@ def autopad(k, p=None, d=1): # kernel, padding, dilation class Conv(nn.Module): # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + act = nn.SiLU() # default activation + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + self.act = self.act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): return self.act(self.bn(self.conv(x))) @@ -54,8 +56,8 @@ def forward_fuse(self, x): class DWConv(Conv): # Depth-wise convolution - def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) class DWConvTranspose2d(nn.ConvTranspose2d): diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml new file mode 100644 index 000000000000..3a179bf3311c --- /dev/null +++ 
b/models/hub/yolov5s-LeakyReLU.yaml @@ -0,0 +1,49 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolo.py b/models/yolo.py index a0702a7c0257..46039c36d7e1 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -297,8 +297,12 @@ def _from_yaml(self, cfg): def parse_model(d, ch): # model_dict, input_channels(3) + # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') + if act: + Conv.act = eval(act) # redefine default activation, i.e. Conv.act = nn.SiLU() + LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) From c7a2d6bcf4f7e88db53f3d09a8484391dac7bc89 Mon Sep 17 00:00:00 2001 From: Hoyeong-GenGenAI <5404902+hotohoto@users.noreply.github.com> Date: Fri, 16 Sep 2022 18:53:18 +0900 Subject: [PATCH 007/277] Fix tick labels for background FN/FP (#9414) * Fix tick labels for background FN/FP In the confusion matrix. 
* Remove FP/FN from the background labels of the confusion matrix * Update metrics.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/metrics.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 001813cbcd65..021a46ce5d37 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -170,12 +170,12 @@ def process_batch(self, detections, labels): if n and sum(j) == 1: self.matrix[detection_classes[m1[j]], gc] += 1 # correct else: - self.matrix[self.nc, gc] += 1 # background FP + self.matrix[self.nc, gc] += 1 # true background if n: for i, dc in enumerate(detection_classes): if not any(m1 == i): - self.matrix[dc, self.nc] += 1 # background FN + self.matrix[dc, self.nc] += 1 # predicted background def matrix(self): return self.matrix @@ -197,6 +197,7 @@ def plot(self, normalize=True, save_dir='', names=()): nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + ticklabels = (names + ['background']) if labels else "auto" with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered sn.heatmap(array, @@ -208,8 +209,8 @@ def plot(self, normalize=True, save_dir='', names=()): fmt='.2f', square=True, vmin=0.0, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + xticklabels=ticklabels, + yticklabels=ticklabels).set_facecolor((1, 1, 1)) ax.set_ylabel('True') ax.set_ylabel('Predicted') ax.set_title('Confusion Matrix') From 03f2ca8eff8918b98169256d055353a1f15b8e32 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 12:31:43 +0200 Subject: [PATCH 008/277] Fix TensorRT exports to ONNX opset 12 (#9441) * Fix TensorRT exports to ONNX opset 12 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 1b25f3f8221b..cc4386ae4916 100644 --- a/export.py +++ b/export.py @@ -251,7 +251,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose model.model[-1].anchor_grid = grid else: # TensorRT >= 8 check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 13, False, dynamic, simplify) # opset 13 + export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12 onnx = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') @@ -274,11 +274,10 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose inputs = [network.get_input(i) for i in range(network.num_inputs)] outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') for inp in inputs: - LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} 
{out.dtype}') if dynamic: if im.shape[0] <= 1: @@ -288,7 +287,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) config.add_optimization_profile(profile) - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) with builder.build_engine(network, config) as engine, open(f, 'wb') as t: From 2ac4b634c745cc46c4728e682c6da66f79f6416a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 17:25:44 +0200 Subject: [PATCH 009/277] AutoShape explicit arguments fix (#9443) * AutoShape explicit arguments fix Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index debbc2d03f60..85b82e10a4e1 100644 --- a/models/common.py +++ b/models/common.py @@ -633,7 +633,7 @@ def forward(self, ims, size=640, augment=False, profile=False): autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch with amp.autocast(autocast): - return self.model(ims.to(p.device).type_as(p), augment, profile) # inference + return self.model(ims.to(p.device).type_as(p), augment=augment) # inference # Pre-process n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images @@ -662,7 +662,7 @@ def forward(self, ims, size=640, augment=False, profile=False): with amp.autocast(autocast): # Inference with dt[1]: - y = self.model(x, augment, profile) # forward + y = self.model(x, augment=augment) # forward # Post-process with dt[2]: @@ -696,7 +696,7 @@ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) # number of images (batch size) self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) - self.s = shape # inference BCHW shape + self.s = tuple(shape) # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] @@ -726,7 +726,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: - print(s.rstrip(', ')) + LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: @@ -743,7 +743,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t) def show(self, labels=True): self.display(show=True, labels=labels) # show results From fe10b4abc054cba1b5fab1d3598b3caf77b53859 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 18:36:55 +0200 Subject: [PATCH 010/277] Update Detections() instance printing (#9445) * Update Detections() instance printing Signed-off-by: Glenn Jocher * Update 
common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/models/common.py b/models/common.py index 85b82e10a4e1..9c08120fe7f6 100644 --- a/models/common.py +++ b/models/common.py @@ -698,14 +698,15 @@ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) self.s = tuple(shape) # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): - crops = [] + def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + s, crops = '', [] for i, (im, pred) in enumerate(zip(self.ims, self.pred)): - s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s = s.rstrip(', ') if show or save or render or crop: annotator = Annotator(im, example=str(self.names)) for *box, conf, cls in reversed(pred): # xyxy, confidence, class @@ -725,8 +726,6 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False s += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np - if pprint: - LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: @@ -736,28 +735,27 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.ims[i] = np.asarray(im) + if pprint: + s = s.lstrip('\n') + return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t if crop: if save: LOGGER.info(f'Saved results to {save_dir}\n') return crops - def print(self): - self.display(pprint=True) # print results - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t) - def show(self, labels=True): - self.display(show=True, labels=labels) # show results + self._run(show=True, labels=labels) # show results def save(self, labels=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results + self._run(save=True, labels=labels, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None - return self.display(crop=True, save=save, save_dir=save_dir) # crop results + return self._run(crop=True, save=save, save_dir=save_dir) # crop results def render(self, labels=True): - self.display(render=True, labels=labels) # render results + self._run(render=True, labels=labels) # render results return self.ims def pandas(self): @@ -779,12 +777,17 @@ def tolist(self): # setattr(d, k, getattr(d, k)[0]) # pop out of list return x - def __len__(self): - return self.n # override len(results) + def print(self): + LOGGER.info(self.__str__()) + + def __len__(self): # override len(results) + return self.n 
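
A usage sketch of the Detections printing refactor above: with __str__ and __repr__ defined, a results object now prints its per-image summary and speed line directly, while results.print() routes the same string through LOGGER. The hub model name and image URL are only the standard YOLOv5 examples, and running this needs network access.

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')        # AutoShape-wrapped model
results = model('https://ultralytics.com/images/zidane.jpg')   # Detections instance
print(results)     # __str__ -> per-image detection summaries + 'Speed: ...' line
results.print()    # same summary, logged via LOGGER.info
n = len(results)   # number of images in the batch, via __len__
results            # in a REPL/notebook, __repr__ adds the class info line
```
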
+ + def __str__(self): # override print(results) + return self._run(pprint=True) # print results - def __str__(self): - self.print() # override print(results) - return '' + def __repr__(self): + return f'YOLOv5 {self.__class__} instance\n' + self.__str__() class Proto(nn.Module): From db06f495db02501ef94efe46171d952642dec880 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 20:44:56 +0200 Subject: [PATCH 011/277] AutoUpdate TensorFlow in export.py (#9447) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index cc4386ae4916..a575c73e375f 100644 --- a/export.py +++ b/export.py @@ -309,6 +309,7 @@ def export_saved_model(model, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export + check_requirements('tensorflow' if torch.cuda.is_available() else 'tensorflow-cpu') import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 From 5e1a9553fbed73995c9b81e63ba41cc70fdf89de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 21:46:07 +0200 Subject: [PATCH 012/277] AutoBatch `cudnn.benchmark=True` fix (#9448) * AutoBatch `cudnn.benchmark=True` fix May resolve https://github.com/ultralytics/yolov5/issues/9287 Signed-off-by: Glenn Jocher * Update autobatch.py Signed-off-by: Glenn Jocher * Update autobatch.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 3 +++ utils/general.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 641b055b9fe3..3204fd26fc41 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -33,6 +33,9 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): if device.type == 'cpu': LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size # Inspect CUDA memory gb = 1 << 30 # bytes to GiB (1024 ** 3) diff --git a/utils/general.py b/utils/general.py index 8633511f89f5..af95b3dc2b8b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -223,7 +223,7 @@ def init_seeds(seed=0, deterministic=False): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe - torch.backends.cudnn.benchmark = True # for faster training + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 torch.use_deterministic_algorithms(True) torch.backends.cudnn.deterministic = True From 4a4308001ce1699fca2d9566b652e2388a088973 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 15:19:43 +0200 Subject: [PATCH 013/277] Do not move downloaded zips (#9455) * Do not move downloaded zips Prevent multiple downloads on HUB of same dataset @kalenmike Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index af95b3dc2b8b..4d080f282ed0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -568,10 +568,10 @@ def 
download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry def download_one(url, dir): # Download 1 file success = True - f = dir / Path(url).name # filename - if Path(url).is_file(): # exists in current path - Path(url).rename(f) # move to dir - elif not f.exists(): + if Path(url).is_file(): + f = Path(url) # filename + else: # does not exist + f = dir / Path(url).name LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: From 6a9fffd19a96799c683c94d2d4da8c453e819116 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 15:42:24 +0200 Subject: [PATCH 014/277] Update general.py (#9454) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/utils/general.py b/utils/general.py index 4d080f282ed0..38856b6bfa1c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -469,8 +469,7 @@ def check_dataset(data, autodownload=True): # Read yaml (optional) if isinstance(data, (str, Path)): - with open(data, errors='ignore') as f: - data = yaml.safe_load(f) # dictionary + data = yaml_load(data) # dictionary # Checks for k in 'train', 'val', 'names': @@ -485,7 +484,13 @@ def check_dataset(data, autodownload=True): path = (ROOT / path).resolve() for k in 'train', 'val', 'test': if data.get(k): # prepend path - data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] # Parse yaml train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) @@ -496,13 +501,12 @@ def check_dataset(data, autodownload=True): if not s or not autodownload: raise Exception('Dataset not found ❌') t = time.time() - root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) - Path(root).mkdir(parents=True, exist_ok=True) # create root - ZipFile(f).extractall(path=root) # unzip + Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root + ZipFile(f).extractall(path=DATASETS_DIR) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script @@ -511,7 +515,7 @@ def check_dataset(data, autodownload=True): else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" LOGGER.info(f"Dataset download {s}") check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary From 060837406542c5c65301b8fde641f4d92a1f395e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 23:17:59 +0200 Subject: [PATCH 015/277] `Detect()` and `Segment()` fixes for CoreML and Paddle (#9458) * Detect() and Segment() fixes for CoreML and Paddle Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/yolo.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 46039c36d7e1..0dca6353a356 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -64,17 +64,17 @@ def forward(self, x): if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - y = x[i].clone() - y[..., :5 + self.nc].sigmoid_() - if self.inplace: - y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, etc = y.split((2, 2, self.no - 4), 4) # tensor_split((2, 4, 5), 4) if torch 1.8.0 + if isinstance(self, Segment): # (boxes + masks) + xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) + xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy + wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) + else: # Detect (boxes only) + xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, etc), 4) - z.append(y.view(bs, -1, self.no)) + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) From afb9860522e5023d64f4fd36fb78b6f26011f760 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 00:17:31 +0200 Subject: [PATCH 016/277] Add Paddle exports to benchmarks (#9459) * Add Paddle exports to benchmarks Signed-off-by: Glenn Jocher * Update plots.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- benchmarks.py | 2 +- models/common.py | 10 ++++------ utils/segment/plots.py | 4 ++-- 3 files changed, 7 
insertions(+), 9 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index 58e083c95d55..161af73c1eda 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -65,7 +65,7 @@ def run( model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: assert cpu, 'inference not supported on CPU' diff --git a/models/common.py b/models/common.py index 9c08120fe7f6..2b61307ad46b 100644 --- a/models/common.py +++ b/models/common.py @@ -460,8 +460,8 @@ def wrap_frozen_graph(gd, inputs, outputs): if cuda: config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) predictor = pdi.create_predictor(config) - input_names = predictor.get_input_names() - input_handle = predictor.get_input_handle(input_names[0]) + input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) + output_names = predictor.get_output_names() else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -517,12 +517,10 @@ def forward(self, im, augment=False, visualize=False): k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key y = y[k] # output elif self.paddle: # PaddlePaddle - im = im.cpu().numpy().astype("float32") + im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) self.predictor.run() - output_names = self.predictor.get_output_names() - output_handle = self.predictor.get_output_handle(output_names[0]) - y = output_handle.copy_to_cpu() + y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel diff --git a/utils/segment/plots.py b/utils/segment/plots.py index e882c14390f0..9b90900b3772 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -99,9 +99,9 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' if mh != h or mw != w: mask = image_masks[j].astype(np.uint8) mask = cv2.resize(mask, (w, h)) - mask = mask.astype(np.bool) + mask = mask.astype(bool) else: - mask = image_masks[j].astype(np.bool) + mask = image_masks[j].astype(bool) with contextlib.suppress(Exception): im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 annotator.fromarray(im) From e8a9c5ae41b53f756e46de1190831b14b53c3b24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 00:57:48 +0200 Subject: [PATCH 017/277] Add `macos-latest` runner for CoreML benchmarks (#9453) * Add `macos-latest` runner for CoreML benchmarks Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 2b61307ad46b..825a4c4e2633 100644 --- a/models/common.py +++ b/models/common.py @@ -514,8 +514,7 @@ def forward(self, im, augment=False, visualize=False): conf, cls = y['confidence'].max(1), 
y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: - k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key - y = y[k] # output + y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) elif self.paddle: # PaddlePaddle im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) From 8ae81a6c87ebbf6a25c4dc2c77ef443b1d84098a Mon Sep 17 00:00:00 2001 From: Junjie Zhang <46258221+Oswells@users.noreply.github.com> Date: Sun, 18 Sep 2022 18:27:43 +0800 Subject: [PATCH 018/277] Fix cutout bug (#9452) * fix cutout bug Signed-off-by: Junjie Zhang <46258221+Oswells@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Junjie Zhang <46258221+Oswells@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/augmentations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index a5587351f75b..f49110f43c6a 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -12,7 +12,7 @@ import torchvision.transforms as T import torchvision.transforms.functional as TF -from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy from utils.metrics import bbox_ioa IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean @@ -281,7 +281,7 @@ def cutout(im, labels, p=0.5): # return unobscured labels if len(labels) and s > 0.03: box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area labels = labels[ioa < 0.60] # remove >60% obscured labels return labels From 95cef1ae6b3bdf4ced616a2b6f3c9655803e9ea7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 12:42:23 +0200 Subject: [PATCH 019/277] Optimize imports (#9464) * Optimize imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reformat * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 2 -- utils/loggers/clearml/clearml_utils.py | 1 + utils/loggers/comet/hpo.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/segment/train.py b/segment/train.py index bda379176151..8abd0944551d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -39,8 +39,6 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -import torch.nn.functional as F - import segment.val as validate # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import SegmentationModel diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 1e136907367d..eb1c12ce6cac 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -11,6 +11,7 @@ try: import clearml from clearml import Dataset, Task + assert hasattr(clearml, '__version__') # verify package import not local dir except (ImportError, AssertionError): clearml = None diff --git 
a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index eab4df9978cf..7dd5c92e8de1 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -14,7 +14,7 @@ if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -from train import parse_opt, train +from train import train from utils.callbacks import Callbacks from utils.general import increment_path from utils.torch_utils import select_device From dc42e6ef2232979e6f0f606da670f42c6d59108c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 14:45:08 +0200 Subject: [PATCH 020/277] TensorRT SegmentationModel fix (#9465) * TensorRT SegmentationModel fix * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * TensorRT SegmentationModel fix * fix * sort output names * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 23 ++++++++++++----------- models/common.py | 27 ++++++++++++++++----------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/export.py b/export.py index a575c73e375f..9955870e9e43 100644 --- a/export.py +++ b/export.py @@ -66,7 +66,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load -from models.yolo import ClassificationModel, Detect +from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel from utils.dataloaders import LoadImages from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) @@ -134,6 +134,15 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') + output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0'] + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(model, DetectionModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + torch.onnx.export( model.cpu() if dynamic else model, # --dynamic only compatible with cpu im.cpu() if dynamic else im, @@ -142,16 +151,8 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX opset_version=opset, do_constant_folding=True, input_names=['images'], - output_names=['output'], - dynamic_axes={ - 'images': { - 0: 'batch', - 2: 'height', - 3: 'width'}, # shape(1,3,640,640) - 'output': { - 0: 'batch', - 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) + output_names=output_names, + dynamic_axes=dynamic or None) # Checks model_onnx = onnx.load(f) # load onnx model diff --git a/models/common.py b/models/common.py index 
825a4c4e2633..d0bc65e02f91 100644 --- a/models/common.py +++ b/models/common.py @@ -390,18 +390,21 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, model = runtime.deserialize_cuda_engine(f.read()) context = model.create_execution_context() bindings = OrderedDict() + output_names = [] fp16 = False # default updated below dynamic = False - for index in range(model.num_bindings): - name = model.get_binding_name(index) - dtype = trt.nptype(model.get_binding_dtype(index)) - if model.binding_is_input(index): - if -1 in tuple(model.get_binding_shape(index)): # dynamic + for i in range(model.num_bindings): + name = model.get_binding_name(i) + dtype = trt.nptype(model.get_binding_dtype(i)) + if model.binding_is_input(i): + if -1 in tuple(model.get_binding_shape(i)): # dynamic dynamic = True - context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2])) + context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) if dtype == np.float16: fp16 = True - shape = tuple(context.get_binding_shape(index)) + else: # output + output_names.append(name) + shape = tuple(context.get_binding_shape(i)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) @@ -495,15 +498,17 @@ def forward(self, im, augment=False, visualize=False): y = list(self.executable_network([im]).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: - i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) - self.context.set_binding_shape(i_in, im.shape) # reshape if dynamic + i = self.model.get_binding_index('images') + self.context.set_binding_shape(i, im.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) - self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) + for name in self.output_names: + i = self.model.get_binding_index(name) + self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) s = self.bindings['images'].shape assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) - y = self.bindings['output'].data + y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) im = Image.fromarray((im[0] * 255).astype('uint8')) From 4d50cd3469d75b18e99ce1e831ca024e3d25a2d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 15:02:04 +0200 Subject: [PATCH 021/277] `Conv()` dilation argument fix (#9466) Resolves https://github.com/ultralytics/yolov5/issues/9384 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/common.py b/models/common.py index d0bc65e02f91..33db74dcd9ae 100644 --- a/models/common.py +++ b/models/common.py @@ -232,7 +232,7 @@ class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) # self.contract = Contract(gain=2) def forward(self, x): 
# x(b,c,w,h) -> y(b,4c,w/2,h/2) @@ -245,8 +245,8 @@ class GhostConv(nn.Module): def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups super().__init__() c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + self.cv1 = Conv(c1, c_, k, s, None, g, act=act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act) def forward(self, x): y = self.cv1(x) From 295c5e9d3ce70f5dbdb897c2da6a58e58f7c1125 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 16:13:22 +0200 Subject: [PATCH 022/277] Update ClassificationModel default training `imgsz=224` (#9469) Update ClassificationModel default training imgsz=224 To match classify/val.py and classify/predict.py Helps https://github.com/ultralytics/yolov5/issues/9462 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index 223367260bad..23c90e0a5274 100644 --- a/classify/train.py +++ b/classify/train.py @@ -3,7 +3,7 @@ Train a YOLOv5 classifier model on a classification dataset Usage - Single-GPU training: - $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 + $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 Usage - Multi-GPU DDP training: $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 @@ -272,7 +272,7 @@ def parse_opt(known=False): parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') parser.add_argument('--epochs', type=int, default=10, help='total training epochs') parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') From ca9c993d6c3c9f59c44d28b22d8968709cd11693 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 16:15:25 +0200 Subject: [PATCH 023/277] =?UTF-8?q?Standardize=20warnings=20with=20`WARNIN?= =?UTF-8?q?G=20=20=E2=9A=A0=EF=B8=8F=20...`=20(#9467)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Standardize warnings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- benchmarks.py | 2 +- classify/train.py | 2 +- export.py | 2 +- hubconf.py | 2 +- segment/train.py | 2 +- segment/val.py | 6 +++--- train.py | 2 +- utils/__init__.py | 10 ++++++++-- utils/autoanchor.py | 4 ++-- utils/autobatch.py | 2 +- utils/dataloaders.py | 18 +++++++++--------- utils/general.py | 21 ++++++++------------- utils/loggers/__init__.py | 4 ++-- utils/metrics.py | 2 +- utils/segment/dataloaders.py | 2 +- utils/torch_utils.py | 2 +- val.py | 6 +++--- 17 files changed, 45 insertions(+), 44 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index 161af73c1eda..b3b58eb3257c 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -91,7 +91,7 @@ def run( except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' - LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') + LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: break # break after PyTorch diff --git a/classify/train.py b/classify/train.py index 23c90e0a5274..178ebcdfff53 100644 --- a/classify/train.py +++ b/classify/train.py @@ -114,7 +114,7 @@ def train(opt, device): m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) if isinstance(model, DetectionModel): - LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") + LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model reshape_classifier_output(model, nc) # update class count for m in model.modules(): diff --git a/export.py b/export.py index 9955870e9e43..ac9b13db8ec0 100644 --- a/export.py +++ b/export.py @@ -282,7 +282,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f"{prefix}WARNING ⚠️ --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) diff --git a/hubconf.py b/hubconf.py index 2f05565629a5..4224760a4732 100644 --- a/hubconf.py +++ b/hubconf.py @@ -47,7 +47,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. 
' + LOGGER.warning('WARNING ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS diff --git a/segment/train.py b/segment/train.py index 8abd0944551d..5121c5fa784a 100644 --- a/segment/train.py +++ b/segment/train.py @@ -176,7 +176,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) diff --git a/segment/val.py b/segment/val.py index 138aa00aaed3..59ab76672a30 100644 --- a/segment/val.py +++ b/segment/val.py @@ -345,7 +345,7 @@ def run( pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: - LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -438,9 +438,9 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: - LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: diff --git a/train.py b/train.py index 4eff6e5d645a..9efece250581 100644 --- a/train.py +++ b/train.py @@ -173,7 +173,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) diff --git a/utils/__init__.py b/utils/__init__.py index 46225c2208ce..8403a6149827 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -4,9 +4,15 @@ """ import contextlib +import platform import threading +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + class TryExcept(contextlib.ContextDecorator): # YOLOv5 TryExcept class. 
Usage: @TryExcept() decorator or 'with TryExcept():' context manager def __init__(self, msg=''): @@ -17,7 +23,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(f'{self.msg}{value}') + print(emojis(f'{self.msg}{value}')) return True @@ -38,7 +44,7 @@ def notebook_init(verbose=True): import os import shutil - from utils.general import check_font, check_requirements, emojis, is_colab + from utils.general import check_font, check_requirements, is_colab from utils.torch_utils import select_device # imports check_requirements(('psutil', 'IPython')) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 0b49ab3319c0..7e7e9985d68a 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -122,7 +122,7 @@ def print_results(k, verbose=True): # Filter i = (wh0 < 3.0).any(1).sum() if i: - LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') + LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 @@ -134,7 +134,7 @@ def print_results(k, verbose=True): k = kmeans(wh / s, n, iter=30)[0] * s # points assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar except Exception: - LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') + LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) diff --git a/utils/autobatch.py b/utils/autobatch.py index 3204fd26fc41..49435f51a244 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -65,7 +65,7 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): b = batch_sizes[max(i - 1, 0)] # select prior safe point if b < 1 or b > 1024: # b outside of safe range b = batch_size - LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') + LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') fraction = np.polyval(p, b) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c04be853c580..5c3460eb0d6e 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -116,7 +116,7 @@ def create_dataloader(path, prefix='', shuffle=False): if rect and shuffle: - LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels( @@ -328,7 +328,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr self.auto = auto and self.rect self.transforms = transforms # optional if not self.rect: - LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.') + LOGGER.warning('WARNING ⚠️ Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread @@ -341,7 +341,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(0.0) # wait time @@ -543,7 +543,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if msgs: LOGGER.info('\n'.join(msgs)) if nf == 0: - LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. {HELP_URL}') + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') x['hash'] = get_hash(self.label_files + self.im_files) x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings @@ -553,7 +553,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): path.with_suffix('.cache.npy').rename(path) # remove .npy suffix LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: - LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self): @@ -917,7 +917,7 @@ def verify_image_label(args): f.seek(-2, 2) if f.read() != b'\xff\xd9': # corrupt JPEG ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) - msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' # verify labels if os.path.isfile(lb_file): @@ -939,7 +939,7 @@ def verify_image_label(args): lb = lb[i] # remove duplicates if segments: segments = [segments[x] for x in i] - msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty lb = np.zeros((0, 5), dtype=np.float32) @@ -949,7 +949,7 @@ def verify_image_label(args): return im_file, lb, shape, segments, nm, nf, ne, nc, msg except Exception as e: nc = 1 - msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' return [None, None, None, None, nm, nf, ne, nc, msg] @@ -1012,7 +1012,7 @@ def _hub_ops(self, f, max_dim=1920): im = im.resize((int(im.width * r), int(im.height * r))) im.save(f_new, 'JPEG', quality=50, optimize=True) # save except Exception as e: # use OpenCV - print(f'WARNING: HUB ops PIL failure {f}: {e}') + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') im = cv2.imread(f) im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio diff --git a/utils/general.py b/utils/general.py index 38856b6bfa1c..fd0b4090a0fa 100644 --- a/utils/general.py +++ b/utils/general.py @@ -34,7 +34,7 @@ import torchvision import yaml -from utils import TryExcept +from utils import TryExcept, emojis from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness @@ -248,11 +248,6 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else 
str - - def file_age(path=__file__): # Return days since last file update dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta @@ -333,7 +328,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string + s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, emojis(s) # assert min requirements met if verbose and not result: @@ -373,7 +368,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) except Exception as e: - LOGGER.warning(f'{prefix} {e}') + LOGGER.warning(f'{prefix} ❌ {e}') def check_img_size(imgsz, s=32, floor=0): @@ -384,7 +379,7 @@ def check_img_size(imgsz, s=32, floor=0): imgsz = list(imgsz) # convert to list if tuple new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: - LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size @@ -399,7 +394,7 @@ def check_imshow(): cv2.waitKey(1) return True except Exception as e: - LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False @@ -589,9 +584,9 @@ def download_one(url, dir): if success: break elif i < retry: - LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...') + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') else: - LOGGER.warning(f'Failed to download {url}...') + LOGGER.warning(f'❌ Failed to download {url}...') if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): LOGGER.info(f'Unzipping {f}...') @@ -908,7 +903,7 @@ def non_max_suppression( output[xi] = x[i] if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded') + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded return output diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index f29debb76907..941d09e19e2d 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,7 +11,7 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, cv2 +from utils.general import LOGGER, colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger from utils.loggers.wandb.wandb_utils import WandbLogger from utils.plots import plot_images, plot_labels, plot_results @@ -393,7 +393,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) except Exception as e: - print(f'WARNING: TensorBoard graph visualization failure {e}') + LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') def web_project_name(project): diff --git a/utils/metrics.py 
b/utils/metrics.py index 021a46ce5d37..ed611d7d38fa 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING: ConfusionMatrix plot failure: ') + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index f6fe642d077f..d137caa5ab27 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -37,7 +37,7 @@ def create_dataloader(path, mask_downsample_ratio=1, overlap_mask=False): if rect and shuffle: - LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabelsAndMasks( diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 8a3366ca3e27..9f257d06ac60 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -47,7 +47,7 @@ def smartCrossEntropyLoss(label_smoothing=0.0): if check_version(torch.__version__, '1.10.0'): return nn.CrossEntropyLoss(label_smoothing=label_smoothing) if label_smoothing > 0: - LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') + LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') return nn.CrossEntropyLoss() diff --git a/val.py b/val.py index e003d2144b7f..3ab4bc3fdb58 100644 --- a/val.py +++ b/val.py @@ -282,7 +282,7 @@ def run( pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) if nt.sum() == 0: - LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -374,9 +374,9 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: - LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: From 92b52424d468feb48c51c3dde173d5d2c606a44b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 17:34:34 +0200 Subject: [PATCH 024/277] TensorFlow macOS AutoUpdate (#9471) * TensorFlow macOS AutoUpdate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 11 ++++++++--- requirements.txt | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index ac9b13db8ec0..ae292afe06f6 100644 --- a/export.py +++ b/export.py @@ -72,6 +72,8 @@ check_yaml, colorstr, file_size, get_default_args, print_args, url2file, 
yaml_save) from utils.torch_utils import select_device, smart_inference_mode +MACOS = platform.system() == 'Darwin' # macOS environment + def export_formats(): # YOLOv5 export formats @@ -224,7 +226,7 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) if bits < 32: - if platform.system() == 'Darwin': # quantization only supported on macOS + if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) @@ -310,8 +312,11 @@ def export_saved_model(model, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export - check_requirements('tensorflow' if torch.cuda.is_available() else 'tensorflow-cpu') - import tensorflow as tf + try: + import tensorflow as tf + except Exception: + check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") + import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from models.tf import TFModel diff --git a/requirements.txt b/requirements.txt index 44fe1ce697b7..835346f218a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ seaborn>=0.11.0 # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization -# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) +# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From 120e27e38efd4351b5e5bb5d735635f4cbf1bc86 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 19:34:10 +0200 Subject: [PATCH 025/277] `classify/predict --save-txt` fix (#9478) Classify --save-txt Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 878cf48b6fef..4857c69766e7 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -119,13 +119,15 @@ def run( for i, prob in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 - p, im0 = path[i], im0s[i].copy() + p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: - p, im0 = path, im0s.copy() + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string annotator = Annotator(im0, example=str(names), pil=True) @@ -134,9 +136,12 @@ def run( s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " # Write results + text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) if save_img or view_img: # Add bbox to image - text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) annotator.text((32, 32), text, txt_color=(255, 255, 255)) + if save_txt: # Write to file + with open(f'{txt_path}.txt', 'a') as f: + f.write(text + '\n') # Stream results im0 = annotator.result() @@ -188,7 +193,7 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', 
'--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-txt', action='store_false', help='save results to *.txt') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') From fda8aa551d0b732153c2e0848dd6abd887a41cd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 19:52:46 +0200 Subject: [PATCH 026/277] TensorFlow SegmentationModel support (#9472) * TensorFlow SegmentationModel support * TensorFlow SegmentationModel support * TensorFlow SegmentationModel support * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TFLite fixes * GraphDef fixes * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- export.py | 2 +- models/common.py | 29 ++++++++++++++++++++--------- models/tf.py | 15 ++++++++------- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 537ba96e7225..fffc92d1b72f 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -43,7 +43,7 @@ jobs: python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 - name: Benchmark SegmentationModel run: | - python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 + python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22 Tests: timeout-minutes: 60 diff --git a/export.py b/export.py index ae292afe06f6..fe4e53d06cc3 100644 --- a/export.py +++ b/export.py @@ -341,7 +341,7 @@ def export_saved_model(model, m = m.get_concrete_function(spec) frozen_func = convert_variables_to_constants_v2(m) tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) + tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) tfm.__call__(im) tf.saved_model.save(tfm, f, diff --git a/models/common.py b/models/common.py index 33db74dcd9ae..fac95a82fdb9 100644 --- a/models/common.py +++ b/models/common.py @@ -427,10 +427,17 @@ def wrap_frozen_graph(gd, inputs, outputs): ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + def gd_outputs(gd): + name_list, input_list = [], [] + for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef + name_list.append(node.name) + input_list.extend(node.input) + return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python 
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -528,22 +535,26 @@ def forward(self, im, augment=False, visualize=False): else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel - y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() + y = self.model(im, training=False) if self.keras else self.model(im) elif self.pb: # GraphDef - y = self.frozen_func(x=self.tf.constant(im)).numpy() + y = self.frozen_func(x=self.tf.constant(im)) else: # Lite or Edge TPU - input, output = self.input_details[0], self.output_details[0] + input = self.input_details[0] int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model if int8: scale, zero_point = input['quantization'] im = (im / scale + zero_point).astype(np.uint8) # de-scale self.interpreter.set_tensor(input['index'], im) self.interpreter.invoke() - y = self.interpreter.get_tensor(output['index']) - if int8: - scale, zero_point = output['quantization'] - y = (y.astype(np.float32) - zero_point) * scale # re-scale - y[..., :4] *= [w, h, w, h] # xywh normalized to pixels + y = [] + for output in self.output_details: + x = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + x = (x.astype(np.float32) - zero_point) * scale # re-scale + y.append(x) + y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] + y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, (list, tuple)): return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] diff --git a/models/tf.py b/models/tf.py index 8cce147059d3..ae58ca738e2e 100644 --- a/models/tf.py +++ b/models/tf.py @@ -299,15 +299,15 @@ def call(self, inputs): x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) if not self.training: # inference - y = tf.sigmoid(x[i]) + y = x[i] grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 - xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy - wh = y[..., 2:4] ** 2 * anchor_grid + xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy + wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid # Normalize xywh to 0-1 to reduce calibration error xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) - y = tf.concat([xy, wh, y[..., 4:]], -1) + y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) @@ -333,8 +333,9 @@ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w def call(self, x): p = self.proto(x[0]) + p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160) x = self.detect(self, x) - return (x, p) if self.training else ((x[0], p),) + return (x, p) if self.training else (x[0], p) class TFProto(keras.layers.Layer): @@ -485,8 +486,8 @@ def predict(self, conf_thres, clip_boxes=False) return nms, x[1] - return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] - # x = x[0][0] # [x(1,6300,85), ...] to x(6300,85) + return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] 
+ # x = x[0] # [x(1,6300,85), ...] to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes # conf = x[..., 4:5] # x(6300,1) confidences # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes From f038ad71729960facad54407e1b353b0e81242e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 12:18:55 +0200 Subject: [PATCH 027/277] AutoBatch report include reserved+allocated (#9491) May resolve https://github.com/ultralytics/yolov5/issues/9287#issuecomment-1250767031 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 49435f51a244..bdeb91c3d2bd 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -19,7 +19,7 @@ def check_train_batch_size(model, imgsz=640, amp=True): def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): - # Automatically estimate best batch size to use `fraction` of available CUDA memory + # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory # Usage: # import torch # from utils.autobatch import autobatch @@ -67,6 +67,6 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): b = batch_size LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') - fraction = np.polyval(p, b) / t # actual fraction predicted + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') return b From 868c0e9bbb45b031e7bfd73c6d3983bcce07b9c1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 13:31:24 +0200 Subject: [PATCH 028/277] Update Detect() grid init `for` loop (#9494) May resolve threaded inference issue in https://github.com/ultralytics/yolov5/pull/9425#issuecomment-1250802928 by avoiding memory sharing on init. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 0dca6353a356..1d0da2a6e010 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -47,8 +47,8 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.empty(1)] * self.nl # init grid - self.anchor_grid = [torch.empty(1)] * self.nl # init anchor grid + self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid + self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) From 11640698977724daf7982c9da398c2ee2f2b6e91 Mon Sep 17 00:00:00 2001 From: mucunwuxian Date: Mon, 19 Sep 2022 21:01:46 +0900 Subject: [PATCH 029/277] Accelerate video inference (#9487) * The following code is slow, "self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride". * adjust... 
* Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5c3460eb0d6e..5b03b4eb9759 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -232,8 +232,9 @@ def __next__(self): if self.video_flag[self.count]: # Read video self.mode = 'video' - ret_val, im0 = self.cap.read() - self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() while not ret_val: self.count += 1 self.cap.release() From 0b724c5b851b32bb3a8fbfab3cc2d68f93b4661e Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 19 Sep 2022 11:26:19 -0400 Subject: [PATCH 030/277] Comet Image Logging Fix (#9498) fix issues with image logging --- utils/loggers/comet/__init__.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 4ee86dd70d6e..3b3142b002c5 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -22,6 +22,7 @@ comet_ml = None COMET_PROJECT_NAME = None +import PIL import torch import torchvision.transforms as T import yaml @@ -131,6 +132,8 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar else: self.iou_thres = IOU_THRES + self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 @@ -139,6 +142,7 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar if self.comet_log_predictions: self.metadata_dict = {} + self.logged_image_names = [] self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS @@ -249,11 +253,12 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - processed_image = (image * 255).to(torch.uint8) - image_id = path.split("/")[-1].split(".")[0] image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" - self.log_image(to_pil(processed_image), name=image_name) + if image_name not in self.logged_image_names: + native_scale_image = PIL.Image.open(path) + self.log_image(native_scale_image, name=image_name) + self.logged_image_names.append(image_name) metadata = [] for cls, *xyxy in filtered_labels.tolist(): From 0171198f38f36c55090c91c49a7b5abacd571324 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 20:38:11 +0200 Subject: [PATCH 031/277] Fix visualization title bug (#9500) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index d8d5b225a774..51bb7d6c20af 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -204,7 +204,6 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec ax[i].axis('off') LOGGER.info(f'Saving {f}... 
({n}/{channels})') - plt.title('Features') plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save From 63368e71d23e453ded1d94094a2b43b75c1a54fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 20 Sep 2022 07:11:29 +0800 Subject: [PATCH 032/277] Add paddle tips (#9502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update export.py Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index fe4e53d06cc3..04c2ed9c802d 100644 --- a/export.py +++ b/export.py @@ -596,10 +596,11 @@ def parse_opt(): parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument('--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') + parser.add_argument( + '--include', + nargs='+', + default=['torchscript'], + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') opt = parser.parse_args() print_args(vars(opt)) return opt From 095f601d9d32ea0f0afd47554c068659939ecf4e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 12:22:02 +0200 Subject: [PATCH 033/277] Segmentation `polygons2masks_overlap()` in `np.int32` (#9493) * Segmentation `polygons2masks_overlap()` in `np.int32` May resolve https://github.com/ultralytics/yolov5/issues/9461 WARNING: Masks should be uint8 for fastest speed, change needs profiling results to determine impact. 
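For context (not part of the patch), a minimal NumPy sketch of the dtype trade-off described above, using made-up sizes: `astype(np.uint8)` silently wraps instance IDs past 255, which is why the overlap mask is only widened to `np.int32` when more than 255 segments are present.

```python
import numpy as np

# Overlap masks store one integer instance ID per pixel (illustrative IDs only).
ids = np.arange(300)                  # pretend an image has 300 instance segments
ids_u8 = ids.astype(np.uint8)         # uint8 wraps: ID 256 becomes 0 and collides with ID 0
ids_i32 = ids.astype(np.int32)        # int32 keeps every ID distinct, at 4x the memory
print(int(ids_u8[256]), int(ids_i32[256]))  # -> 0 256
```

That wrap-around is the failure mode `dtype=np.int32 if len(segments) > 255 else np.uint8` guards against, while keeping the faster uint8 path for typical images.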
@AyushExel @Laughing-q Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/segment/dataloaders.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d137caa5ab27..49575f065752 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -308,7 +308,8 @@ def polygons2masks(img_size, polygons, color, downsample_ratio=1): def polygons2masks_overlap(img_size, segments, downsample_ratio=1): """Return a (640, 640) overlap mask.""" - masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), dtype=np.uint8) + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) areas = [] ms = [] for si in range(len(segments)): From f8b74631e50bcac1bef8a52283102a5feb7217a6 Mon Sep 17 00:00:00 2001 From: FeiGeChuanShu <774074168@qq.com> Date: Tue, 20 Sep 2022 19:04:45 +0800 Subject: [PATCH 034/277] Fix `random_perspective` param bug in segment (#9512) * fix random_perspective param bug when mosaic=False Signed-off-by: FeiGeChuanShu <774074168@qq.com> * Update dataloaders.py * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: FeiGeChuanShu <774074168@qq.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/segment/dataloaders.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 49575f065752..97ef8556068e 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -140,17 +140,14 @@ def __getitem__(self, index): labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: - img, labels, segments = random_perspective( - img, - labels, - segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"], - return_seg=True, - ) + img, labels, segments = random_perspective(img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"]) nl = len(labels) # number of labels if nl: From e233c038ed63780843446dd7bf00d5cc6a2711fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 16:38:04 +0200 Subject: [PATCH 035/277] Remove `check_requirements('flatbuffers==1.12')` (#9514) * Remove `check_requirements('flatbuffers==1.12')` Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/export.py b/export.py index 04c2ed9c802d..a2aa5e830c33 100644 --- a/export.py +++ b/export.py @@ -534,8 +534,6 @@ def run( if coreml: # CoreML f[4], _ = export_coreml(model, im, file, int8, half) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - 
check_requirements('flatbuffers==1.12') # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' f[5], s_model = export_saved_model(model.cpu(), From bd35191033d52a9e48e6c8faaeaaa009243b988f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 18:47:14 +0200 Subject: [PATCH 036/277] Fix TF Lite exports (#9517) * Update tf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> From c0d97138456f2257f608c4120c8fd65abcf69326 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 19:01:03 +0200 Subject: [PATCH 037/277] TFLite fix 2 (#9518) * TFLite fix 2 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index ae58ca738e2e..0520c30a96df 100644 --- a/models/tf.py +++ b/models/tf.py @@ -310,7 +310,7 @@ def call(self, inputs): y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),) @staticmethod def _make_grid(nx=20, ny=20): From 77dcf55168d59131f75b8187c6be27172eec00ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 22:57:42 +0200 Subject: [PATCH 038/277] FROM nvcr.io/nvidia/pytorch:22.08-py3 (#9520) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 4b9367cc27db..764ee278c22e 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.07-py3 +FROM nvcr.io/nvidia/pytorch:22.08-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 6ebef288944ea3a8152f8e0c98a2aee0bd922144 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 15:12:12 +0200 Subject: [PATCH 039/277] Remove scikit-learn constraint on coremltools 6.0 (#9530) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 835346f218a4..75e7cc9e94d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export -# scikit-learn==0.19.2 # CoreML quantization +# scikit-learn # CoreML quantization # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From 499a6bf5736a1b78341dfd142bd7c82f71ebf459 Mon Sep 17 00:00:00 
2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 15:14:54 +0200 Subject: [PATCH 040/277] Update scikit-learn constraint per coremltools 6.0 (#9531) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 75e7cc9e94d3..17db73678fc1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export -# scikit-learn # CoreML quantization +# scikit-learn<=1.1.2 # CoreML quantization # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From db6847431b489a6b8d36c14f05e08970025d01a2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 17:55:25 +0200 Subject: [PATCH 041/277] Update `coremltools>=6.0` (#9532) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 17db73678fc1..55c1f2428e3f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,7 +24,7 @@ pandas>=1.1.4 seaborn>=0.11.0 # Export -------------------------------------- -# coremltools>=5.2 # CoreML export +# coremltools>=6.0 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export From 6f0284763b0f66467dc04e5a5d87e5a68d1d49cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 19:53:26 +0200 Subject: [PATCH 042/277] Update albumentations (#9503) * Add `RandomResizedCrop(ratio)` * Update ratio * Update ratio * Update ratio * Update ratio * Update ratio * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create augmentations.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update augmentations.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/augmentations.py | 27 +++++++++++++++------------ utils/dataloaders.py | 2 +- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index f49110f43c6a..7c8e0bcdede6 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -21,7 +21,7 @@ class Albumentations: # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): + def __init__(self, size=640): self.transform = None prefix = colorstr('albumentations: ') try: @@ -29,6 +29,7 @@ def __init__(self): check_version(A.__version__, '1.0.3', hard=True) # version requirement T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), @@ -303,15 +304,17 @@ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates -def classify_albumentations(augment=True, - size=224, - scale=(0.08, 1.0), - hflip=0.5, - vflip=0.0, - jitter=0.4, - mean=IMAGENET_MEAN, - std=IMAGENET_STD, - auto_aug=False): +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + 
jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): # YOLOv5 classification Albumentations (optional, only used if package is installed) prefix = colorstr('albumentations: ') try: @@ -319,7 +322,7 @@ def classify_albumentations(augment=True, from albumentations.pytorch import ToTensorV2 check_version(A.__version__, '1.0.3', hard=True) # version requirement if augment: # Resize and crop - T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] if auto_aug: # TODO: implement AugMix, AutoAug & RandAug in albumentation LOGGER.info(f'{prefix}auto augmentations are currently not supported') @@ -338,7 +341,7 @@ def classify_albumentations(augment=True, return A.Compose(T) except ImportError: # package not installed, skip - pass + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') except Exception as e: LOGGER.info(f'{prefix}{e}') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5b03b4eb9759..ee79bd0bc5a5 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -404,7 +404,7 @@ def __init__(self, self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path - self.albumentations = Albumentations() if augment else None + self.albumentations = Albumentations(size=img_size) if augment else None try: f = [] # image files From 999482b45163c1b808a187b02183f324a9c782cb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 23:08:52 +0200 Subject: [PATCH 043/277] import re (#9535) * import re Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/export.py b/export.py index a2aa5e830c33..e3cf392b0101 100644 --- a/export.py +++ b/export.py @@ -48,6 +48,7 @@ import json import os import platform +import re import subprocess import sys import time @@ -427,8 +428,6 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export check_requirements('tensorflowjs') - import re - import tensorflowjs as tfjs LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') From 489920ab30b217fed14d3ddd31c23e9afc5be238 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Sep 2022 00:34:35 +0200 Subject: [PATCH 044/277] TF.js fix (#9536) * TF.js fix May resolve https://github.com/ultralytics/yolov5/issues/9534 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index 0520c30a96df..1446d8841646 100644 --- a/models/tf.py +++ b/models/tf.py @@ -485,7 +485,7 @@ def predict(self, iou_thres, conf_thres, clip_boxes=False) - return nms, x[1] + return (nms,) return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] # x = x[0] # [x(1,6300,85), ...] 
to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes From b25d5a75f2c89aace5cae342f3fe29dfdd46e401 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Sep 2022 23:23:40 +0200 Subject: [PATCH 045/277] Refactor dataset batch-size (#9551) --- classify/predict.py | 3 +-- detect.py | 3 +-- segment/predict.py | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 4857c69766e7..ef59ff6f550a 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -91,10 +91,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/detect.py b/detect.py index 310d169281bf..4015b9ae0d7f 100644 --- a/detect.py +++ b/detect.py @@ -99,10 +99,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/segment/predict.py b/segment/predict.py index ba4cf2905255..2ea6bd9327e0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -101,10 +101,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference From 30fa9b610a3a6d9dc6a9e5961388710e5af0b704 Mon Sep 17 00:00:00 2001 From: zombob <2613669+zombob@users.noreply.github.com> Date: Fri, 23 Sep 2022 05:58:14 +0800 Subject: [PATCH 046/277] Add `--source screen` for screenshot inference (#9542) * add screenshot as source * fix: screen number support * Fix: mutiple screen specific area * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * parse screen args in LoadScreenshots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * sequence+ '_' as file name for save-txt save-crop * screenshot as stream * Update requirements.txt Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: xin Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- README.md | 1 
+ classify/predict.py | 9 +++++--- detect.py | 9 +++++--- requirements.txt | 1 + segment/predict.py | 9 +++++--- tutorial.ipynb | 1 + utils/dataloaders.py | 49 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 70 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index da8bf1dad862..1d43111d56e7 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,7 @@ the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and python detect.py --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/classify/predict.py b/classify/predict.py index ef59ff6f550a..011e7b83f09b 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, print_args, strip_optimizer) from utils.plots import Annotator @@ -52,7 +52,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(224, 224), # inference size (height, width) device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu @@ -74,6 +74,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -91,6 +92,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -187,7 +190,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') diff --git a/detect.py b/detect.py index 4015b9ae0d7f..9036b26263e5 100644 --- a/detect.py +++ b/detect.py @@ -40,7 +40,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box @@ -50,7 +50,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold @@ -82,6 +82,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -99,6 +100,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -212,7 +215,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/requirements.txt b/requirements.txt index 55c1f2428e3f..914da54e73fc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -38,6 +38,7 @@ seaborn>=0.11.0 ipython # interactive notebook psutil # system utilization thop>=0.1.1 # FLOPs computation +# mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow diff --git a/segment/predict.py b/segment/predict.py index 2ea6bd9327e0..43cebc706371 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -40,7 +40,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, 
xyxy2xywh) from utils.plots import Annotator, colors, save_one_box @@ -51,7 +51,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold @@ -84,6 +84,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -101,6 +102,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -222,7 +225,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/tutorial.ipynb b/tutorial.ipynb index 957437b2be6d..f87cccd99df8 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -445,6 +445,7 @@ "python detect.py --source 0 # webcam\n", " img.jpg # image \n", " vid.mp4 # video\n", + " screen # screenshot\n", " path/ # directory\n", " 'path/*.jpg' # glob\n", " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", diff --git a/utils/dataloaders.py b/utils/dataloaders.py index ee79bd0bc5a5..7aee0b891161 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -185,6 +185,55 @@ def __iter__(self): yield from iter(self.sampler) +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): From 1320ce183e3997c4e3a7bf23c22b9edb222519a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 23 Sep 2022 23:20:19 +0200 Subject: [PATCH 047/277] Update `is_url()` (#9566) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index dd2698f995a4..bd495068522d 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -16,13 +16,13 @@ import torch -def is_url(url, check_online=True): - # Check if online file exists +def is_url(url, check_exists=True): + # Check if string is URL and check if URL exists try: url = str(url) result = urllib.parse.urlparse(url) assert all([result.scheme, result.netloc, result.path]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check_online else True # check if exists online + return (urllib.request.urlopen(url).getcode() == 200) if check_exists else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False From d669a74623f273f74213a88b5233964d1ab3ea08 Mon Sep 17 00:00:00 2001 From: Gaz Iqbal Date: Fri, 23 Sep 2022 15:56:42 -0700 Subject: [PATCH 048/277] Detect.py supports running against a Triton container (#9228) * update coco128-seg comments * Enables detect.py to use Triton for inference Triton Inference Server is an open source inference serving software that streamlines AI inferencing. 
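As an illustrative sketch only (assuming a Triton server is already running and serving a single exported YOLOv5 model), the wrapper this patch adds in utils/triton.py can be queried directly:

import torch
from utils.triton import TritonRemoteModel  # added further down in this patch

model = TritonRemoteModel(url="grpc://localhost:8001")  # or "http://localhost:8000"
im = torch.zeros(1, 3, 640, 640)  # dummy BCHW input tensor
pred = model(im)  # torch tensor(s) returned by the remote model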
https://github.com/triton-inference-server/server The user can now provide a "--triton-url" argument to detect.py to use a local or remote Triton server for inference. For e.g., http://localhost:8000 will use http over port 8000 and grpc://localhost:8001 will use grpc over port 8001. Note, it is not necessary to specify a weights file to use Triton. A Triton container can be created by first exporting the Yolov5 model to a Triton supported runtime. Onnx, Torchscript, TensorRT are supported by both Triton and the export.py script. The exported model can then be containerized via the OctoML CLI. See https://github.com/octoml/octo-cli#getting-started for a guide. * added triton client to requirements * fixed support for TFSavedModels in Triton * reverted change * Test CoreML update Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Use pathlib Signed-off-by: Glenn Jocher * Refacto DetectMultiBackend to directly accept triton url as --weights http://... Signed-off-by: Glenn Jocher * Deploy category Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update triton.py Signed-off-by: Glenn Jocher * Update triton.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add printout and requirements check * Cleanup Signed-off-by: Glenn Jocher * triton fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed triton model query over grpc * Update check_requirements('tritonclient[all]') * group imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix likely remote URL bug * update comment * Update is_url() * Fix 2x download attempt on http://path/to/model.pt Signed-off-by: Glenn Jocher Co-authored-by: glennjocher Co-authored-by: Gaz Iqbal Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 2 +- detect.py | 8 ++--- models/common.py | 44 +++++++++++++++-------- requirements.txt | 3 ++ segment/predict.py | 2 +- utils/downloads.py | 4 +-- utils/triton.py | 85 +++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 126 insertions(+), 22 deletions(-) create mode 100644 utils/triton.py diff --git a/classify/predict.py b/classify/predict.py index 011e7b83f09b..d3bec8eea7ba 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -104,7 +104,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.Tensor(im).to(device) + im = torch.Tensor(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 if len(im.shape) == 3: im = im[None] # expand for batch dim diff --git a/detect.py b/detect.py index 9036b26263e5..e442ed75f4c7 100644 --- a/detect.py +++ b/detect.py @@ -49,7 +49,7 @@ @smart_inference_mode() def run( - weights=ROOT / 'yolov5s.pt', # model.pt path(s) + weights=ROOT / 'yolov5s.pt', # model path or triton URL source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) @@ -108,11 +108,11 @@ def run( vid_path, vid_writer = [None] 
* bs, [None] * bs # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.from_numpy(im).to(device) + im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: @@ -214,7 +214,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') diff --git a/models/common.py b/models/common.py index fac95a82fdb9..177704849d3d 100644 --- a/models/common.py +++ b/models/common.py @@ -10,6 +10,7 @@ from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path +from urllib.parse import urlparse import cv2 import numpy as np @@ -327,11 +328,13 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = self._model_type(w) # type - w = attempt_download(w) # download if not local + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w) fp16 &= pt or jit or onnx or engine # FP16 + nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA + if not (pt or triton): + w = attempt_download(w) # download if not local if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) @@ -342,7 +345,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata - model = torch.jit.load(w, _extra_files=extra_files) + model = torch.jit.load(w, _extra_files=extra_files, map_location=device) model.half() if fp16 else model.float() if extra_files['config.txt']: # load metadata dict d = json.loads(extra_files['config.txt'], @@ -472,6 +475,12 @@ def gd_outputs(gd): predictor = pdi.create_predictor(config) input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) output_names = predictor.get_output_names() + elif triton: # NVIDIA Triton Inference Server + LOGGER.info(f'Using {w} as Triton Inference Server...') + check_requirements('tritonclient[all]') + from utils.triton import TritonRemoteModel + model = TritonRemoteModel(url=w) + nhwc = model.runtime.startswith("tensorflow") else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -488,6 +497,8 @@ def forward(self, im, augment=False, visualize=False): b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != 
torch.float16: im = im.half() # to FP16 + if self.nhwc: + im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) @@ -517,7 +528,7 @@ def forward(self, im, augment=False, visualize=False): self.context.execute_v2(list(self.binding_addrs.values())) y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = im.cpu().numpy() im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) y = self.model.predict({'image': im}) # coordinates are xywh normalized @@ -532,8 +543,10 @@ def forward(self, im, augment=False, visualize=False): self.input_handle.copy_from_cpu(im) self.predictor.run() y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] + elif self.triton: # NVIDIA Triton Inference Server + y = self.model(im) else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = im.cpu().numpy() if self.saved_model: # SavedModel y = self.model(im, training=False) if self.keras else self.model(im) elif self.pb: # GraphDef @@ -566,8 +579,8 @@ def from_numpy(self, x): def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once - warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb - if any(warmup_types) and self.device.type != 'cpu': + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton + if any(warmup_types) and (self.device.type != 'cpu' or self.triton): im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup @@ -575,14 +588,17 @@ def warmup(self, imgsz=(1, 3, 640, 640)): @staticmethod def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx + # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] from export import export_formats - sf = list(export_formats().Suffix) + ['.xml'] # export suffixes - check_suffix(p, sf) # checks - p = Path(p).name # eliminate trailing separators - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, xml2 = (s in p for s in sf) - xml |= xml2 # *_openvino_model or *.xml - tflite &= not edgetpu # *.tflite - return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle + from utils.downloads import is_url + sf = list(export_formats().Suffix) # export suffixes + if not is_url(p, check=False): + check_suffix(p, sf) # checks + url = urlparse(p) # if url may be Triton inference server + types = [s in Path(p).name for s in sf] + types[8] &= not types[9] # tflite &= not edgetpu + triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + return types + [triton] @staticmethod def _load_metadata(f=Path('path/to/meta.yaml')): diff --git a/requirements.txt b/requirements.txt index 914da54e73fc..4d6ec3509efa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,6 +34,9 @@ seaborn>=0.11.0 # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export +# Deploy -------------------------------------- +# tritonclient[all]~=2.24.0 + # Extras -------------------------------------- ipython # interactive notebook psutil # system utilization diff --git a/segment/predict.py b/segment/predict.py index 43cebc706371..2e794c342de1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -114,7 +114,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.from_numpy(im).to(device) + im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: diff --git a/utils/downloads.py b/utils/downloads.py index bd495068522d..433de84b51ca 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -16,13 +16,13 @@ import torch -def is_url(url, check_exists=True): +def is_url(url, check=True): # Check if string is URL and check if URL exists try: url = str(url) result = urllib.parse.urlparse(url) assert all([result.scheme, result.netloc, result.path]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check_exists else True # check if exists online + return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False diff --git a/utils/triton.py b/utils/triton.py new file mode 100644 index 000000000000..a94ef0ad197d --- /dev/null +++ b/utils/triton.py @@ -0,0 +1,85 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" Utils to interact with the Triton Inference Server +""" + +import typing +from urllib.parse import urlparse + +import torch + + +class TritonRemoteModel: + """ A wrapper over a model served by the Triton Inference Server. It can + be configured to communicate over GRPC or HTTP. It accepts Torch Tensors + as input and returns them as outputs. + """ + + def __init__(self, url: str): + """ + Keyword arguments: + url: Fully qualified address of the Triton server - for e.g. 
grpc://localhost:8000 + """ + + parsed_url = urlparse(url) + if parsed_url.scheme == "grpc": + from tritonclient.grpc import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository.models[0].name + self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + else: + from tritonclient.http import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository[0]['name'] + self.metadata = self.client.get_model_metadata(self.model_name) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + self._create_input_placeholders_fn = create_input_placeholders + + @property + def runtime(self): + """Returns the model runtime""" + return self.metadata.get("backend", self.metadata.get("platform")) + + def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: + """ Invokes the model. Parameters can be provided via args or kwargs. + args, if provided, are assumed to match the order of inputs of the model. + kwargs are matched with the model input names. + """ + inputs = self._create_inputs(*args, **kwargs) + response = self.client.infer(model_name=self.model_name, inputs=inputs) + result = [] + for output in self.metadata['outputs']: + tensor = torch.as_tensor(response.as_numpy(output['name'])) + result.append(tensor) + return result[0] if len(result) == 1 else result + + def _create_inputs(self, *args, **kwargs): + args_len, kwargs_len = len(args), len(kwargs) + if not args_len and not kwargs_len: + raise RuntimeError("No inputs provided.") + if args_len and kwargs_len: + raise RuntimeError("Cannot specify args and kwargs at the same time") + + placeholders = self._create_input_placeholders_fn() + if args_len: + if args_len != len(placeholders): + raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + for input, value in zip(placeholders, args): + input.set_data_from_numpy(value.cpu().numpy()) + else: + for input in placeholders: + value = kwargs[input.name] + input.set_data_from_numpy(value.cpu().numpy()) + return placeholders From c8e52304cf5c34653570c5c3953ba061bc33c1af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Sep 2022 16:02:41 +0200 Subject: [PATCH 049/277] New `scale_segments()` function (#9570) * Rename scale_coords to scale_boxes * add scale_segments --- detect.py | 4 +-- models/common.py | 4 +-- segment/predict.py | 4 +-- segment/val.py | 6 ++--- utils/general.py | 46 ++++++++++++++++++++++++++------- utils/loggers/comet/__init__.py | 8 +++--- utils/plots.py | 4 +-- val.py | 6 ++--- 8 files changed, 54 insertions(+), 28 deletions(-) diff --git a/detect.py b/detect.py index e442ed75f4c7..4971033b35fb 100644 --- a/detect.py +++ b/detect.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, 
check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) + increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, smart_inference_mode @@ -148,7 +148,7 @@ def run( annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, 5].unique(): diff --git a/models/common.py b/models/common.py index 177704849d3d..273e73d9e729 100644 --- a/models/common.py +++ b/models/common.py @@ -23,7 +23,7 @@ from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, + increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode @@ -703,7 +703,7 @@ def forward(self, ims, size=640, augment=False, profile=False): self.multi_label, max_det=self.max_det) # NMS for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + scale_boxes(shape1, y[i][:, :4], shape0[i]) return Detections(ims, y, files, dt, self.names, x.shape) diff --git a/segment/predict.py b/segment/predict.py index 2e794c342de1..2241204715b5 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) + increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.segment.general import process_mask from utils.torch_utils import select_device, smart_inference_mode @@ -152,7 +152,7 @@ def run( masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, 5].unique(): diff --git a/segment/val.py b/segment/val.py index 59ab76672a30..0a37998c1771 100644 --- a/segment/val.py +++ b/segment/val.py @@ -44,7 +44,7 @@ from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_coords, xywh2xyxy, xyxy2xywh) + scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader @@ -298,12 +298,12 @@ def run( if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + 
scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) diff --git a/utils/general.py b/utils/general.py index fd0b4090a0fa..87e7e20df1ab 100644 --- a/utils/general.py +++ b/utils/general.py @@ -725,7 +725,7 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right if clip: - clip_coords(x, (h - eps, w - eps)) # warning: inplace clip + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center @@ -769,7 +769,23 @@ def resample_segments(segments, n=1000): return segments -def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): + # Rescale boxes (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + boxes[:, [0, 2]] -= pad[0] # x padding + boxes[:, [1, 3]] -= pad[1] # y padding + boxes[:, :4] /= gain + clip_boxes(boxes, img0_shape) + return boxes + + +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new @@ -778,15 +794,15 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): gain = ratio_pad[0][0] pad = ratio_pad[1] - coords[:, [0, 2]] -= pad[0] # x padding - coords[:, [1, 3]] -= pad[1] # y padding - coords[:, :4] /= gain - clip_coords(coords, img0_shape) - return coords + segments[:, 0] -= pad[0] # x padding + segments[:, 1] -= pad[1] # y padding + segments /= gain + clip_segments(segments, img0_shape) + return segments -def clip_coords(boxes, shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) +def clip_boxes(boxes, shape): + # Clip boxes (xyxy) to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually boxes[:, 0].clamp_(0, shape[1]) # x1 boxes[:, 1].clamp_(0, shape[0]) # y1 @@ -797,6 +813,16 @@ def clip_coords(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 +def clip_segments(boxes, shape): + # Clip segments (xy1,xy2,...) 
to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x + boxes[:, 1].clamp_(0, shape[0]) # y + else: # np.array (faster grouped) + boxes[:, 0] = boxes[:, 0].clip(0, shape[1]) # x + boxes[:, 1] = boxes[:, 1].clip(0, shape[0]) # y + + def non_max_suppression( prediction, conf_thres=0.25, @@ -980,7 +1006,7 @@ def apply_classifier(x, model, img, im0): d[:, :4] = xywh2xyxy(b).long() # Rescale boxes from img_size to im0 size - scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + scale_boxes(img.shape[2:], d[:, :4], im0[i].shape) # Classes pred_cls1 = d[:, 5].long() diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 3b3142b002c5..ba5cecc8e096 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -28,7 +28,7 @@ import yaml from utils.dataloaders import img2label_paths -from utils.general import check_dataset, scale_coords, xywh2xyxy +from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou COMET_PREFIX = "comet://" @@ -293,14 +293,14 @@ def preprocess_prediction(self, image, labels, shape, pred): pred[:, 5] = 0 predn = pred.clone() - scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) labelsn = None if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels + scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels - scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred return predn, labelsn diff --git a/utils/plots.py b/utils/plots.py index 51bb7d6c20af..36df271c60e1 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -20,7 +20,7 @@ from PIL import Image, ImageDraw, ImageFont from utils import TryExcept, threaded -from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, +from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, is_ascii, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness from utils.segment.general import scale_image @@ -565,7 +565,7 @@ def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) + clip_boxes(xyxy, im.shape) crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory diff --git a/val.py b/val.py index 3ab4bc3fdb58..c0954498d2fb 100644 --- a/val.py +++ b/val.py @@ -40,7 +40,7 @@ from utils.dataloaders import create_dataloader from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_coords, xywh2xyxy, xyxy2xywh) + scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, smart_inference_mode @@ -244,12 +244,12 @@ def 
run( if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct = process_batch(predn, labelsn, iouv) if plots: From f11a8a62d27c2740af5df940973d231fd5fcb038 Mon Sep 17 00:00:00 2001 From: Forever518 <1423429527@qq.com> Date: Sun, 25 Sep 2022 01:35:07 +0800 Subject: [PATCH 050/277] generator seed fix for DDP mAP drop (#9545) * Try to fix DDP mAP drop by setting generator's seed to RANK * Fix default activation bug * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 4 ++-- models/yolo.py | 2 +- utils/dataloaders.py | 5 +++-- utils/segment/dataloaders.py | 8 +++++--- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index 273e73d9e729..2fe99be8972b 100644 --- a/models/common.py +++ b/models/common.py @@ -40,13 +40,13 @@ def autopad(k, p=None, d=1): # kernel, padding, dilation class Conv(nn.Module): # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) - act = nn.SiLU() # default activation + default_act = nn.SiLU() # default activation def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = self.act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): return self.act(self.bn(self.conv(x))) diff --git a/models/yolo.py b/models/yolo.py index 1d0da2a6e010..ed21c067ee93 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -301,7 +301,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') if act: - Conv.act = eval(act) # redefine default activation, i.e. Conv.act = nn.SiLU() + Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 7aee0b891161..6cd1da6b9cf9 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -40,6 +40,7 @@ VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders # Get orientation exif tag @@ -139,7 +140,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(0) + generator.manual_seed(6148914691236517205 + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, @@ -1169,7 +1170,7 @@ def create_classification_dataloader(path, nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) generator = torch.Generator() - generator.manual_seed(0) + generator.manual_seed(6148914691236517205 + RANK) return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 97ef8556068e..a63d6ec013fd 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -17,6 +17,8 @@ from ..torch_utils import torch_distributed_zero_first from .augmentations import mixup, random_perspective +RANK = int(os.getenv('RANK', -1)) + def create_dataloader(path, imgsz, @@ -61,8 +63,8 @@ def create_dataloader(path, nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - # generator = torch.Generator() - # generator.manual_seed(0) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) return loader( dataset, batch_size=batch_size, @@ -72,7 +74,7 @@ def create_dataloader(path, pin_memory=True, collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, worker_init_fn=seed_worker, - # generator=generator, + generator=generator, ), dataset From 55fbac933bc25b3151082021fa3f10790b3b936a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 02:59:25 +0200 Subject: [PATCH 051/277] Update default GitHub assets (#9573) * Update default GitHub assets Signed-off-by: Glenn Jocher * Update downloads.py Signed-off-by: Glenn Jocher * Update downloads.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index 433de84b51ca..73b8334cb94a 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -87,9 +87,7 @@ def 
github_assets(repository, version='latest'): return file # GitHub assets - assets = [ - 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', - 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default try: tag, assets = github_assets(repo, release) except Exception: @@ -107,7 +105,6 @@ def github_assets(repository, version='latest'): safe_download( file, url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - url2=f'https://storage.googleapis.com/{repo}/{tag}/{name}', # backup url (optional) min_bytes=1E5, error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') From ee91dc9bb32d2dddc46c633b711a778a6c603143 Mon Sep 17 00:00:00 2001 From: "David A. Macey" Date: Sun, 25 Sep 2022 08:47:16 -0400 Subject: [PATCH 052/277] Update requirements.txt comment https://pytorch.org/get-started/locally/ (#9576) * Update Requirements with PyTorch CUDA Added --extra-index-url https://download.pytorch.org/whl/cu116 URL to requirements file for ease of creating venv with CUDA enabled PyTorch. Otherwise CPU PyTorch is installed an unable to use local GPUs. Signed-off-by: David A. Macey * Update requirements.txt Signed-off-by: Glenn Jocher * Update requirements.txt Signed-off-by: Glenn Jocher Signed-off-by: David A. Macey Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4d6ec3509efa..0436f415c642 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ Pillow>=7.1.2 PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 -torch>=1.7.0 +torch>=1.7.0 # see https://pytorch.org/get-started/locally/ (recommended) torchvision>=0.8.1 tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 From 2787ad701fbb308cfb494ae8fb68b0fcea0e4077 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 14:52:49 +0200 Subject: [PATCH 053/277] Add segment line predictions (#9571) * Add segment line predictions Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 20 ++++++++++++-------- utils/segment/general.py | 14 ++++++++++++++ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 2241204715b5..607a8697d731 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -42,9 +42,10 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) + increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, + strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.segment.general import process_mask +from utils.segment.general import masks2segments, process_mask from utils.torch_utils import select_device, smart_inference_mode @@ 
-145,14 +146,16 @@ def run( save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() + # Segments + if save_txt: + segments = reversed(masks2segments(masks)) + segments = [scale_segments(im.shape[2:], x, im0.shape).round() for x in segments] # Print results for c in det[:, 5].unique(): @@ -165,10 +168,10 @@ def run( im_gpu=None if retina_masks else im[i]) # Write results - for *xyxy, conf, cls in reversed(det[:, :6]): + for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + segj = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') @@ -176,6 +179,7 @@ def run( c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) + annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) diff --git a/utils/segment/general.py b/utils/segment/general.py index 36547ed0889c..655123bdcfeb 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -1,4 +1,5 @@ import cv2 +import numpy as np import torch import torch.nn.functional as F @@ -118,3 +119,16 @@ def masks_iou(mask1, mask2, eps=1e-7): intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection return intersection / (union + eps) + + +def masks2segments(masks, strategy='largest'): + # Convert masks(n,160,160) into segments(n,xy) + segments = [] + for x in masks.int().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + segments.append(c.astype('float32')) + return segments From 966b0e09f0a5261e555c2a137af2ef9d58cc9779 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 16:21:26 +0200 Subject: [PATCH 054/277] TensorRT detect.py inference fix (#9581) * Update * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Segment fix * Segment fix Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 6 ++++++ classify/predict.py | 3 ++- detect.py | 3 ++- segment/predict.py | 5 +++-- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml 
b/.github/workflows/ci-testing.yml index fffc92d1b72f..1ec68e8412f9 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -44,6 +44,12 @@ jobs: - name: Benchmark SegmentationModel run: | python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22 + - name: Test predictions + run: | + python export.py --weights ${{ matrix.model }}-cls.pt --include onnx --img 224 + python detect.py --weights ${{ matrix.model }}.onnx --img 320 + python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320 + python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 Tests: timeout-minutes: 60 diff --git a/classify/predict.py b/classify/predict.py index d3bec8eea7ba..9114aab1d703 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -89,14 +89,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/detect.py b/detect.py index 4971033b35fb..8f48d8d28000 100644 --- a/detect.py +++ b/detect.py @@ -97,14 +97,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/segment/predict.py b/segment/predict.py index 607a8697d731..94117cd78633 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -100,14 +100,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference @@ -179,7 +180,7 @@ def run( c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) - annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) + # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) From 639d82fbabed66f347a17fd39cd058bcd26a4142 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 20:12:57 +0200 Subject: [PATCH 055/277] Update Comet links (#9587) * Update Comet links Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md 
Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 4 ++-- tutorial.ipynb | 4 ++-- utils/loggers/comet/README.md | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 1d43111d56e7..1c5e123d61e7 100644 --- a/README.md +++ b/README.md @@ -168,7 +168,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
- + @@ -186,7 +186,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases |:-:|:-:|:-:|:-:|:-:| -|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) +|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) ##
Why YOLOv5
diff --git a/tutorial.ipynb b/tutorial.ipynb index f87cccd99df8..8c78af2b84cd 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -865,7 +865,7 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -874,7 +874,7 @@ "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\"yolo-ui\"" diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 7b0b8e0e2f09..3a51cb9b5a25 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! 
+Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -253,4 +253,4 @@ comet optimizer -j utils/loggers/comet/hpo.py \ Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) -hyperparameter-yolo \ No newline at end of file +hyperparameter-yolo From 9006b41498a3bc512e293061e017a518f11e9902 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 21:40:28 +0200 Subject: [PATCH 056/277] Add global YOLOv5_DATASETS_DIR (#9586) Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 87e7e20df1ab..de7871cb23f9 100644 --- a/utils/general.py +++ b/utils/general.py @@ -43,8 +43,8 @@ RANK = int(os.getenv('RANK', -1)) # Settings -DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf From 9f1cf8dd1ca79b8128d73ac144e8899f51bc5816 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 23:03:14 +0200 Subject: [PATCH 057/277] Add Paperspace Gradient badges (#9588) * Add Paperspace Gradient badges Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 4 ++-- .github/workflows/greetings.yml | 8 ++++---- README.md | 7 +++++-- tutorial.ipynb | 5 +++-- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index bb62714f003f..7e8aa6f7f087 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -12,13 +12,13 @@ [English](../README.md) | 简体中文
- CI CPU testing + YOLOv5 CI YOLOv5 Citation Docker Pulls
+ Run on Gradient Open In Colab Open In Kaggle - Join Forum

diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 91bf190eb727..5e1589c340ed 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -44,14 +44,14 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - ## Status - CI CPU testing + YOLOv5 CI + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/README.md b/README.md index 1c5e123d61e7..227735b52fac 100644 --- a/README.md +++ b/README.md @@ -12,13 +12,13 @@ English | [简体中文](.github/README_cn.md)
- CI CPU testing + YOLOv5 CI YOLOv5 Citation Docker Pulls
+ Run on Gradient Open In Colab Open In Kaggle - Join Forum

@@ -315,6 +315,9 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu Get started in seconds with our verified environments. Click each icon below for details.
+ + + diff --git a/tutorial.ipynb b/tutorial.ipynb index 8c78af2b84cd..5d867fb36c93 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -375,6 +375,7 @@ "\n", "\n", "
\n", + " \"Run\n", " \"Open\n", " \"Open\n", "
\n", @@ -945,7 +946,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Google Colab and Kaggle** notebooks with free GPU: \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" @@ -959,7 +960,7 @@ "source": [ "# Status\n", "\n", - "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", "\n", "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] From 959a4665f820362c95f7435dc05175deeff19671 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 23:26:15 +0200 Subject: [PATCH 058/277] #YOLOVISION22 announcement (#9590) * #YOLOVISION22 announcement Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index 227735b52fac..56349867e4b6 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,19 @@
+ + Hi, I'm [Glenn Jocher](https://www.linkedin.com/in/glenn-jocher/), author of [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. + + I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! + + This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, SenseTime's MMLabs, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. + + Save your spot at https://ultralytics.com/yolo-vision! + + + + +##
+
+

@@ -191,6 +206,8 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 ##

Why YOLOv5
+YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. +

YOLOv5-P5 640 Figure (click to expand) From bfe052b8e1ab398e834a62b607e7d544e1a9876f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Sep 2022 12:39:08 +0200 Subject: [PATCH 059/277] Bump actions/stale from 5 to 6 (#9595) Bumps [actions/stale](https://github.com/actions/stale) from 5 to 6. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 03d99790a4a7..9067c343608b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v5 + - uses: actions/stale@v6 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From bd9c0c42aee090b373db51c7393c972c26ed9913 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 13:27:34 +0200 Subject: [PATCH 060/277] #YOLOVISION22 update (#9598) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 56349867e4b6..514270973137 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! - This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, SenseTime's MMLabs, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. + This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, OpenMMLab's MMDetection, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. Save your spot at https://ultralytics.com/yolo-vision! From c4c0ee8fc35937cfa940fdaaaf6b9660f5b355f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 14:13:03 +0200 Subject: [PATCH 061/277] Apple MPS -> CPU NMS fallback strategy (#9600) Until more ops are fully supported this update will allow for seamless MPS inference (but slower MPS to CPU transfer before NMS, so slower NMS times). 
Partially resolves https://github.com/ultralytics/yolov5/issues/9596 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/general.py b/utils/general.py index de7871cb23f9..a855691d3a1f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -843,6 +843,8 @@ def non_max_suppression( if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output + if 'mps' in prediction.device.type: # MPS not fully supported yet, convert tensors to CPU before NMS + prediction = prediction.cpu() bs = prediction.shape[0] # batch size nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates From a5748e4b93ae6944ea813b26de6540e80141070b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 20:10:24 +0200 Subject: [PATCH 062/277] Updated Segmentation and Classification usage (#9607) * Updated Segmentation and Classification usage Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index e3cf392b0101..20c1fbc5c7b8 100644 --- a/export.py +++ b/export.py @@ -560,13 +560,20 @@ def run( # Finish f = [str(x) for x in f if x] # filter out '' and None if any(f): + tp = type(model) + dir = Path('segment' if tp is SegmentationModel else 'classify' if tp is ClassificationModel else '') + predict = 'detect.py' if tp is DetectionModel else 'predict.py' h = '--half' if half else '' # --half FP16 inference arg LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python detect.py --weights {f[-1]} {h}" - f"\nValidate: python val.py --weights {f[-1]} {h}" + f"\nDetect: python {dir / predict} --weights {f[-1]} {h}" + f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" f"\nVisualize: https://netron.app") + if tp is ClassificationModel: + LOGGER.warning("WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference") + if tp is SegmentationModel: + LOGGER.warning("WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference") return f # return list of exported files/dirs From 6b2c9d1d0f5f9acad86ff9e7043f094a071aa6fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 20:46:50 +0200 Subject: [PATCH 063/277] Update export.py Usage examples (#9609) * Update export.py Usage examples Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/export.py b/export.py index 20c1fbc5c7b8..cf37965cea6b 100644 --- a/export.py +++ b/export.py @@ -560,20 +560,17 @@ def run( # Finish f = [str(x) for x in f if x] # filter out '' and None if any(f): - tp = type(model) - dir = Path('segment' if tp is SegmentationModel else 'classify' if tp is ClassificationModel else '') - predict = 'detect.py' if tp is DetectionModel else 'predict.py' + cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, 
SegmentationModel)) # type + dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg + s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ + "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python {dir / predict} --weights {f[-1]} {h}" + f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" f"\nVisualize: https://netron.app") - if tp is ClassificationModel: - LOGGER.warning("WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference") - if tp is SegmentationModel: - LOGGER.warning("WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference") return f # return list of exported files/dirs From 1460e5715700cdb130472e1314074ff648f811d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Sep 2022 00:29:23 +0200 Subject: [PATCH 064/277] Fix `is_url('https://ultralytics.com')` (#9610) Failing on missing path, i.e. no 'www.' Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/downloads.py b/utils/downloads.py index 73b8334cb94a..60417c1f8835 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -21,7 +21,7 @@ def is_url(url, check=True): try: url = str(url) result = urllib.parse.urlparse(url) - assert all([result.scheme, result.netloc, result.path]) # check if is url + assert all([result.scheme, result.netloc]) # check if is url return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False From 7314363f26e23fc831a9a739b4031f9f0217084a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Sep 2022 16:58:14 +0200 Subject: [PATCH 065/277] Add `results.save(save_dir='path', exist_ok=False)` (#9617) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 2fe99be8972b..d889d0292c61 100644 --- a/models/common.py +++ b/models/common.py @@ -775,12 +775,12 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l def show(self, labels=True): self._run(show=True, labels=labels) # show results - def save(self, labels=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir + def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir self._run(save=True, labels=labels, save_dir=save_dir) # save results - def crop(self, save=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None + def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None return self._run(crop=True, 
save=save, save_dir=save_dir) # crop results def render(self, labels=True): From 2373d5470e386a0c63c6ab77fbee6d699665e27b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Sep 2022 18:02:48 +0200 Subject: [PATCH 066/277] NMS MPS device wrapper (#9620) * NMS MPS device wrapper May resolve https://github.com/ultralytics/yolov5/issues/9613 Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index a855691d3a1f..d31b043a113e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -843,7 +843,9 @@ def non_max_suppression( if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output - if 'mps' in prediction.device.type: # MPS not fully supported yet, convert tensors to CPU before NMS + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS prediction = prediction.cpu() bs = prediction.shape[0] # batch size nc = prediction.shape[2] - nm - 5 # number of classes @@ -930,6 +932,8 @@ def non_max_suppression( i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) if (time.time() - t) > time_limit: LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded From 799e3d0cc92a9f431d97931641e7d0b46720699a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Sep 2022 16:43:11 +0200 Subject: [PATCH 067/277] Add SegmentationModel unsupported warning (#9632) * Add SegmentationModel unsupported warning Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 4224760a4732..95b95a5c30cc 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load - from models.yolo import ClassificationModel, DetectionModel + from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device @@ -47,8 +47,11 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' + LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') + elif model.pt and isinstance(model.model, SegmentationModel): + LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. 
' + 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: From 0860e58557f26a0136dd8afbc82f408f31d15ecd Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Fri, 30 Sep 2022 02:31:45 +0530 Subject: [PATCH 068/277] Disabled upload_dataset flag temporarily due to an artifact related bug (#9652) * disabled upload_dataset flag temporarily due to an artifact related bug * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/wandb/wandb_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index e850d2ac8a7c..d2dd0fa7c6cd 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -132,6 +132,11 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type (str) -- To set the job_type for this run """ + # Temporary-fix + if opt.upload_dataset: + opt.upload_dataset = False + LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") + # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run From 82bec4c8785e123bbea01f6f2d4215c2077ac81f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 29 Sep 2022 23:35:39 +0200 Subject: [PATCH 069/277] Add NVIDIA Jetson Nano Deployment tutorial (#9656) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 514270973137..8b1c98b34e8f 100644 --- a/README.md +++ b/README.md @@ -163,6 +163,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 - [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) - [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW - [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 +- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) 🌟 NEW - [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) - [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) - [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) From 8a19437690548a158b78ab27b7f5b463a268fa19 Mon Sep 17 00:00:00 2001 From: Anant Sakhare <70131870+senhorinfinito@users.noreply.github.com> Date: Sat, 1 Oct 2022 20:12:31 +0530 Subject: [PATCH 070/277] =?UTF-8?q?Added=20cutout=20import=20from=20utils/?= =?UTF-8?q?augmentations.py=20to=20use=20Cutout=20Aug=20in=20=E2=80=A6=20(?= =?UTF-8?q?#9668)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added cutout import from utils/augmentations.py to use Cutout Aug in data loader by un-commenting line 679, 680, 681 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 6cd1da6b9cf9..d849d5150f4b 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -29,7 +29,7 @@ from tqdm import tqdm from utils.augmentations import (Albumentations, 
augment_hsv, classify_albumentations, classify_transforms, copy_paste, - letterbox, mixup, random_perspective) + cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first From 1158a50abd78808049327fdf60724b2b32726d88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Oct 2022 13:37:54 +0200 Subject: [PATCH 071/277] Simplify val.py benchmark mode with speed mode (#9674) Update --- benchmarks.py | 4 ++-- segment/val.py | 3 +-- val.py | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index b3b58eb3257c..ef5c882973f0 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -81,10 +81,10 @@ def run( # Validate if model_type == SegmentationModel: - result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) else: # DetectionModel: - result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) speed = result[2][1] # times (preprocess, inference, postprocess) y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference diff --git a/segment/val.py b/segment/val.py index 0a37998c1771..f1ec54638d61 100644 --- a/segment/val.py +++ b/segment/val.py @@ -210,8 +210,7 @@ def run( assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad = 0.0 if task in ('speed', 'benchmark') else 0.5 - rect = False if task == 'benchmark' else pt # square inference for benchmarks + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, diff --git a/val.py b/val.py index c0954498d2fb..ca838c0beb2f 100644 --- a/val.py +++ b/val.py @@ -169,8 +169,7 @@ def run( assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' 
model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad = 0.0 if task in ('speed', 'benchmark') else 0.5 - rect = False if task == 'benchmark' else pt # square inference for benchmarks + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, From c98128fe71a8676037a0605ab389c7473c743d07 Mon Sep 17 00:00:00 2001 From: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> Date: Sun, 2 Oct 2022 18:25:10 -0400 Subject: [PATCH 072/277] Allow list for Comet artifact class 'names' field (#9654) * Update __init__.py In the Comet logger, when I run train.py, it wants to download the data artifact. It was requiring me to format the 'names' field in the data artifact metadata as a dictionary, so I've changed this so that it also accepts a list. Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update utils/loggers/comet/__init__.py Co-authored-by: Dhruv Nair Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Nair Co-authored-by: Glenn Jocher --- utils/loggers/comet/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index ba5cecc8e096..b0318f88d6a6 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -353,7 +353,14 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() data_dict["path"] = artifact_save_dir - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + + metadata_names = metadata.get("names") + if type(metadata_names) == dict: + data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + elif type(metadata_names) == list: + data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + else: + raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" data_dict = self.update_data_paths(data_dict) return data_dict From 68d654d8c4d473aa81be91ac42f320009736992b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Oct 2022 16:31:51 +0200 Subject: [PATCH 073/277] [pre-commit.ci] pre-commit suggestions (#9685) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v2.37.3 → v2.38.2](https://github.com/asottile/pyupgrade/compare/v2.37.3...v2.38.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ba8005535397..1cd102c26b41 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.37.3 + rev: v2.38.2 hooks: - id: pyupgrade name: Upgrade code From e4398cf179601d47207e9f526cf0760b82058930 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Oct 2022 16:32:19 +0200 Subject: [PATCH 074/277] TensorRT `--dynamic` fix (#9691) * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index cf37965cea6b..66d4d636133a 100644 --- a/export.py +++ b/export.py @@ -251,11 +251,11 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 onnx = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') @@ -285,7 +285,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) From 7f097ddb6c9921d64fa504a8db79cf24fa0a913c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Oct 2022 22:29:46 +0200 Subject: [PATCH 075/277] FROM nvcr.io/nvidia/pytorch:22.09-py3 (#9711) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 764ee278c22e..9b93fad7b203 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image 
https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.08-py3 +FROM nvcr.io/nvidia/pytorch:22.09-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 5ef69ef3e6180709bc292370ed314b6029ecabfc Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Thu, 6 Oct 2022 14:55:15 -0600 Subject: [PATCH 076/277] Error in utils/segment/general `masks2segments()` (#9724) When running segmentation predict on gpu, the conversion from tensor to numpy fails. Calling `.cpu()` solves this problem. Signed-off-by: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Signed-off-by: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> --- utils/segment/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 655123bdcfeb..43bdc460f928 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -124,7 +124,7 @@ def masks_iou(mask1, mask2, eps=1e-7): def masks2segments(masks, strategy='largest'): # Convert masks(n,160,160) into segments(n,xy) segments = [] - for x in masks.int().numpy().astype('uint8'): + for x in masks.int().cpu().numpy().astype('uint8'): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] if strategy == 'concat': # concatenate all segments c = np.concatenate([x.reshape(-1, 2) for x in c]) From 209be932dec9e89b902f0ac2975fa599e9bc676f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Oct 2022 23:51:29 +0200 Subject: [PATCH 077/277] Fix segment evolution keys (#9742) * Update * Cleanup --- segment/train.py | 2 +- train.py | 4 +++- utils/general.py | 5 ++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/segment/train.py b/segment/train.py index 5121c5fa784a..26f0d0c13c78 100644 --- a/segment/train.py +++ b/segment/train.py @@ -651,7 +651,7 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(results, hyp.copy(), save_dir, opt.bucket) + print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) diff --git a/train.py b/train.py index 9efece250581..177e081c8c37 100644 --- a/train.py +++ b/train.py @@ -607,7 +607,9 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(results, hyp.copy(), save_dir, opt.bucket) + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) diff --git a/utils/general.py b/utils/general.py index d31b043a113e..e2faca9dbf2a 100644 --- a/utils/general.py +++ b/utils/general.py @@ -957,11 +957,10 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") -def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): +def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): evolve_csv = save_dir / 'evolve.csv' evolve_yaml = save_dir / 'hyp_evolve.yaml' - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', - 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = 
tuple(keys) + tuple(hyp.keys()) # [results + hyps] keys = tuple(x.strip() for x in keys) vals = results + tuple(hyp.values()) n = len(keys) From 2f1eb21ad6c0f715f38200c31e6e01a92c5acb25 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 Oct 2022 14:54:21 +0200 Subject: [PATCH 078/277] Remove #YOLOVISION22 notice (#9751) Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/README.md b/README.md index 8b1c98b34e8f..8c19e52c45d7 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,4 @@
- - Hi, I'm [Glenn Jocher](https://www.linkedin.com/in/glenn-jocher/), author of [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. - - I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! - - This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, OpenMMLab's MMDetection, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. - - Save your spot at https://ultralytics.com/yolo-vision! - - - - -##
-
-

From 7a69035eb8a15f44a1dc8f1e07ee71b674e98271 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 Oct 2022 12:53:12 +0200 Subject: [PATCH 079/277] Update Loggers (#9760) * Update * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update requirements.txt Signed-off-by: Glenn Jocher * Update * Update README.md Signed-off-by: Glenn Jocher * Update Signed-off-by: Glenn Jocher --- README.md | 16 ++++++---------- requirements.txt | 2 +- tutorial.ipynb | 25 +++---------------------- utils/docker/Dockerfile | 2 +- utils/loggers/__init__.py | 14 +++++++------- utils/loggers/wandb/wandb_utils.py | 2 +- 6 files changed, 19 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index 8c19e52c45d7..8f45ccd229b5 100644 --- a/README.md +++ b/README.md @@ -155,7 +155,6 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 - [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) - [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) - [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW -- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289) - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW - [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW @@ -171,23 +170,20 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12

- + - + - + - - -
-|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases -|:-:|:-:|:-:|:-:|:-:| -|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) +|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow| +|:-:|:-:|:-:|:-:| +|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)| ##
Why YOLOv5
diff --git a/requirements.txt b/requirements.txt index 0436f415c642..52f7b9ea57d2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,8 +16,8 @@ tqdm>=4.64.0 # Logging ------------------------------------- tensorboard>=2.4.1 -# wandb # clearml +# comet # Plotting ------------------------------------ pandas>=1.1.4 diff --git a/tutorial.ipynb b/tutorial.ipynb index 5d867fb36c93..63abebc5b37f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -655,7 +655,7 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML', 'W&B']\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", "\n", "if logger == 'TensorBoard':\n", " %load_ext tensorboard\n", @@ -664,10 +664,7 @@ " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init\n", - "elif logger == 'W&B':\n", - " %pip install -q wandb\n", - " import wandb; wandb.login()" + " %pip install -q clearml && clearml-init" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -699,7 +696,7 @@ "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet' to automatically track and visualize YOLOv5 🚀 runs with Comet\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", @@ -905,22 +902,6 @@ "id": "Lay2WsTjNJzP" } }, - { - "cell_type": "markdown", - "metadata": { - "id": "DLI1JmHU7B0l" - }, - "source": [ - "## Weights & Biases Logging\n", - "\n", - "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", - "\n", - "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). 
\n", - "\n", - "\n", - "\"Weights" - ] - }, { "cell_type": "markdown", "metadata": { diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 9b93fad7b203..be5c2fb71517 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 941d09e19e2d..bc8dd7621579 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -84,10 +84,10 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - if not wandb: - prefix = colorstr('Weights & Biases: ') - s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - self.logger.info(s) + # if not wandb: + # prefix = colorstr('Weights & Biases: ') + # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" + # self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" @@ -110,9 +110,9 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt, run_id) # temp warn. because nested artifacts not supported after 0.12.10 - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - self.logger.warning(s) + # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): + # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
+ # self.logger.warning(s) else: self.wandb = None diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index d2dd0fa7c6cd..238f4edbf2a0 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -135,7 +135,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): # Temporary-fix if opt.upload_dataset: opt.upload_dataset = False - LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") + # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") # Pre-training routine -- self.job_type = job_type From 85ae985b6a232f3a3e2f7400243cec2ca0b5f8d1 Mon Sep 17 00:00:00 2001 From: Vladislav Veklenko <71467601+vladoossss@users.noreply.github.com> Date: Thu, 13 Oct 2022 01:44:01 +0200 Subject: [PATCH 080/277] update mask2segments and saving results (#9785) * update mask2segments and saving results * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/segment/general.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 43bdc460f928..b526333dc5a1 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -126,9 +126,12 @@ def masks2segments(masks, strategy='largest'): segments = [] for x in masks.int().cpu().numpy().astype('uint8'): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] - if strategy == 'concat': # concatenate all segments - c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment - c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + if c: + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found segments.append(c.astype('float32')) return segments From 16f87bb38e76a5aa14ee93252042063b678ece86 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Oct 2022 02:32:06 +0200 Subject: [PATCH 081/277] HUB VOC fix (#9792) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/general.py b/utils/general.py index e2faca9dbf2a..d9d54d9e4f71 100644 --- a/utils/general.py +++ b/utils/general.py @@ -477,6 +477,7 @@ def check_dataset(data, autodownload=True): path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' 
if not path.is_absolute(): path = (ROOT / path).resolve() + data['path'] = path # download scripts for k in 'train', 'val', 'test': if data.get(k): # prepend path if isinstance(data[k], str): From 15b75659ddc2552bd9239db8a3c940322da49b80 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Oct 2022 15:27:16 +0200 Subject: [PATCH 082/277] Update hubconf.py local repo Usage example (#9803) * Update hubconf.py Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 95b95a5c30cc..2c6ec13f815c 100644 --- a/hubconf.py +++ b/hubconf.py @@ -4,8 +4,10 @@ Usage: import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') - model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # custom model from branch + model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch + model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model + model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo """ import torch From 2a19d070d8a92bbf44dca8a40c503ec7406228d9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Oct 2022 12:28:52 +0200 Subject: [PATCH 083/277] Fix xView dataloaders import (#9807) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/xView.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/xView.yaml b/data/xView.yaml index b134ceac8164..770ab7870449 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -87,7 +87,7 @@ download: | from PIL import Image from tqdm import tqdm - from utils.datasets import autosplit + from utils.dataloaders import autosplit from utils.general import download, xyxy2xywhn From df80e7c723b5722fe5b8d935ace73b8b28572ed4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Oct 2022 18:18:58 +0200 Subject: [PATCH 084/277] Argoverse HUB fix (#9809) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/Argoverse.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index e3e9ba161ed0..558151dc849e 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -63,7 +63,7 @@ download: | # Download - dir = Path('../datasets/Argoverse') # dataset root dir + dir = Path(yaml['path']) # dataset root dir urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] download(urls, dir=dir, delete=False) From e42c89d4efc99bfbd8c5c208ffe67c11632da84a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 16 Oct 2022 20:51:32 +0200 Subject: [PATCH 085/277] `smart_optimizer()` revert to weight with decay (#9817) If a parameter does not fall into any other category Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 9f257d06ac60..04a3873854ee 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -319,12 +319,13 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): g = [], [], [] # optimizer parameter groups bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() for v in model.modules(): - if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) - g[2].append(v.bias) - if isinstance(v, bn): # weight (no decay) - g[1].append(v.weight) - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) - g[0].append(v.weight) + for p_name, p in v.named_parameters(recurse=0): + if p_name == 'bias': # bias (no decay) + g[2].append(p) + elif p_name == 'weight' and isinstance(v, bn): # weight (no decay) + g[1].append(p) + else: + g[0].append(p) # weight (with decay) if name == 'Adam': optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum From e3ff7806769444de864060494d1be8e18ce046a1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 Oct 2022 14:34:33 +0200 Subject: [PATCH 086/277] Allow PyTorch Hub results to display in notebooks (#9825) * Allow PyTorch Hub results to display in notebooks * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * fix CI Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 2 +- detect.py | 2 +- models/common.py | 13 +++++++++---- segment/predict.py | 2 +- utils/__init__.py | 2 +- utils/autoanchor.py | 2 +- utils/general.py | 17 +++++++++++++---- utils/metrics.py | 2 +- 8 files changed, 28 insertions(+), 14 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 9114aab1d703..9373649bf27d 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -91,7 +91,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/detect.py b/detect.py index 8f48d8d28000..98af7235ea69 100644 --- a/detect.py +++ b/detect.py @@ -99,7 +99,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/models/common.py b/models/common.py index d889d0292c61..e6da429de3e5 100644 --- a/models/common.py +++ b/models/common.py @@ -18,16 +18,20 @@ import requests import torch import torch.nn as nn +from IPython.display import display from PIL import Image from torch.cuda import amp +from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, - yaml_load) +from utils.general import (LOGGER, ROOT, Profile, check_imshow, check_requirements, check_suffix, check_version, + colorstr, increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, + xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, 
smart_inference_mode +CHECK_IMSHOW = check_imshow() + def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs @@ -756,7 +760,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - im.show(self.files[i]) # show + im.show(self.files[i]) if CHECK_IMSHOW else display(im) if save: f = self.files[i] im.save(save_dir / f) # save @@ -772,6 +776,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l LOGGER.info(f'Saved results to {save_dir}\n') return crops + @TryExcept('Showing images is not supported in this environment') def show(self, labels=True): self._run(show=True, labels=labels) # show results diff --git a/segment/predict.py b/segment/predict.py index 94117cd78633..44d6d3904c19 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -102,7 +102,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/utils/__init__.py b/utils/__init__.py index 8403a6149827..0afe6f475625 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -23,7 +23,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(emojis(f'{self.msg}{value}')) + print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) return True diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 7e7e9985d68a..cfc4c276e3aa 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -26,7 +26,7 @@ def check_anchor_order(m): m.anchors[:] = m.anchors.flip(0) -@TryExcept(f'{PREFIX}ERROR: ') +@TryExcept(f'{PREFIX}ERROR') def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() diff --git a/utils/general.py b/utils/general.py index d9d54d9e4f71..76bc0b1d7a79 100644 --- a/utils/general.py +++ b/utils/general.py @@ -27,6 +27,7 @@ from zipfile import ZipFile import cv2 +import IPython import numpy as np import pandas as pd import pkg_resources as pkg @@ -73,6 +74,12 @@ def is_colab(): return 'COLAB_GPU' in os.environ +def is_notebook(): + # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace + ipython_type = str(type(IPython.get_ipython())) + return 'colab' in ipython_type or 'zmqshell' in ipython_type + + def is_kaggle(): # Is environment a Kaggle Notebook? 
return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' @@ -383,18 +390,20 @@ def check_img_size(imgsz, s=32, floor=0): return new_size -def check_imshow(): +def check_imshow(warn=False): # Check if environment supports image displays try: - assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' - assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' + assert not is_notebook() + assert not is_docker() + assert 'NoneType' not in str(type(IPython.get_ipython())) # SSH terminals, GitHub CI cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: - LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') return False diff --git a/utils/metrics.py b/utils/metrics.py index ed611d7d38fa..f0bc787e1518 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn From acff977af3a6e23e9c25e932208efed73f9b7810 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 Oct 2022 15:30:42 +0200 Subject: [PATCH 087/277] Logger Cleanup (#9828) --- segment/train.py | 12 ------------ train.py | 4 +--- utils/general.py | 2 +- 3 files changed, 2 insertions(+), 16 deletions(-) diff --git a/segment/train.py b/segment/train.py index 26f0d0c13c78..5a5f15f10d84 100644 --- a/segment/train.py +++ b/segment/train.py @@ -91,17 +91,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio data_dict = None if RANK in {-1, 0}: logger = GenericLogger(opt=opt, console_logger=LOGGER) - # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - # if loggers.clearml: - # data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML - # if loggers.wandb: - # data_dict = loggers.wandb.data_dict - # if resume: - # weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size - # - # # Register actions - # for k in methods(loggers): - # callbacks.register_action(k, callback=getattr(loggers, k)) # Config plots = not evolve and not opt.noplots # create plots @@ -400,7 +389,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, 'opt': vars(opt), 'date': datetime.now().isoformat()} diff --git a/train.py b/train.py index 177e081c8c37..c24a8e81531d 100644 --- a/train.py +++ b/train.py @@ -53,7 +53,6 @@ one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers from utils.loggers.comet.comet_utils import check_comet_resume -from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve @@ -375,7 +374,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 
'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, 'opt': vars(opt), 'date': datetime.now().isoformat()} @@ -483,7 +481,7 @@ def main(opt, callbacks=Callbacks()): check_requirements() # Resume (from specified or most recent last.pt) - if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve: + if opt.resume and not check_comet_resume(opt) and not opt.evolve: last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset diff --git a/utils/general.py b/utils/general.py index 76bc0b1d7a79..8ea0ad07ed13 100644 --- a/utils/general.py +++ b/utils/general.py @@ -956,7 +956,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 From f1482b0667a7cb116fde43132c1e140a9f3cee20 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 13:54:33 +0200 Subject: [PATCH 088/277] Remove ipython from `check_requirements` exclude list (#9841) May resolve https://github.com/ultralytics/yolov5/commit/e3ff7806769444de864060494d1be8e18ce046a1#commitcomment-87136818 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 2c6ec13f815c..41af8e39d14d 100644 --- a/hubconf.py +++ b/hubconf.py @@ -39,7 +39,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop')) + check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: From 010cd0db7d491484caae3c31754b2cf13156baa7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 15:25:21 +0200 Subject: [PATCH 089/277] Update HUBDatasetStats() usage examples (#9842) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index d849d5150f4b..5074d25ee268 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1005,13 +1005,18 @@ def verify_image_label(args): class HUBDatasetStats(): - """ Return dataset statistics dictionary with images and instances counts per split per class - To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) - Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') + """ Class for generating HUB dataset JSON and `-hub` dataset directory + Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally + + Usage + from utils.dataloaders import HUBDatasetStats + stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 + stats = HUBDatasetStats('path/to/coco128.zip') # usage 2 + stats.get_json(save=False) + 
stats.process_images() """ def __init__(self, path='coco128.yaml', autodownload=False): From d0df6c840372b77a7c075f2231914f53112e79eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 16:35:04 +0200 Subject: [PATCH 090/277] Update ZipFile to context manager (#9843) * Update zipFile to context manager * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 6 ++-- utils/downloads.py | 81 -------------------------------------------- utils/general.py | 14 ++++++-- 3 files changed, 15 insertions(+), 86 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5074d25ee268..37b3ffb2728b 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -17,7 +17,6 @@ from pathlib import Path from threading import Thread from urllib.parse import urlparse -from zipfile import ZipFile import numpy as np import torch @@ -31,7 +30,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) + cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, + xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -1053,7 +1053,7 @@ def _unzip(self, path): if not str(path).endswith('.zip'): # path is data.yaml return False, None, path assert Path(path).is_file(), f'Error unzipping {path}, file not found' - ZipFile(path).extractall(path=path.parent) # unzip + unzip_file(path, path=path.parent) dir = path.with_suffix('') # dataset directory == zip name assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path diff --git a/utils/downloads.py b/utils/downloads.py index 60417c1f8835..21bb6608d5ba 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -5,12 +5,9 @@ import logging import os -import platform import subprocess -import time import urllib from pathlib import Path -from zipfile import ZipFile import requests import torch @@ -109,81 +106,3 @@ def github_assets(repository, version='latest'): error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') return str(file) - - -def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') - if file.exists(): - file.unlink() # remove existing file - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Error check - if r != 0: - if file.exists(): - file.unlink() # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... ', end='') - ZipFile(file).extractall(path=file.parent) # unzip - file.unlink() # remove zip - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - - -# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- -# -# -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/utils/general.py b/utils/general.py index 8ea0ad07ed13..fb8484ce434e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -511,7 +511,7 @@ def check_dataset(data, autodownload=True): LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root - ZipFile(f).extractall(path=DATASETS_DIR) # unzip + unzip_file(f, path=DATASETS_DIR) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script @@ -566,6 +566,16 @@ def yaml_save(file='data.yaml', data={}): yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): + # Unzip a *.zip file to path/, excluding files containing strings in exclude list + if path is None: + path = Path(file).parent # default path + with ZipFile(file) as zipObj: + for f in zipObj.namelist(): # list all archived filenames in the zip + if all(x not in f for x in exclude): + zipObj.extract(f, path=path) + + def url2file(url): # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ @@ -601,7 +611,7 @@ def download_one(url, dir): if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': - ZipFile(f).extractall(path=dir) # unzip + unzip_file(f, dir) # unzip elif f.suffix == '.tar': os.system(f'tar xf {f} --directory {f.parent}') # unzip elif f.suffix == '.gz': From c4710012d83ec46f1759b38555c989e3c23ea727 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 21:22:37 +0200 Subject: [PATCH 091/277] Update README.md (#9846) @pderrenger Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 8f45ccd229b5..52f2854dd601 100644 --- a/README.md +++ b/README.md @@ -168,22 +168,22 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
+ + + + + + - - - - - -
-|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow| +|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Why YOLOv5
From 6371de8879e7ad7ec5283e8b95cc6dd85d6a5e72 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 22:26:53 +0200 Subject: [PATCH 092/277] Webcam show fix (#9847) * Webcam show fix Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 8 +++----- utils/general.py | 1 - 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index e6da429de3e5..ba18cbce7429 100644 --- a/models/common.py +++ b/models/common.py @@ -24,14 +24,12 @@ from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_imshow, check_requirements, check_suffix, check_version, - colorstr, increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, +from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, + increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode -CHECK_IMSHOW = check_imshow() - def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs @@ -760,7 +758,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - im.show(self.files[i]) if CHECK_IMSHOW else display(im) + display(im) if is_notebook() else im.show(self.files[i]) if save: f = self.files[i] im.save(save_dir / f) # save diff --git a/utils/general.py b/utils/general.py index fb8484ce434e..e1823b50ac56 100644 --- a/utils/general.py +++ b/utils/general.py @@ -395,7 +395,6 @@ def check_imshow(warn=False): try: assert not is_notebook() assert not is_docker() - assert 'NoneType' not in str(type(IPython.get_ipython())) # SSH terminals, GitHub CI cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() From 3b1a9d22a45f1e16e21c8e8ebec9ccd17068cd08 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Oct 2022 19:54:07 +0200 Subject: [PATCH 093/277] Fix OpenVINO Usage example (#9874) * Fix OpenVINO Usage example * Fix OpenVINO Usage example --- classify/predict.py | 2 +- classify/val.py | 2 +- detect.py | 2 +- export.py | 2 +- models/common.py | 4 ++-- segment/predict.py | 2 +- segment/val.py | 4 ++-- val.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 9373649bf27d..96508d633da8 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -15,7 +15,7 @@ $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch yolov5s-cls.torchscript # TorchScript yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls.xml # OpenVINO + yolov5s-cls_openvino_model # OpenVINO yolov5s-cls.engine # TensorRT yolov5s-cls.mlmodel # CoreML (macOS-only) yolov5s-cls_saved_model # TensorFlow SavedModel diff --git a/classify/val.py b/classify/val.py index 3c16ec8092d8..c0b507785fb0 100644 --- a/classify/val.py +++ b/classify/val.py @@ -10,7 +10,7 @@ $ python classify/val.py --weights yolov5s-cls.pt # PyTorch yolov5s-cls.torchscript # TorchScript yolov5s-cls.onnx # ONNX Runtime or OpenCV 
DNN with --dnn - yolov5s-cls.xml # OpenVINO + yolov5s-cls_openvino_model # OpenVINO yolov5s-cls.engine # TensorRT yolov5s-cls.mlmodel # CoreML (macOS-only) yolov5s-cls_saved_model # TensorFlow SavedModel diff --git a/detect.py b/detect.py index 98af7235ea69..8e42fbe159d0 100644 --- a/detect.py +++ b/detect.py @@ -15,7 +15,7 @@ $ python detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel diff --git a/export.py b/export.py index 66d4d636133a..93845a0c14fa 100644 --- a/export.py +++ b/export.py @@ -28,7 +28,7 @@ $ python detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel diff --git a/models/common.py b/models/common.py index ba18cbce7429..af8132fffb7a 100644 --- a/models/common.py +++ b/models/common.py @@ -318,7 +318,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, # TorchScript: *.torchscript # ONNX Runtime: *.onnx # ONNX OpenCV DNN: *.onnx --dnn - # OpenVINO: *.xml + # OpenVINO: *_openvino_model # CoreML: *.mlmodel # TensorRT: *.engine # TensorFlow SavedModel: *_saved_model @@ -469,7 +469,7 @@ def gd_outputs(gd): check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') import paddle.inference as pdi if not Path(w).is_file(): # if not *.pdmodel - w = next(Path(w).rglob('*.pdmodel')) # get *.xml file from *_openvino_model dir + w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir weights = Path(w).with_suffix('.pdiparams') config = pdi.Config(str(w), str(weights)) if cuda: diff --git a/segment/predict.py b/segment/predict.py index 44d6d3904c19..3ae68240726a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -15,7 +15,7 @@ $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg.xml # OpenVINO + yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel diff --git a/segment/val.py b/segment/val.py index f1ec54638d61..a875b3b79907 100644 --- a/segment/val.py +++ b/segment/val.py @@ -4,13 +4,13 @@ Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) - $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640- # validate COCO-segments + $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg.xml # OpenVINO + yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel diff --git a/val.py b/val.py index ca838c0beb2f..127acf810029 100644 --- a/val.py +++ b/val.py @@ -9,7 +9,7 @@ $ python val.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # 
OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel From eef90572bf11602b17816a1721980cdb07a95eb2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Oct 2022 20:16:58 +0200 Subject: [PATCH 094/277] ClearML Dockerfile fix (#9876) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index be5c2fb71517..05776510e160 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From fba61e55836273847947498c01314499d8e5e7dc Mon Sep 17 00:00:00 2001 From: SSTato <1210546396@qq.com> Date: Mon, 24 Oct 2022 22:20:47 +0800 Subject: [PATCH 095/277] Windows Python 3.7 .isfile() fix (#9879) * Update dataloaders.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update dataloaders.py Signed-off-by: SSTato <1210546396@qq.com> Signed-off-by: SSTato <1210546396@qq.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 2 +- utils/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 37b3ffb2728b..403252ff6227 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -344,7 +344,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr self.img_size = img_size self.stride = stride self.vid_stride = vid_stride # video frame-rate stride - sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] n = len(sources) self.sources = [clean_str(x) for x in sources] # clean source names for later self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n diff --git a/utils/general.py b/utils/general.py index e1823b50ac56..46978f1b8d7b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -426,12 +426,12 @@ def check_file(file, suffix=''): # Search/download file (if necessary) and return path check_suffix(file, suffix) # optional file = str(file) # convert to str() - if Path(file).is_file() or not file: # exists + if os.path.isfile(file) or not file: # exists return file elif file.startswith(('http:/', 'https:/')): # download url = file # warning: Pathlib turns :// -> :/ file = 
Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth - if Path(file).is_file(): + if os.path.isfile(file): LOGGER.info(f'Found {url} locally at {file}') # file already exists else: LOGGER.info(f'Downloading {url} to {file}...') @@ -586,7 +586,7 @@ def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry def download_one(url, dir): # Download 1 file success = True - if Path(url).is_file(): + if os.path.isfile(url): f = Path(url) # filename else: # does not exist f = dir / Path(url).name From 54f49fa581aac1d9909636bfc13f94001b08b55b Mon Sep 17 00:00:00 2001 From: paradigm Date: Tue, 25 Oct 2022 17:53:22 +0200 Subject: [PATCH 096/277] Add TFLite Metadata to TFLite and Edge TPU models (#9903) * added embedded meta data to tflite models * added try block for inference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactored tfite meta data into separate function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Creat tmp file in /tmp * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py * Update export.py * Update export.py * Update export.py * Update common.py * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 39 +++++++++++++++++++++++++++++++++++++-- models/common.py | 9 +++++++++ 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 93845a0c14fa..e43d9b730fc6 100644 --- a/export.py +++ b/export.py @@ -45,6 +45,7 @@ """ import argparse +import contextlib import json import os import platform @@ -453,6 +454,39 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): return f, None +def add_tflite_metadata(file, metadata, num_outputs): + # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata + with contextlib.suppress(ImportError): + # check_requirements('tflite_support') + from tflite_support import flatbuffers + from tflite_support import metadata as _metadata + from tflite_support import metadata_schema_py_generated as _metadata_fb + + tmp_file = Path('/tmp/meta.txt') + with open(tmp_file, 'w') as meta_f: + meta_f.write(str(metadata)) + + model_meta = _metadata_fb.ModelMetadataT() + label_file = _metadata_fb.AssociatedFileT() + label_file.name = tmp_file.name + model_meta.associatedFiles = [label_file] + + subgraph = _metadata_fb.SubGraphMetadataT() + subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()] + subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs + model_meta.subgraphMetadata = [subgraph] + + b = flatbuffers.Builder(0) + b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) + metadata_buf = b.Output() + + populator = _metadata.MetadataPopulator.with_model_file(file) + populator.load_metadata_buffer(metadata_buf) + populator.load_associated_files([str(tmp_file)]) + populator.populate() + tmp_file.unlink() + + @smart_inference_mode() def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -550,8 +584,9 @@ def run( f[6], _ = export_pb(s_model, file) if tflite or edgetpu: f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, 
data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8], _ = export_edgetpu(file) + if edgetpu: + f[8], _ = export_edgetpu(file) + add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) if tfjs: f[9], _ = export_tfjs(file) if paddle: # PaddlePaddle diff --git a/models/common.py b/models/common.py index af8132fffb7a..6347e51cdf0b 100644 --- a/models/common.py +++ b/models/common.py @@ -3,10 +3,13 @@ Common modules """ +import ast +import contextlib import json import math import platform import warnings +import zipfile from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path @@ -462,6 +465,12 @@ def gd_outputs(gd): interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs + # load metadata + with contextlib.suppress(zipfile.BadZipFile): + with zipfile.ZipFile(w, "r") as model: + meta_file = model.namelist()[0] + meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') elif paddle: # PaddlePaddle From 8236d8818bca21c692d5c4508fee2af835ec1dbe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 Oct 2022 18:13:48 +0200 Subject: [PATCH 097/277] Created using Colaboratory --- tutorial.ipynb | 141 +++---------------------------------------------- 1 file changed, 6 insertions(+), 135 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 63abebc5b37f..10e14b9b1208 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -954,7 +954,7 @@ "source": [ "# Appendix\n", "\n", - "Additional content below for PyTorch Hub, CI, reproducing results, profiling speeds, VOC training, classification training and TensorRT example." + "Additional content below." ] }, { @@ -963,145 +963,16 @@ "id": "GMusP4OAxFu6" }, "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "# PyTorch Hub Model\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom\n", - "\n", - "# Images\n", - "img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list\n", - "\n", - "# Inference\n", - "results = model(img)\n", - "\n", - "# Results\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
], "execution_count": null, "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "FGH0ZjkGjejy" - }, - "source": [ - "# YOLOv5 CI\n", - "%%shell\n", - "rm -rf runs # remove runs/\n", - "m=yolov5n # official weights\n", - "b=runs/train/exp/weights/best # best.pt checkpoint\n", - "python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device 0 # train\n", - "for d in 0 cpu; do # devices\n", - " for w in $m $b; do # weights\n", - " python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val\n", - " python detect.py --imgsz 64 --weights $w.pt --device $d # detect\n", - " done\n", - "done\n", - "python hubconf.py --model $m # hub\n", - "python models/tf.py --weights $m.pt # build TF model\n", - "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", - "python export.py --weights $m.pt --img 64 --include torchscript # export" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "mcKoSIK2WSzj" - }, - "source": [ - "# Reproduce\n", - "for x in (f'yolov5{x}' for x in 'nsmlx'):\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "gogI-kwi3Tye" - }, - "source": [ - "# Profile\n", - "from utils.torch_utils import profile\n", - "\n", - "m1 = lambda x: x * torch.sigmoid(x)\n", - "m2 = torch.nn.SiLU()\n", - "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "BSgFCAcMbk1R" - }, - "source": [ - "# VOC\n", - "for b, m in zip([64, 64, 64, 32, 16], [f'yolov5{x}' for x in 'nsmlx']): # batch, model\n", - " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification train\n", - "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'resnet101.pt', 'efficientnet_b0.pt', 'efficientnet_b1.pt']:\n", - " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", - " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" - ], - "metadata": { - "id": "UWGH7H6yakVl" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification val\n", - "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G - 50000 images)\n", - "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" - ], - "metadata": { - "id": "yYgOiFNHZx-1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40000 images, test 20000)\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" - ], - "metadata": { - "id": "aq4DPWGu0Bl1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "VTRwsvA9u7ln" - }, - "source": [ - "# TensorRT \n", - "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # install\n", - "!python export.py --weights yolov5s.pt --include engine --imgsz 640 --device 0 # export\n", - "!python detect.py --weights yolov5s.engine --imgsz 640 --device 0 # inference" - ], - "execution_count": null, - "outputs": [] } ] -} +} \ No newline at end of file From a5d875adcac05f8f68329c2cb742aba742d1953d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 13:42:52 +0200 Subject: [PATCH 098/277] Add `gnupg` to Dockerfile-cpu (#9932) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index d6fac645dba1..f3f81ec02c23 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -11,7 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 # Install pip packages From f9bb984e817a71a90490ed3a4655fb7ad408d8fb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 14:06:05 +0200 Subject: [PATCH 099/277] Add ClearML minimum version requirement (#9933) * Add ClearML minimum version requirement Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- utils/loggers/clearml/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 52f7b9ea57d2..8cb1bd4c6fe1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,7 +16,7 @@ tqdm>=4.64.0 # Logging ------------------------------------- tensorboard>=2.4.1 -# clearml +# clearml>=1.2.0 # comet # Plotting ------------------------------------ diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 64eef6befc93..e0c5824bc2a2 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -54,7 +54,7 @@ That's it! You're done 😎 To enable ClearML experiment tracking, simply install the ClearML pip package. ```bash -pip install clearml +pip install clearml>=1.2.0 ``` This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. 
If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` From 32a92185738c93e5f0b0f6971de0812cd6fd5f34 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 23:51:40 +0200 Subject: [PATCH 100/277] Update Comet Integrations table text (#9937) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 52f2854dd601..dc21ad8d6639 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Why YOLOv5
From 38e5aae9a20522b69e21629f1558ab8902b351f6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 17:37:25 +0200 Subject: [PATCH 101/277] Update README.md (#9957) * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 7 +------ README.md | 12 ++++-------- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 7e8aa6f7f087..981fd8a5b820 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,12 +1,7 @@



[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index dc21ad8d6639..98cad8de4294 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,7 @@



English | [简体中文](.github/README_cn.md) @@ -23,8 +18,9 @@

- YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. + YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. To request a commercial license please complete the form at Ultralytics Licensing. +

From be9ef3871e85d6e06b736f08a1c9f1d01998afe6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 17:48:41 +0200 Subject: [PATCH 102/277] Update README.md (#9958) * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 2 +- README.md | 2 +- tutorial.ipynb | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 981fd8a5b820..d0cf6b9ff3bd 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,7 +1,7 @@


[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index 98cad8de4294..64a2e9001538 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@


English | [简体中文](.github/README_cn.md) diff --git a/tutorial.ipynb b/tutorial.ipynb index 10e14b9b1208..b40f08ef20b3 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -371,7 +371,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -975,4 +975,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 6db0fac66cfb78697af21dc12d434774e4ccbcab Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 18:25:33 +0200 Subject: [PATCH 103/277] Update README.md (#9961) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 64a2e9001538..8dbf527c2dbd 100644 --- a/README.md +++ b/README.md @@ -339,8 +339,7 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or -professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact). +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). To request a commercial license please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
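The next patch in the series switches dataset-download handling in `utils/general.py` from filename-suffix checks to content-based archive checks via the standard library. As a rough standalone sketch of that idea (the helper below is hypothetical and not code from the diff):

```python
from pathlib import Path
from tarfile import is_tarfile
from zipfile import is_zipfile


def archive_kind(path):
    # Sniff the file header instead of trusting the suffix: a mis-named
    # 'weights.bin' that is really a zip is still detected, and a plain-text
    # file renamed to 'fake.zip' is not.
    p = Path(path)
    if is_zipfile(p):
        return 'zip'
    if is_tarfile(p):
        return 'tar'
    return None
```

The same two predicates are what the patch itself uses to decide whether a downloaded file should be unzipped or untarred.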
From 575055ce7028ee99618ff1c5c0e8919e8e2cd849 Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Fri, 28 Oct 2022 21:16:03 +0200 Subject: [PATCH 104/277] Switch from suffix checks to archive checks (#9963) * fix: switched from suffix checks to archive checks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup seems like both functions accept Path type input so removing str() * import always Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index 46978f1b8d7b..88cefb7bb662 100644 --- a/utils/general.py +++ b/utils/general.py @@ -23,8 +23,9 @@ from multiprocessing.pool import ThreadPool from pathlib import Path from subprocess import check_output +from tarfile import is_tarfile from typing import Optional -from zipfile import ZipFile +from zipfile import ZipFile, is_zipfile import cv2 import IPython @@ -465,7 +466,7 @@ def check_dataset(data, autodownload=True): # Download (optional) extract_dir = '' - if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip + if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) extract_dir, autodownload = data.parent, False @@ -607,11 +608,11 @@ def download_one(url, dir): else: LOGGER.warning(f'❌ Failed to download {url}...') - if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): + if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): LOGGER.info(f'Unzipping {f}...') - if f.suffix == '.zip': + if is_zipfile(f): unzip_file(f, dir) # unzip - elif f.suffix == '.tar': + elif is_tarfile(f): os.system(f'tar xf {f} --directory {f.parent}') # unzip elif f.suffix == '.gz': os.system(f'tar xfz {f} --directory {f.parent}') # unzip From 6e544d5f7c0b699c7c6002074b822a03308bbe3c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Oct 2022 13:31:01 +0200 Subject: [PATCH 105/277] FROM nvcr.io/nvidia/pytorch:22.10-py3 (#9966) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 05776510e160..87605456a5d9 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.09-py3 +FROM nvcr.io/nvidia/pytorch:22.10-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From a625f29967d09beeee1f010313a05ad7d5997c32 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 30 Oct 2022 22:09:36 +0100 Subject: [PATCH 106/277] Full-size proto code (optional) (#9980) * Update tf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tf.py Signed-off-by: Glenn Jocher * Update tf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 
deletions(-) diff --git a/models/tf.py b/models/tf.py index 1446d8841646..3f3dc8dbe7e7 100644 --- a/models/tf.py +++ b/models/tf.py @@ -333,6 +333,7 @@ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w def call(self, x): p = self.proto(x[0]) + # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) @@ -355,8 +356,8 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor == 2, "scale_factor must be 2" - self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, From e704970f7f606d6d3e58641e9384f38b532aa846 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 31 Oct 2022 12:43:17 +0100 Subject: [PATCH 107/277] Update README.md (#9970) * Update README.md @taliabender updated spacing per our convo Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8dbf527c2dbd..7cb4d09446ca 100644 --- a/README.md +++ b/README.md @@ -19,8 +19,10 @@

YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. To request a commercial license please complete the form at Ultralytics Licensing. - + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ To request a commercial license please complete the form at Ultralytics Licensing. +

From a83d2a50132982fa89a22420155f6c9f097a92c7 Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Mon, 31 Oct 2022 15:25:11 +0100 Subject: [PATCH 108/277] Segmentation Tutorial (#9521) * Added a tutorial notebook for segmentation. * Updated header for segmentation tutorial and included other YOLOv5 sponsor sections. * Updated segmentation tutorial to match main object detection tutorial. * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Ayush Chaurasia Co-authored-by: Glenn Jocher --- segment/tutorial.ipynb | 956 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 956 insertions(+) create mode 100644 segment/tutorial.ipynb diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb new file mode 100644 index 000000000000..47559978be74 --- /dev/null +++ b/segment/tutorial.ipynb @@ -0,0 +1,956 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 107.3/196.6 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 5.6ms\n", + "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 5.5ms\n", + "Speed: 0.4ms pre-process, 5.6ms inference, 1.1ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", + "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + ] + }, + "id": "WQPtK1QYVaD_", + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "89f5f0a84ca642378724f1bf05f17e0d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0.00/6.79M [00:00

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train-seg\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml && clearml-init" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, 
False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' im\u001b[0m\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 544.41\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' imag\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 138.66it\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n", + "Plotting labels to runs/train-seg/exp/labels.jpg... \n", + "Image sizes 640 train, 640 val\n", + "Using 4 dataloader workers\n", + "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n", + "Starting training for 3 epochs...\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 0/2 4.67G 0.04464 0.05134 0.06548 0.01895 219 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.727 0.661 0.725 0.496 0.688 0.629 0.673 0.413\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 1/2 6.36G 0.04102 0.04702 0.06873 0.01734 263 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.752 0.676 0.743 0.51 0.704 0.64 0.682 0.425\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 2/2 6.36G 0.0421 0.04463 0.05951 0.01746 245 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.776 0.674 0.757 0.514 0.72 0.632 0.684 0.429\n", + "\n", + "3 epochs completed in 0.006 hours.\n", + "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n", + "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n", + "\n", + "Validating runs/train-seg/exp/weights/best.pt...\n", + "Fusing layers... 
\n", + "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.775 0.673 0.758 0.515 0.72 0.632 0.684 0.427\n", + " person 128 254 0.829 0.745 0.833 0.545 0.776 0.697 0.764 0.406\n", + " bicycle 128 6 0.614 0.333 0.539 0.331 0.614 0.333 0.531 0.308\n", + " car 128 46 0.774 0.413 0.571 0.266 0.693 0.37 0.493 0.204\n", + " motorcycle 128 5 0.817 0.901 0.895 0.678 0.817 0.901 0.895 0.47\n", + " airplane 128 6 1 0.951 0.995 0.71 0.882 0.833 0.839 0.515\n", + " bus 128 7 0.695 0.714 0.757 0.661 0.695 0.714 0.757 0.627\n", + " train 128 3 1 0.935 0.995 0.566 1 0.935 0.995 0.731\n", + " truck 128 12 0.741 0.417 0.463 0.283 0.741 0.417 0.4 0.27\n", + " boat 128 6 0.653 0.32 0.452 0.17 0.653 0.32 0.328 0.149\n", + " traffic light 128 14 0.627 0.36 0.527 0.234 0.503 0.289 0.409 0.293\n", + " stop sign 128 2 0.829 1 0.995 0.747 0.829 1 0.995 0.821\n", + " bench 128 9 0.822 0.667 0.76 0.414 0.685 0.556 0.678 0.228\n", + " bird 128 16 0.967 1 0.995 0.675 0.906 0.938 0.909 0.516\n", + " cat 128 4 0.778 0.89 0.945 0.728 0.778 0.89 0.945 0.69\n", + " dog 128 9 1 0.65 0.973 0.697 1 0.65 0.939 0.615\n", + " horse 128 2 0.727 1 0.995 0.672 0.727 1 0.995 0.2\n", + " elephant 128 17 1 0.912 0.946 0.704 0.871 0.794 0.822 0.565\n", + " bear 128 1 0.626 1 0.995 0.895 0.626 1 0.995 0.895\n", + " zebra 128 4 0.865 1 0.995 0.934 0.865 1 0.995 0.822\n", + " giraffe 128 9 0.975 1 0.995 0.672 0.866 0.889 0.876 0.473\n", + " backpack 128 6 1 0.573 0.707 0.38 0.891 0.5 0.524 0.249\n", + " umbrella 128 18 0.744 0.889 0.926 0.552 0.465 0.556 0.483 0.262\n", + " handbag 128 19 0.799 0.209 0.432 0.225 0.799 0.209 0.403 0.201\n", + " tie 128 7 0.968 0.857 0.857 0.53 0.968 0.857 0.857 0.519\n", + " suitcase 128 4 0.821 1 0.995 0.696 0.821 1 0.995 0.665\n", + " frisbee 128 5 0.777 0.8 0.761 0.613 0.777 0.8 0.761 0.558\n", + " skis 128 1 0.721 1 0.995 0.497 0.721 1 0.995 0.398\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " snowboard 128 7 0.851 0.857 0.887 0.599 0.284 0.286 0.253 0.151\n", + " sports ball 128 6 0.961 0.667 0.687 0.429 0.721 0.5 0.481 0.476\n", + " kite 128 10 0.508 0.312 0.48 0.238 0.508 0.312 0.406 0.122\n", + " baseball bat 128 4 0.331 0.5 0.526 0.249 0.331 0.5 0.376 0.102\n", + " baseball glove 128 7 0.876 0.571 0.579 0.282 0.657 0.429 0.429 0.343\n", + " skateboard 128 5 1 0.697 0.824 0.471 0.707 0.497 0.552 0.299\n", + " tennis racket 128 7 0.524 0.714 0.646 0.426 0.524 0.714 0.646 0.452\n", + " bottle 128 18 0.657 0.389 0.531 0.359 0.657 0.389 0.569 0.362\n", + " wine glass 128 16 0.752 0.938 0.924 0.435 0.451 0.562 0.568 0.341\n", + " cup 128 36 0.859 0.676 0.848 0.503 0.823 0.648 0.793 0.496\n", + " fork 128 6 0.904 0.333 0.462 0.309 0.452 0.167 0.195 0.107\n", + " knife 128 16 0.749 0.5 0.665 0.413 0.655 0.438 0.523 0.314\n", + " spoon 128 22 0.787 0.409 0.577 0.275 0.787 0.409 0.528 0.236\n", + " bowl 128 28 0.793 0.679 0.744 0.577 0.751 0.643 0.688 0.366\n", + " banana 128 1 0.931 1 0.995 0.398 0.931 1 0.995 0.497\n", + " sandwich 128 2 1 0 0.828 0.713 1 0 0.498 0.449\n", + " orange 128 4 0.588 1 0.995 0.666 0.588 1 0.995 0.672\n", + " broccoli 128 11 0.563 0.455 0.356 0.258 0.563 0.455 0.362 0.259\n", + " carrot 128 24 0.683 0.75 0.753 0.489 0.758 0.833 0.835 0.451\n", + " hot dog 128 2 0.583 1 0.995 0.995 0.583 1 0.995 0.796\n", + " pizza 128 5 0.801 0.8 0.962 0.644 0.801 0.8 0.962 0.583\n", + " donut 128 14 0.704 1 0.889 0.759 0.704 1 0.889 0.683\n", + " cake 128 4 0.904 1 0.995 
0.896 0.904 1 0.995 0.838\n", + " chair 128 35 0.672 0.543 0.629 0.333 0.708 0.571 0.583 0.284\n", + " couch 128 6 0.827 0.5 0.821 0.583 0.827 0.5 0.681 0.352\n", + " potted plant 128 14 0.809 0.908 0.884 0.584 0.809 0.908 0.884 0.474\n", + " bed 128 3 1 0.654 0.913 0.36 1 0.654 0.913 0.418\n", + " dining table 128 13 0.803 0.385 0.557 0.361 0.321 0.154 0.126 0.0487\n", + " toilet 128 2 0.802 1 0.995 0.921 0.802 1 0.995 0.698\n", + " tv 128 2 0.59 1 0.995 0.846 0.59 1 0.995 0.846\n", + " laptop 128 3 1 0 0.451 0.324 1 0 0.372 0.157\n", + " mouse 128 2 1 0 0 0 1 0 0 0\n", + " remote 128 8 0.831 0.5 0.625 0.495 0.831 0.5 0.629 0.436\n", + " cell phone 128 8 0.867 0.375 0.482 0.26 0.578 0.25 0.302 0.127\n", + " microwave 128 3 0.782 1 0.995 0.695 0.782 1 0.995 0.585\n", + " oven 128 5 0.389 0.4 0.432 0.299 0.584 0.6 0.642 0.411\n", + " sink 128 6 0.657 0.5 0.491 0.373 0.657 0.5 0.436 0.303\n", + " refrigerator 128 5 0.729 0.8 0.778 0.547 0.729 0.8 0.778 0.496\n", + " book 128 29 0.77 0.231 0.451 0.186 0.77 0.231 0.399 0.136\n", + " clock 128 9 0.798 0.889 0.956 0.747 0.798 0.889 0.926 0.68\n", + " vase 128 2 0.437 1 0.995 0.895 0.437 1 0.995 0.796\n", + " scissors 128 1 0 0 0.0226 0.0113 0 0 0 0\n", + " teddy bear 128 21 0.815 0.629 0.877 0.521 0.753 0.582 0.793 0.435\n", + " toothbrush 128 5 1 0.719 0.995 0.737 1 0.719 0.995 0.606\n", + "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Train YOLOv5s on COCO128 for 3 epochs\n", + "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "0856bea36ec148b68522ff9c9eb258d8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0ace3934ec6f4d36a1b3a9e086390926": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "35e03ce5090346c9ae602891470fc555": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "max": 818322941, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "value": 818322941 + } + }, + "574140e4c4bc48c9a171541a02cd0211": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "placeholder": "​", + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "value": "100%" + } + }, + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "60b913d755b34d638478e30705a2dde1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "65881db1db8a4e9c930fab9172d45143": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "76879f6f2aa54637a7a07faeea2bd684": { 
+ "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9b8caa3522fc4cbab31e13b5dfc7808d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + ], + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + } + }, + "c942c208e72d46568b476bb0f2d75496": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "placeholder": "​", + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" + } + }, + "d6b7a2243e0c4beca714d99dceec23d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": 
null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} From 49156eb1d18b6314554333c4bdae5ee3e6102992 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 12:02:05 +0100 Subject: [PATCH 109/277] Fix `is_colab()` (#9994) @AyushExel @kalenmike Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 88cefb7bb662..5cf156dfe664 100644 --- a/utils/general.py +++ b/utils/general.py @@ -72,7 +72,7 @@ def is_chinese(s='人工智能'): def is_colab(): # Is environment a Google Colab instance? - return 'COLAB_GPU' in os.environ + return 'google.colab' in sys.modules def is_notebook(): From cf99788823dc952b9a5f11fd8be869235e172122 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 14:27:36 +0100 Subject: [PATCH 110/277] Check online twice on AutoUpdate (#9999) Increased robustness to network failures Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 5cf156dfe664..cdf4f502fc9c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -369,7 +369,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online(), "AutoUpdate skipped (offline)" + assert check_online() or check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From c55e2cd73b472de808665f8337d6edeaebb74521 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 14:53:14 +0100 Subject: [PATCH 111/277] Add `min_items` filter option (#9997) * Add `min_items` filter option @AyushExel @Laughing-q dataset filter Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 17 +++++++++++++++-- utils/segment/dataloaders.py | 3 ++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 403252ff6227..6b6e83e30456 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -444,6 +444,7 @@ def __init__(self, single_cls=False, stride=32, pad=0.0, + min_items=0, prefix=''): self.img_size = img_size self.augment = augment @@ -475,7 +476,7 @@ def __init__(self, # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.im_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e # Check cache self.label_files = img2label_paths(self.im_files) # labels @@ -505,7 +506,19 @@ def __init__(self, self.shapes 
= np.array(shapes) self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update - n = len(shapes) # number of images + + # Filter images + if min_items: + include = np.array([len(x) > min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{nf - len(include)}/{nf} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index a63d6ec013fd..9de6f0fbf903 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -93,12 +93,13 @@ def __init__( single_cls=False, stride=32, pad=0, + min_items=0, prefix="", downsample_ratio=1, overlap=False, ): super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, - stride, pad, prefix) + stride, pad, min_items, prefix) self.downsample_ratio = downsample_ratio self.overlap = overlap From 067ad9a2d1162fd33e6d47321e3f1d860b6df0e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 15:55:18 +0100 Subject: [PATCH 112/277] Improved `check_online()` robustness (#10000) * Improved check_online() robustness YOLOv5-wide improvement, not just in check_requirements() Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/utils/general.py b/utils/general.py index cdf4f502fc9c..aae466ba5c90 100644 --- a/utils/general.py +++ b/utils/general.py @@ -283,11 +283,16 @@ def file_size(path): def check_online(): # Check internet connectivity import socket - try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility - return True - except OSError: - return False + + def run_once(): + # Check once + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues def git_describe(path=ROOT): # path must be a directory @@ -369,7 +374,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online() or check_online(), "AutoUpdate skipped (offline)" + assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From 82a558557a825d380178527f4b0ff175f33457fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 16:41:37 +0100 Subject: [PATCH 113/277] Fix `min_items` (#10001) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 
6b6e83e30456..4e5b75edb5c2 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -509,8 +509,8 @@ def __init__(self, # Filter images if min_items: - include = np.array([len(x) > min_items for x in self.labels]).nonzero()[0].astype(int) - LOGGER.info(f'{prefix}{nf - len(include)}/{nf} images filtered from dataset') + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') self.im_files = [self.im_files[i] for i in include] self.label_files = [self.label_files[i] for i in include] self.labels = [self.labels[i] for i in include] From 02b8a4c21bb6d9419bbf01d4af20724743dab58b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Nov 2022 17:58:15 +0100 Subject: [PATCH 114/277] Update default `--epochs 100` (#10024) * Update default `--epochs 100` @AyushExel @kalenmike updating default Detection and Segmentation trainings to 100 epochs Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 10 +++++----- README.md | 10 +++++----- segment/train.py | 2 +- train.py | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index d0cf6b9ff3bd..4184c4c683d0 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -111,11 +111,11 @@ python detect.py --source 0 # 网络摄像头 数据集结果. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 自动从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中下载。YOLOv5n/s/m/l/x的训练时间在V100 GPU上是 1/2/4/6/8天(多GPU倍速). 尽可能使用最大的 `--batch-size`, 或通过 `--batch-size -1` 来实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092). 批量大小显示为 V100-16GB。 ```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` diff --git a/README.md b/README.md index 7cb4d09446ca..efe7d4b090bc 100644 --- a/README.md +++ b/README.md @@ -126,11 +126,11 @@ largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. 
```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` diff --git a/segment/train.py b/segment/train.py index 5a5f15f10d84..7950f95df4f2 100644 --- a/segment/train.py +++ b/segment/train.py @@ -463,7 +463,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') diff --git a/train.py b/train.py index c24a8e81531d..e882748581bf 100644 --- a/train.py +++ b/train.py @@ -433,7 +433,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') From fde77584687041aa62795bb2c27e895cf73686bf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Nov 2022 15:30:12 +0100 Subject: [PATCH 115/277] YOLOv5 AutoCache Update (#10027) * AutoCache * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * AutoCache * AutoCache * AutoCache * AutoCache Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 2 +- train.py | 2 +- utils/__init__.py | 1 - utils/dataloaders.py | 34 +++++++++++++++++++++++++++------- utils/general.py | 2 +- 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/segment/train.py b/segment/train.py index 7950f95df4f2..f067918e7c3c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -474,7 +474,7 @@ def parse_opt(known=False): parser.add_argument('--noplots', action='store_true', help='save no plot files') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') parser.add_argument('--image-weights', 
action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/train.py b/train.py index e882748581bf..1fe6cf4d9ebd 100644 --- a/train.py +++ b/train.py @@ -444,7 +444,7 @@ def parse_opt(known=False): parser.add_argument('--noplots', action='store_true', help='save no plot files') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/utils/__init__.py b/utils/__init__.py index 0afe6f475625..8354d91c4269 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -47,7 +47,6 @@ def notebook_init(verbose=True): from utils.general import check_font, check_requirements, is_colab from utils.torch_utils import select_device # imports - check_requirements(('psutil', 'IPython')) check_font() import psutil diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 4e5b75edb5c2..b33a24a46f9c 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -19,6 +19,7 @@ from urllib.parse import urlparse import numpy as np +import psutil import torch import torch.nn.functional as F import torchvision @@ -30,8 +31,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, - xyxy2xywhn) + colorstr, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, + xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -564,24 +565,43 @@ def __init__(self, self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride - # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False self.ims = [None] * n self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] if cache_images: - gb = 0 # Gigabytes of cached images + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': - gb += self.npy_files[i].stat().st_size + b += self.npy_files[i].stat().st_size else: # 'ram' self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - 
gb += self.ims[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' pbar.close() + def check_cache_ram(self, safety_margin=0.1, prefix=''): + # Check image caching requirements vs available memory + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b * self.n / n # GB required to cache dataset into RAM + mem = psutil.virtual_memory() + cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question + if not cache: + LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " + f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict diff --git a/utils/general.py b/utils/general.py index aae466ba5c90..0c3b44d7f9b0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -374,7 +374,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online(), "AutoUpdate skipped (offline)" + # assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From 78ed31c95a3b01c98a39a5b2edceb48ab630c95d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Nov 2022 15:06:18 +0100 Subject: [PATCH 116/277] IoU `eps` adjustment (#10051) IoU eps adjustment Unify h1 and h2 with eps values Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/metrics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index f0bc787e1518..3b854d4f1583 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,8 +234,8 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ @@ -253,7 +253,7 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # 
CIoU From 6ae3dff7d48bd914a5ab5d20e277b8222cd547c7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Nov 2022 18:43:07 +0100 Subject: [PATCH 117/277] Update get_coco.sh (#10057) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/scripts/get_coco.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 506d46df9fb5..0d388b0a12a8 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -30,7 +30,7 @@ url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ if [ "$segments" == "true" ]; then f='coco2017labels-segments.zip' # 168 MB else - f='coco2017labels.zip' # 168 MB + f='coco2017labels.zip' # 46 MB fi echo 'Downloading' $url$f ' ...' curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & From 58b3d078543ed92bb960ec3f213291c5fd459e43 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 22:56:23 +0100 Subject: [PATCH 118/277] [pre-commit.ci] pre-commit suggestions (#10068) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v2.38.2 → v3.2.0](https://github.com/asottile/pyupgrade/compare/v2.38.2...v3.2.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1cd102c26b41..0106b4aab523 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.38.2 + rev: v3.2.0 hooks: - id: pyupgrade name: Upgrade code From e00d02d78b772d7848689d8947238e4b05986a54 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Nov 2022 23:07:10 +0100 Subject: [PATCH 119/277] Use MNIST160 (#10069) New 160-image MNIST subset composed of first 8 examples of each class. Suitable for fast CI. 
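As a reference, a rough sketch of how a subset like this could be assembled into the split/class/image folder layout the classification scripts expect. The output path, file naming and use of torchvision here are assumptions for illustration, not necessarily how the released mnist160 archive was produced:

```python
# Illustrative only: keep the first 8 images of each class from each MNIST split
# and write them as split/class/index.png (80 images per split, 160 images total).
from pathlib import Path

from torchvision.datasets import MNIST  # assumes torchvision is installed

out = Path('../datasets/mnist160')  # assumed output location
for split, train in (('train', True), ('test', False)):
    dataset = MNIST(root='mnist_raw', train=train, download=True)
    counts = {c: 0 for c in range(10)}  # images kept so far per class
    for img, label in dataset:  # img is a PIL.Image, label is an int
        if counts[label] < 8:  # first 8 examples of each class
            class_dir = out / split / str(label)
            class_dir.mkdir(parents=True, exist_ok=True)
            img.save(class_dir / f'{counts[label]}.png')
            counts[label] += 1
        if all(v == 8 for v in counts.values()):
            break
```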
Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 1ec68e8412f9..f31bb6e6ce3c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -155,11 +155,11 @@ jobs: run: | m=${{ matrix.model }}-cls.pt # official weights b=runs/train-cls/exp/weights/best.pt # best.pt checkpoint - python classify/train.py --imgsz 32 --model $m --data mnist2560 --epochs 1 # train - python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist2560 # val - python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist2560/test/7/60.png # predict + python classify/train.py --imgsz 32 --model $m --data mnist160 --epochs 1 # train + python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist160 # val + python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist160/test/7/60.png # predict python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict - python export.py --weights $b --img 64 --imgsz 224 --include torchscript # export + python export.py --weights $b --img 64 --include torchscript # export python - < Date: Tue, 8 Nov 2022 00:58:00 +0100 Subject: [PATCH 120/277] Update Dockerfile keep default torch installation (#10071) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 87605456a5d9..7ec6efaeacba 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- # Install pip packages COPY requirements.txt . 
RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision +RUN pip uninstall -y Pillow torchtext # torch torchvision RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From 86decb3c49f91547975d7b7399290eb247888f6f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Nov 2022 15:05:19 +0100 Subject: [PATCH 121/277] Add `ultralytics` pip package (#10103) --- requirements.txt | 23 ++++++++++++----------- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/requirements.txt b/requirements.txt index 8cb1bd4c6fe1..70dd7ce53ba3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,29 +1,32 @@ -# YOLOv5 requirements +# YOLOv5 🚀 requirements # Usage: pip install -r requirements.txt -# Base ---------------------------------------- +# Base ------------------------------------------------------------------------ +ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.1 Pillow>=7.1.2 +psutil # system resources PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 -torch>=1.7.0 # see https://pytorch.org/get-started/locally/ (recommended) +thop>=0.1.1 # FLOPs computation +torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 -# Logging ------------------------------------- +# Logging --------------------------------------------------------------------- tensorboard>=2.4.1 # clearml>=1.2.0 # comet -# Plotting ------------------------------------ +# Plotting -------------------------------------------------------------------- pandas>=1.1.4 seaborn>=0.11.0 -# Export -------------------------------------- +# Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier @@ -34,14 +37,12 @@ seaborn>=0.11.0 # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export -# Deploy -------------------------------------- +# Deploy ---------------------------------------------------------------------- # tritonclient[all]~=2.24.0 -# Extras -------------------------------------- -ipython # interactive notebook -psutil # system utilization -thop>=0.1.1 # FLOPs computation +# Extras ---------------------------------------------------------------------- # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow +# ultralytics # HUB https://hub.ultralytics.com diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 7ec6efaeacba..a5035c6abc33 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . 
RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext # torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 6e8ff77545c5..8ec71622d9b6 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt gsutil notebook \ +RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \ tensorflow-aarch64 # tensorflowjs \ # onnx onnx-simplifier onnxruntime \ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index f3f81ec02c23..017e2826458b 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ +RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \ coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ # openvino-dev \ --extra-index-url https://download.pytorch.org/whl/cpu From 892c4cd4a5a99d9c824ffeb49ce512ee2c9b93e5 Mon Sep 17 00:00:00 2001 From: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Date: Wed, 9 Nov 2022 22:21:43 +0800 Subject: [PATCH 122/277] AutoShape integer image-size fix (#10090) Update common.py We have a division at line 694, and then a multiplication at line 695, so it makes `y*g` not an integer. And since `shape1` will be used at line 697 to ensure the size is divisible by the `stride`, this may lead to different image size. In my experiment, my image is [640, 640], it's divisible by the default stride 32, but I found that the result is changed to [672, 672] after line 697. So the final detection result is slightly different from that directly using the `detect.py` script, which does not call the AutoShape methods. 
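To make the mechanism concrete, a standalone sketch (not the actual models/common.py code; `make_divisible` below simply mirrors the ceil-to-the-nearest-multiple behaviour of the YOLOv5 helper):

```python
# A float gain * shape product that lands fractionally above a stride multiple gets
# rounded up a whole stride by ceil(); truncating to int first keeps the expected size.
import math

def make_divisible(x, divisor=32):
    return math.ceil(x / divisor) * divisor  # round up to the nearest multiple of divisor

scaled = 640.0000001                 # e.g. s * g when the division/multiplication is not exact
print(make_divisible(scaled))        # 672
print(make_divisible(int(scaled)))   # 640
```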
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 6347e51cdf0b..64f1b9354225 100644 --- a/models/common.py +++ b/models/common.py @@ -692,7 +692,7 @@ def forward(self, ims, size=640, augment=False, profile=False): s = im.shape[:2] # HWC shape0.append(s) # image shape g = max(size) / max(s) # gain - shape1.append([y * g for y in s]) + shape1.append([int(y * g) for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad From c1fcfe8cd9030939dd1635b158984fb066279b22 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Nov 2022 17:20:26 +0100 Subject: [PATCH 123/277] YouTube Usage example comments (#10106) * YouTube Usage example comments Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index b33a24a46f9c..621c03cd2db1 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -353,6 +353,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL From 75728bb3ea99113f306280b734dedcc5d7d067b1 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Wed, 9 Nov 2022 17:45:09 +0100 Subject: [PATCH 124/277] Mapped project and name to ClearML (#10100) * Mapped project and name to ClearML * Add project and task name docs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/README.md | 10 +++++++++- utils/loggers/clearml/clearml_utils.py | 4 ++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index e0c5824bc2a2..3cf4c268583f 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -57,12 +57,20 @@ To enable ClearML experiment tracking, simply install the ClearML pip package. pip install clearml>=1.2.0 ``` -This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` +This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. 
+ +If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script, by default the project will be called `YOLOv5` and the task `Training`. +PLEASE NOTE: ClearML uses `/` as a delimter for subprojects, so be careful when using `/` in your project name! ```bash python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache ``` +or with custom project and task name: +```bash +python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache +``` + This will capture: - Source code + uncommitted changes - Installed packages diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index eb1c12ce6cac..fe5f597a87a6 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -85,8 +85,8 @@ def __init__(self, opt, hyp): self.data_dict = None if self.clearml: self.task = Task.init( - project_name='YOLOv5', - task_name='training', + project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5', + task_name=opt.name if opt.name != 'exp' else 'Training', tags=['YOLOv5'], output_uri=True, auto_connect_frameworks={'pytorch': False} From 078059c5b3ead9579c53f68c521ed5f0e7e87afa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Nov 2022 18:32:34 +0100 Subject: [PATCH 125/277] Update IoU functions (#10123) Remove box area function and support expandable bbox_iou() calls. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/metrics.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 3b854d4f1583..65ea463c0dab 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -227,13 +227,13 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 # Get the coordinates of bounding boxes if xywh: # transform from xywh to xyxy - (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1) + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ else: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) - b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps @@ -263,11 +263,6 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 return iou # IoU -def box_area(box): - # box = xyxy(4,n) - return (box[2] - box[0]) * (box[3] - box[1]) - - def box_iou(box1, box2, eps=1e-7): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ @@ -282,11 +277,11 @@ def box_iou(box1, box2, eps=1e-7): """ # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1) + (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) # IoU = inter / (area1 + area2 - inter) - return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps) + return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps) def bbox_ioa(box1, box2, eps=1e-7): From 
55e95168465b094733e3ef1ec36e0a18f200cd94 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Nov 2022 00:21:29 +0100 Subject: [PATCH 126/277] Add Ultralytics HUB to README (#10070) * Add Ultralytics HUB section to README @pderrenger @kalenmike @AlanDimmer @AyushExel new Ultralytics HUB section in YOLOv5 README. Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md @AlanDimmer @kalenmike new integrations image Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 57 ++++++++++++++++++------------------------ README.md | 20 +++++++++++---- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- 4 files changed, 42 insertions(+), 39 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 4184c4c683d0..90d3da8298cc 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -144,47 +144,40 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-##
环境
-使用经过我们验证的环境,几秒钟就可以开始。点击下面的每个图标了解详情。 +##
Integrations
- - -##
如何与第三方集成
+
+ + +
+
- - - - - - - - - + + + + + + + + +
-|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases +|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|在[Deci](https://bit.ly/yolov5-deci-platform)一键自动编译和量化YOLOv5以提高推理性能|使用[ClearML](https://cutt.ly/yolov5-readme-clearml) (开源!)自动追踪,可视化,以及远程训练YOLOv5|标记并将您的自定义数据直接导出到YOLOv5后,用[Roboflow](https://roboflow.com/?ref=ultralytics)进行训练 |通过[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)自动跟踪以及可视化你在云端所有的YOLOv5训练运行情况 +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| + + +##
Ultralytics HUB
+ +[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! + + + ##
为什么选择 YOLOv5
diff --git a/README.md b/README.md index efe7d4b090bc..5101297782d0 100644 --- a/README.md +++ b/README.md @@ -52,9 +52,7 @@ ##
Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. - -##
Quick Start Examples
+See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples.
Install @@ -71,7 +69,7 @@ pip install -r requirements.txt # install
-
+
Inference YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest @@ -163,7 +161,11 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Integrations
- +
+ + +
+
@@ -184,6 +186,14 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - |Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +##
Ultralytics HUB
+ +[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! + +
+ + + ##
Why YOLOv5
YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 47559978be74..079bfe3057bc 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -232,7 +232,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index b40f08ef20b3..96f05426b4a8 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -626,7 +626,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", From 7c3827a2d66ce83a4afdffebe55d1bfbd39359d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rub=C3=A9n=20Usamentiaga?= Date: Fri, 11 Nov 2022 20:43:16 +0100 Subject: [PATCH 127/277] Fix benchmark.py usage comment (#10131) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update benchmarks.py Signed-off-by: Rubén Usamentiaga Signed-off-by: Rubén Usamentiaga --- benchmarks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks.py b/benchmarks.py index ef5c882973f0..03d7d693a936 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -22,7 +22,7 @@ $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT Usage: - $ python utils/benchmarks.py --weights yolov5s.pt --img 640 + $ python benchmarks.py --weights yolov5s.pt --img 640 """ import argparse From f33718f36f756301b91da6207f1d02f30b3916e1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Nov 2022 21:20:48 +0100 Subject: [PATCH 128/277] Update HUB banner image (#10134) * Update HUB banner image Passed through tinypng for filesize reduction Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 90d3da8298cc..65ecd31a3e69 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -177,7 +177,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
为什么选择 YOLOv5
diff --git a/README.md b/README.md index 5101297782d0..0fa95f404117 100644 --- a/README.md +++ b/README.md @@ -191,7 +191,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
Why YOLOv5
From abbfd695232b1bfcbd8e122e2aeb37fcc3d146d5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Nov 2022 18:54:49 +0100 Subject: [PATCH 129/277] Copy-Paste zero value fix (#10152) * Copy-Paste zero value fix Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/augmentations.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 7c8e0bcdede6..1eae5db8f816 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -250,12 +250,10 @@ def copy_paste(im, labels, segments, p=0.5): if (ioa < 0.30).all(): # allow 30% obscuration of existing labels labels = np.concatenate((labels, [[l[0], *box]]), 0) segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) - result = cv2.bitwise_and(src1=im, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug return im, labels, segments From ea73386e5a21f6b6d4f2bdc0ba1f9f8a7ced3f2a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Nov 2022 14:19:47 +0100 Subject: [PATCH 130/277] Add Copy-Paste to `mosaic9()` (#10165) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 621c03cd2db1..54d3f7bbba00 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -869,6 +869,7 @@ def load_mosaic9(self, index): # img9, labels9 = replicate(img9, labels9) # replicate # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) img9, labels9 = random_perspective(img9, labels9, segments9, From 9dd40f072386134d5271a902f135e95979de1419 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Nov 2022 16:27:07 +0100 Subject: [PATCH 131/277] Add `join_threads()` (#10086) * Update __init__.py Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/utils/__init__.py b/utils/__init__.py index 8354d91c4269..7bf3efe6b8c7 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -37,6 +37,16 @@ def wrapper(*args, **kwargs): return wrapper +def join_threads(verbose=False): + # Join all daemon threads, i.e. 
atexit.register(lambda: join_threads()) + main_thread = threading.current_thread() + for t in threading.enumerate(): + if t is not main_thread: + if verbose: + print(f'Joining thread {t.name}') + t.join() + + def notebook_init(verbose=True): # Check system software and hardware print('Checking setup...') From 5e03f5fc8cbd658e183bb3812fe1c8553cb8cf05 Mon Sep 17 00:00:00 2001 From: Amol Dumrewal Date: Tue, 15 Nov 2022 23:30:33 +0530 Subject: [PATCH 132/277] Fix dataloader filepath modification to perform replace only once and not for all occurences of string (#10163) * Fix dataloader filepath modification to perform only once and not for all occurences of string * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 54d3f7bbba00..0418293a6e21 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -470,8 +470,8 @@ def __init__(self, with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep - f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path - # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) else: raise FileNotFoundError(f'{prefix}{p} does not exist') self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) From 166b9f2fa79a67788a2a372dc52c9e8e0f7a7cc1 Mon Sep 17 00:00:00 2001 From: Ryan Echols Date: Wed, 16 Nov 2022 08:09:30 -0700 Subject: [PATCH 133/277] fix: prevent logging config clobbering (#10133) Previous behavior: loading this repository with `torch.hub.load` clobbers the existing logging configuration by modifying the root logger's configuration. New behavior: loading this repository with `torch.hub.load` only clobbers the logging configuration for logger `yolov5` and its descendants. Signed-off-by: Ryan Echols Signed-off-by: Ryan Echols --- utils/general.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 0c3b44d7f9b0..76dd2a40b51b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -126,8 +126,9 @@ def set_logging(name=None, verbose=VERBOSE): log.addHandler(handler) -set_logging() # run before defining LOGGER -LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) +logger_name = "yolov5" +set_logging(logger_name) # run before defining LOGGER +LOGGER = logging.getLogger(logger_name) # define globally (used in train.py, val.py, detect.py, etc.) 
if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From 90575107e7b06d48ef91ffa46a41a55439ebdab1 Mon Sep 17 00:00:00 2001 From: tripleMu Date: Wed, 16 Nov 2022 23:10:15 +0800 Subject: [PATCH 134/277] Filter PyTorch 1.13 UserWarnings (#10166) FilterWarning for torch.distributed._all_gather_base Co-authored-by: Glenn Jocher --- utils/torch_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 04a3873854ee..fe934abf118c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -32,6 +32,7 @@ # Suppress PyTorch warnings warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') +warnings.filterwarnings('ignore', category=UserWarning) def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): From e40662ffdd80a6f108a62cf0d53d06085d943223 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Nov 2022 19:06:19 +0100 Subject: [PATCH 135/277] Revert "fix: prevent logging config clobbering" (#10177) Revert "fix: prevent logging config clobbering (#10133)" This reverts commit 166b9f2fa79a67788a2a372dc52c9e8e0f7a7cc1. --- utils/general.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 76dd2a40b51b..0c3b44d7f9b0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -126,9 +126,8 @@ def set_logging(name=None, verbose=VERBOSE): log.addHandler(handler) -logger_name = "yolov5" -set_logging(logger_name) # run before defining LOGGER -LOGGER = logging.getLogger(logger_name) # define globally (used in train.py, val.py, detect.py, etc.) +set_logging() # run before defining LOGGER +LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From a9f895d304aea5920e694606927fa9208aa7f0ed Mon Sep 17 00:00:00 2001 From: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Date: Thu, 17 Nov 2022 20:42:26 +0800 Subject: [PATCH 136/277] Apply make_divisible for ONNX models in Autoshape (#10172) * Apply make_divisible for onnx models in Autoshape At line 697 we have this `make_divisible` function for pytorch models. * Context: we want to run inference on varied input sizes instead of fixed image size. * When I test an image of size [720, 720] for a pytorch model (e.g., yolov5n.pt), we can see that it will be reshaped to [736, 736] by the function. This is as expected. * When I test the same image for the onnx model (e.g., yolov5n.onnx, exported with `--dynamic`), I got an error and it's due to the indivisible problem ``` onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Non-zero status code returned while running Concat node. Name:'Concat_143' Status Message: concat.cc:156 PrepareForCompute Non concat axis dimensions must match: Axis 3 has mismatched dimensions of 45 and 46 ``` The simple solution is to enable the `make_divisible` function for onnx model too. 
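A hedged sketch of the kind of usage that hits this, assuming the model was exported beforehand with `python export.py --weights yolov5n.pt --include onnx --dynamic` and that onnxruntime is installed:

```python
# AutoShape inference on a dynamic ONNX model with an input side that is not a multiple of 32.
import numpy as np
import torch

model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5n.onnx')  # DetectMultiBackend + AutoShape
im = np.zeros((720, 720, 3), dtype=np.uint8)  # HWC image, 720 is not divisible by the 32-pixel stride
results = model(im, size=640)  # with this change the letterbox shape is forced to a stride multiple
results.print()
```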
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> * revise indent Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> * Apply make_divisible to all formats All formats from DetectMultiBackend should have default stride=32 Signed-off-by: Glenn Jocher Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 64f1b9354225..8b5ec1c786d8 100644 --- a/models/common.py +++ b/models/common.py @@ -694,7 +694,7 @@ def forward(self, ims, size=640, augment=False, profile=False): g = max(size) / max(s) # gain shape1.append([int(y * g) for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 From 1510111b46a24a0c0fa2d685a6f3c96693368654 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Nov 2022 19:22:18 +0100 Subject: [PATCH 137/277] data.yaml `names.keys()` integer assert (#10190) * data.yaml `names.keys()` integer assert Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 0c3b44d7f9b0..2f047b351228 100644 --- a/utils/general.py +++ b/utils/general.py @@ -482,9 +482,10 @@ def check_dataset(data, autodownload=True): # Checks for k in 'train', 'val', 'names': - assert k in data, f"data.yaml '{k}:' field missing ❌" + assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") if isinstance(data['names'], (list, tuple)): # old array format data['names'] = dict(enumerate(data['names'])) # convert to dict + assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car' data['nc'] = len(data['names']) # Resolve paths From ff6e6e328efe43547bc57d4e02ae8ddc3387ef58 Mon Sep 17 00:00:00 2001 From: Ryan Echols Date: Thu, 17 Nov 2022 12:47:46 -0700 Subject: [PATCH 138/277] Fix: try 2 - prevent logging config clobbering (#10192) * fix: try 2 - prevent logging config clobbering Previous behavior: loading this repository with `torch.hub.load` clobbers the existing logging configuration by modifying the root logger's configuration. New behavior: loading this repository with `torch.hub.load` only clobbers the logging configuration for logger `yolov5` and its descendants. 
This is done in a way compatible with Google Colab Signed-off-by: Ryan Echols * chore: fill in comment no-op so a pre-commit hook can auto-format files Signed-off-by: Ryan Echols * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Ryan Echols Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/utils/general.py b/utils/general.py index 2f047b351228..8f85557a065a 100644 --- a/utils/general.py +++ b/utils/general.py @@ -7,6 +7,7 @@ import glob import inspect import logging +import logging.config import math import os import platform @@ -111,23 +112,33 @@ def is_writeable(dir, test=False): return False -def set_logging(name=None, verbose=VERBOSE): - # Sets level and returns logger - if is_kaggle() or is_colab(): - for h in logging.root.handlers: - logging.root.removeHandler(h) # remove all handlers associated with the root logger object - rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR - log = logging.getLogger(name) - log.setLevel(level) - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter("%(message)s")) - handler.setLevel(level) - log.addHandler(handler) +LOGGING_NAME = "yolov5" -set_logging() # run before defining LOGGER -LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) +def set_logging(name=LOGGING_NAME, verbose=True): + # sets up logging for the given name + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + logging.config.dictConfig({ + "version": 1, + "disable_existing_loggers": False, + "formatters": { + name: { + "format": "%(message)s"}}, + "handlers": { + name: { + "class": "logging.StreamHandler", + "formatter": name, + "level": level,}}, + "loggers": { + name: { + "level": level, + "handlers": [name], + "propagate": False,}}}) + + +set_logging(LOGGING_NAME) # run before defining LOGGER +LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) 
if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From 467a57f01b393989867426261d3e9a95566e3e24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 14:19:46 +0100 Subject: [PATCH 139/277] Segment prediction labels normalization fix (#10205) * normalize_segments * round remove * swap axes fix --- segment/predict.py | 2 +- utils/general.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 3ae68240726a..da1097c047c1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -156,7 +156,7 @@ def run( # Segments if save_txt: segments = reversed(masks2segments(masks)) - segments = [scale_segments(im.shape[2:], x, im0.shape).round() for x in segments] + segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): diff --git a/utils/general.py b/utils/general.py index 8f85557a065a..c543a237d25b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -822,7 +822,7 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): return boxes -def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new @@ -835,6 +835,9 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): segments[:, 1] -= pad[1] # y padding segments /= gain clip_segments(segments, img0_shape) + if normalize: + segments[:, 0] /= img0_shape[1] # width + segments[:, 1] /= img0_shape[0] # height return segments @@ -850,14 +853,14 @@ def clip_boxes(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 -def clip_segments(boxes, shape): +def clip_segments(segments, shape): # Clip segments (xy1,xy2,...) 
to image shape (height, width) - if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x - boxes[:, 1].clamp_(0, shape[0]) # y + if isinstance(segments, torch.Tensor): # faster individually + segments[:, 0].clamp_(0, shape[1]) # x + segments[:, 1].clamp_(0, shape[0]) # y else: # np.array (faster grouped) - boxes[:, 0] = boxes[:, 0].clip(0, shape[1]) # x - boxes[:, 1] = boxes[:, 1].clip(0, shape[0]) # y + segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x + segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y def non_max_suppression( From 241d798bb44a2900591786456a61fd73f3993b4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 15:05:25 +0100 Subject: [PATCH 140/277] Created using Colaboratory --- tutorial.ipynb | 320 ++++++++++++++++++++++++------------------------- 1 file changed, 159 insertions(+), 161 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 96f05426b4a8..07a6625a1491 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -5,7 +5,6 @@ "colab": { "name": "YOLOv5 Tutorial", "provenance": [], - "collapsed_sections": [], "machine_shape": "hm", "toc_visible": true }, @@ -16,7 +15,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "9b8caa3522fc4cbab31e13b5dfc7808d": { + "13e0e8b77bf54b25b8893f0b4164315f": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -31,14 +30,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + "IPY_MODEL_48037f2f7fea4012b9b341f6aee75297", + "IPY_MODEL_3f3b925287274893baf5ed7bb0cf6635", + "IPY_MODEL_c44bdca7c9784b20ba2146250ee744d6" ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + "layout": "IPY_MODEL_5b0ed23cd32c4c7d8d9467b7425684ad" } }, - "574140e4c4bc48c9a171541a02cd0211": { + "48037f2f7fea4012b9b341f6aee75297": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -53,13 +52,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "layout": "IPY_MODEL_1e10b4db5d644cb78bd6e005bb34038a", "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "style": "IPY_MODEL_a58728093ecb4eafb826bee11a84c549", "value": "100%" } }, - "35e03ce5090346c9ae602891470fc555": { + "3f3b925287274893baf5ed7bb0cf6635": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -75,15 +74,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "layout": "IPY_MODEL_9ce169fe4b8543c0b26d745daa230f18", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "style": "IPY_MODEL_d5da01aca8fb400c96e76f44c9403581", "value": 818322941 } }, - "c942c208e72d46568b476bb0f2d75496": { + "c44bdca7c9784b20ba2146250ee744d6": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -98,13 +97,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "layout": "IPY_MODEL_98cbaa572fdd4c42975f52015672b3a5", "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - 
"value": " 780M/780M [02:19<00:00, 6.24MB/s]" + "style": "IPY_MODEL_a636aa81f5cc453099c9e552f0986e63", + "value": " 780M/780M [01:27<00:00, 6.98MB/s]" } }, - "65881db1db8a4e9c930fab9172d45143": { + "5b0ed23cd32c4c7d8d9467b7425684ad": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -156,7 +155,7 @@ "width": null } }, - "60b913d755b34d638478e30705a2dde1": { + "1e10b4db5d644cb78bd6e005bb34038a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -208,7 +207,7 @@ "width": null } }, - "0856bea36ec148b68522ff9c9eb258d8": { + "a58728093ecb4eafb826bee11a84c549": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -223,7 +222,7 @@ "description_width": "" } }, - "76879f6f2aa54637a7a07faeea2bd684": { + "9ce169fe4b8543c0b26d745daa230f18": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -275,7 +274,7 @@ "width": null } }, - "0ace3934ec6f4d36a1b3a9e086390926": { + "d5da01aca8fb400c96e76f44c9403581": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -291,7 +290,7 @@ "description_width": "" } }, - "d6b7a2243e0c4beca714d99dceec23d6": { + "98cbaa572fdd4c42975f52015672b3a5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -343,7 +342,7 @@ "width": null } }, - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "a636aa81f5cc453099c9e552f0986e63": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -403,7 +402,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + "outputId": "bcb6db4a-fc21-4258-9b53-4a760a534656" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -414,20 +413,20 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete ✅ (8 CPUs, 51.0 GB RAM, 37.4/166.8 GB disk)\n" + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" ] } ] @@ -461,29 +460,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + "outputId": "de684b46-7623-4836-ee44-49cdb320cbf3" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-56-g30e674b 
Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 27.8MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 162MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.8ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 20.1ms\n", - "Speed: 0.6ms pre-process, 17.4ms inference, 21.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 13.3ms\n", + "Speed: 0.5ms pre-process, 15.2ms inference, 19.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -517,27 +516,27 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + "13e0e8b77bf54b25b8893f0b4164315f", + "48037f2f7fea4012b9b341f6aee75297", + "3f3b925287274893baf5ed7bb0cf6635", + "c44bdca7c9784b20ba2146250ee744d6", + "5b0ed23cd32c4c7d8d9467b7425684ad", + "1e10b4db5d644cb78bd6e005bb34038a", + "a58728093ecb4eafb826bee11a84c549", + "9ce169fe4b8543c0b26d745daa230f18", + "d5da01aca8fb400c96e76f44c9403581", + "98cbaa572fdd4c42975f52015672b3a5", + "a636aa81f5cc453099c9e552f0986e63" ] }, - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + "outputId": "b1e02a1f-981f-4739-e75d-10d0204cc32d" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -548,7 +547,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "9b8caa3522fc4cbab31e13b5dfc7808d" + "model_id": "13e0e8b77bf54b25b8893f0b4164315f" } }, "metadata": {} @@ -562,45 +561,43 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" + "outputId": "9c2f755f-f383-4a9e-cd19-f73a0c763a9c" }, "source": [ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": 
"stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 52.7MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10509.20it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2019.92it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [00:50<00:00, 3.10it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.25it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.1ms pre-process, 1.0ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.2ms pre-process, 2.7ms inference, 2.1ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.81s)\n", + "Done (t=0.41s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.62s)\n", + "DONE (t=6.19s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=77.03s).\n", + "DONE (t=75.81s).\n", "Accumulating evaluation results...\n", - "DONE (t=14.63s).\n", + "DONE (t=15.26s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -612,7 +609,7 @@ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.566\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.378\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.625\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.724\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.723\n", "Results saved to \u001b[1mruns/val/exp\u001b[0m\n" ] } @@ -664,7 +661,8 @@ " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -679,13 +677,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + "outputId": "7d03d4d2-9a6e-47de-88f4-c673b55c73c5" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -693,17 +691,17 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet' to automatically track and visualize YOLOv5 🚀 runs with Comet\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and 
remotely train YOLOv5 🚀 in ClearML\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 41.1MB/s]\n", - "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 26.1MB/s]\n", + "Dataset download success ✅ (0.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -731,120 +729,120 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", + "Model summary: 214 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", "\n", "Transferred 349/349 items from yolov5s.pt\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 9659.25it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1989.66it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 951.31it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 16:32:29 +0100 Subject: [PATCH 141/277] Created using Colaboratory --- segment/tutorial.ipynb | 1500 +++++++++++++++------------------------- 1 file changed, 572 insertions(+), 928 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 079bfe3057bc..c26878fb0dbf 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -1,956 +1,600 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] }, - "id": "wbvMlHd_QwMG", - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" - }, - "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 107.3/196.6 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", - "\n", - "```shell\n", - "python segment/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "d1e33dfc-9ad4-436e-f1e5-01acee40c029" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] }, - "id": "zR9ZbuQCH7FX", - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
\n", - "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 5.6ms\n", - "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 5.5ms\n", - "Speed: 0.4ms pre-process, 5.6ms inference, 1.1ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", - "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" - ] + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] }, - "id": "WQPtK1QYVaD_", - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" - }, - "outputs": [ { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "89f5f0a84ca642378724f1bf05f17e0d", - "version_major": 2, - "version_minor": 0 + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "e206fcec-cf42-4754-8a42-39bc3603eba8" }, - "text/plain": [ - " 0%| | 0.00/6.79M [00:00

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", - "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train-seg\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] }, - "id": "1NcFxRcFdJ_O", - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", - " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] 
\n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", - "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", - "\n", - "Transferred 367/367 items from yolov5s-seg.pt\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' im\u001b[0m\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 544.41\u001b[0m\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' imag\u001b[0m\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 138.66it\u001b[0m\n", - "\n", - "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n", - "Plotting labels to runs/train-seg/exp/labels.jpg... \n", - "Image sizes 640 train, 640 val\n", - "Using 4 dataloader workers\n", - "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n", - "Starting training for 3 epochs...\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 0/2 4.67G 0.04464 0.05134 0.06548 0.01895 219 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.727 0.661 0.725 0.496 0.688 0.629 0.673 0.413\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 1/2 6.36G 0.04102 0.04702 0.06873 0.01734 263 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.752 0.676 0.743 0.51 0.704 0.64 0.682 0.425\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 2/2 6.36G 0.0421 0.04463 0.05951 0.01746 245 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.776 0.674 0.757 0.514 0.72 0.632 0.684 0.429\n", - "\n", - "3 epochs completed in 0.006 hours.\n", - "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n", - "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n", - "\n", - "Validating runs/train-seg/exp/weights/best.pt...\n", - "Fusing layers... 
\n", - "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.775 0.673 0.758 0.515 0.72 0.632 0.684 0.427\n", - " person 128 254 0.829 0.745 0.833 0.545 0.776 0.697 0.764 0.406\n", - " bicycle 128 6 0.614 0.333 0.539 0.331 0.614 0.333 0.531 0.308\n", - " car 128 46 0.774 0.413 0.571 0.266 0.693 0.37 0.493 0.204\n", - " motorcycle 128 5 0.817 0.901 0.895 0.678 0.817 0.901 0.895 0.47\n", - " airplane 128 6 1 0.951 0.995 0.71 0.882 0.833 0.839 0.515\n", - " bus 128 7 0.695 0.714 0.757 0.661 0.695 0.714 0.757 0.627\n", - " train 128 3 1 0.935 0.995 0.566 1 0.935 0.995 0.731\n", - " truck 128 12 0.741 0.417 0.463 0.283 0.741 0.417 0.4 0.27\n", - " boat 128 6 0.653 0.32 0.452 0.17 0.653 0.32 0.328 0.149\n", - " traffic light 128 14 0.627 0.36 0.527 0.234 0.503 0.289 0.409 0.293\n", - " stop sign 128 2 0.829 1 0.995 0.747 0.829 1 0.995 0.821\n", - " bench 128 9 0.822 0.667 0.76 0.414 0.685 0.556 0.678 0.228\n", - " bird 128 16 0.967 1 0.995 0.675 0.906 0.938 0.909 0.516\n", - " cat 128 4 0.778 0.89 0.945 0.728 0.778 0.89 0.945 0.69\n", - " dog 128 9 1 0.65 0.973 0.697 1 0.65 0.939 0.615\n", - " horse 128 2 0.727 1 0.995 0.672 0.727 1 0.995 0.2\n", - " elephant 128 17 1 0.912 0.946 0.704 0.871 0.794 0.822 0.565\n", - " bear 128 1 0.626 1 0.995 0.895 0.626 1 0.995 0.895\n", - " zebra 128 4 0.865 1 0.995 0.934 0.865 1 0.995 0.822\n", - " giraffe 128 9 0.975 1 0.995 0.672 0.866 0.889 0.876 0.473\n", - " backpack 128 6 1 0.573 0.707 0.38 0.891 0.5 0.524 0.249\n", - " umbrella 128 18 0.744 0.889 0.926 0.552 0.465 0.556 0.483 0.262\n", - " handbag 128 19 0.799 0.209 0.432 0.225 0.799 0.209 0.403 0.201\n", - " tie 128 7 0.968 0.857 0.857 0.53 0.968 0.857 0.857 0.519\n", - " suitcase 128 4 0.821 1 0.995 0.696 0.821 1 0.995 0.665\n", - " frisbee 128 5 0.777 0.8 0.761 0.613 0.777 0.8 0.761 0.558\n", - " skis 128 1 0.721 1 0.995 0.497 0.721 1 0.995 0.398\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." 
+ ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " snowboard 128 7 0.851 0.857 0.887 0.599 0.284 0.286 0.253 0.151\n", - " sports ball 128 6 0.961 0.667 0.687 0.429 0.721 0.5 0.481 0.476\n", - " kite 128 10 0.508 0.312 0.48 0.238 0.508 0.312 0.406 0.122\n", - " baseball bat 128 4 0.331 0.5 0.526 0.249 0.331 0.5 0.376 0.102\n", - " baseball glove 128 7 0.876 0.571 0.579 0.282 0.657 0.429 0.429 0.343\n", - " skateboard 128 5 1 0.697 0.824 0.471 0.707 0.497 0.552 0.299\n", - " tennis racket 128 7 0.524 0.714 0.646 0.426 0.524 0.714 0.646 0.452\n", - " bottle 128 18 0.657 0.389 0.531 0.359 0.657 0.389 0.569 0.362\n", - " wine glass 128 16 0.752 0.938 0.924 0.435 0.451 0.562 0.568 0.341\n", - " cup 128 36 0.859 0.676 0.848 0.503 0.823 0.648 0.793 0.496\n", - " fork 128 6 0.904 0.333 0.462 0.309 0.452 0.167 0.195 0.107\n", - " knife 128 16 0.749 0.5 0.665 0.413 0.655 0.438 0.523 0.314\n", - " spoon 128 22 0.787 0.409 0.577 0.275 0.787 0.409 0.528 0.236\n", - " bowl 128 28 0.793 0.679 0.744 0.577 0.751 0.643 0.688 0.366\n", - " banana 128 1 0.931 1 0.995 0.398 0.931 1 0.995 0.497\n", - " sandwich 128 2 1 0 0.828 0.713 1 0 0.498 0.449\n", - " orange 128 4 0.588 1 0.995 0.666 0.588 1 0.995 0.672\n", - " broccoli 128 11 0.563 0.455 0.356 0.258 0.563 0.455 0.362 0.259\n", - " carrot 128 24 0.683 0.75 0.753 0.489 0.758 0.833 0.835 0.451\n", - " hot dog 128 2 0.583 1 0.995 0.995 0.583 1 0.995 0.796\n", - " pizza 128 5 0.801 0.8 0.962 0.644 0.801 0.8 0.962 0.583\n", - " donut 128 14 0.704 1 0.889 0.759 0.704 1 0.889 0.683\n", - " cake 128 4 0.904 1 0.995 0.896 0.904 1 0.995 0.838\n", - " chair 128 35 0.672 0.543 0.629 0.333 0.708 0.571 0.583 0.284\n", - " couch 128 6 0.827 0.5 0.821 0.583 0.827 0.5 0.681 0.352\n", - " potted plant 128 14 0.809 0.908 0.884 0.584 0.809 0.908 0.884 0.474\n", - " bed 128 3 1 0.654 0.913 0.36 1 0.654 0.913 0.418\n", - " dining table 128 13 0.803 0.385 0.557 0.361 0.321 0.154 0.126 0.0487\n", - " toilet 128 2 0.802 1 0.995 0.921 0.802 1 0.995 0.698\n", - " tv 128 2 0.59 1 0.995 0.846 0.59 1 0.995 0.846\n", - " laptop 128 3 1 0 0.451 0.324 1 0 0.372 0.157\n", - " mouse 128 2 1 0 0 0 1 0 0 0\n", - " remote 128 8 0.831 0.5 0.625 0.495 0.831 0.5 0.629 0.436\n", - " cell phone 128 8 0.867 0.375 0.482 0.26 0.578 0.25 0.302 0.127\n", - " microwave 128 3 0.782 1 0.995 0.695 0.782 1 0.995 0.585\n", - " oven 128 5 0.389 0.4 0.432 0.299 0.584 0.6 0.642 0.411\n", - " sink 128 6 0.657 0.5 0.491 0.373 0.657 0.5 0.436 0.303\n", - " refrigerator 128 5 0.729 0.8 0.778 0.547 0.729 0.8 0.778 0.496\n", - " book 128 29 0.77 0.231 0.451 0.186 0.77 0.231 0.399 0.136\n", - " clock 128 9 0.798 0.889 0.956 0.747 0.798 0.889 0.926 0.68\n", - " vase 128 2 0.437 1 0.995 0.895 0.437 1 0.995 0.796\n", - " scissors 128 1 0 0 0.0226 0.0113 0 0 0 0\n", - " teddy bear 128 21 0.815 0.629 0.877 0.521 0.753 0.582 0.793 0.435\n", - " toothbrush 128 5 1 0.719 0.995 0.737 1 0.719 0.995 0.606\n", - "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "15glLzbQx5u0" - }, - "source": [ - "# 4. Visualize" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nWOsI5wJR1o3" - }, - "source": [ - "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. 
Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", - "\n", - "Getting started is easy:\n", - "```shell\n", - "pip install comet_ml # 1. install\n", - "export COMET_API_KEY= # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\"yolo-ui\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", - "\n", - "\n", - "\"ClearML" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
- ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "0856bea36ec148b68522ff9c9eb258d8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "f7eba0ae-49d1-405b-a1cf-169212fadc2c" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n", + "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", + "######################################################################## 100.0%\n", + "######################################################################## 100.0%\n" + ] + } + ], + "source": [ + "# Download COCO val\n", + "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "73533135-6995-4f2d-adb0-3acb5ef9b300" + }, + "outputs": [ + { + "metadata": { + "tags": null + }, + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... 
\n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1420.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", + "Speed: 0.9ms pre-process, 3.9ms inference, 3.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s-seg on COCO val\n", + "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train-seg\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "8e349df5-9910-4a91-a845-748def15d3d7" + }, + "outputs": [ + { + "metadata": { + "tags": null + }, + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", + "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", + "100% 6.79M/6.79M [00:01<00:00, 4.42MB/s]\n", + "Dataset download success ✅ (2.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] 
\n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1383.68it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 241.77it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] }, - "574140e4c4bc48c9a171541a02cd0211": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", - "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", - "value": "100%" - } + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] }, - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", + "\n", + "\"Local\n" + ] }, - "60b913d755b34d638478e30705a2dde1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] }, - "65881db1db8a4e9c930fab9172d45143": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] }, - "76879f6f2aa54637a7a07faeea2bd684": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional 
content below." + ] }, - "9b8caa3522fc4cbab31e13b5dfc7808d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" - ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" - } + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true }, - "c942c208e72d46568b476bb0f2d75496": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", - "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - "value": " 780M/780M [02:19<00:00, 6.24MB/s]" - } + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "d6b7a2243e0c4beca714d99dceec23d6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", 
+ "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" } - } - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 74b3886edd55bc9b681b8a956275abb9e6e1e2cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 21:18:57 +0100 Subject: [PATCH 142/277] Simplify dataloader tqdm descriptions (#10210) * Simplify dataloader tqdm descriptions @AyushExel this should help our tqdm dataloader messages fit better within a single line in our Colab notebooks and also help avoid confusion about missing/empty labels, now combined into 'backgrounds'. Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 0418293a6e21..39db3c0dfd21 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -31,8 +31,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - colorstr, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, - xywhn2xyxy, xyxy2xywhn) + cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, + xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -493,7 +493,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -607,7 +607,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." + desc = f"{prefix}Scanning {path.parent / path.stem}..." 
with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -622,7 +622,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" pbar.close() if msgs: From 0322bb31962d68caefa0c0c5880d80d27e8ab8ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 21:39:13 +0100 Subject: [PATCH 143/277] New global `TQDM_BAR_FORMAT` (#10211) * New global TQDM_BAR_FORMAT * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/train.py | 6 +++--- classify/val.py | 5 +++-- segment/train.py | 10 +++++----- segment/val.py | 8 ++++---- train.py | 11 ++++++----- utils/autoanchor.py | 4 ++-- utils/dataloaders.py | 15 +++++++-------- utils/general.py | 1 + val.py | 8 ++++---- 9 files changed, 35 insertions(+), 33 deletions(-) diff --git a/classify/train.py b/classify/train.py index 178ebcdfff53..4422ca26b0ae 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,8 +40,8 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, WorkingDirectory, check_git_status, check_requirements, colorstr, - download, increment_path, init_seeds, print_args, yaml_save) +from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, + check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, @@ -174,7 +174,7 @@ def train(opt, device): trainloader.sampler.set_epoch(epoch) pbar = enumerate(trainloader) if RANK in {-1, 0}: - pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') + pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT) for i, (images, labels) in pbar: # progress bar images, labels = images.to(device, non_blocking=True), labels.to(device) diff --git a/classify/val.py b/classify/val.py index c0b507785fb0..8657036fb2a2 100644 --- a/classify/val.py +++ b/classify/val.py @@ -36,7 +36,8 @@ from models.common import DetectMultiBackend from utils.dataloaders import create_classification_dataloader -from utils.general import LOGGER, Profile, check_img_size, check_requirements, colorstr, increment_path, print_args +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr, + increment_path, print_args) from utils.torch_utils import select_device, smart_inference_mode @@ -100,7 +101,7 @@ def run( n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" - bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) + bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): 
for images, labels in bar: with dt[0]: diff --git a/segment/train.py b/segment/train.py index f067918e7c3c..2a0793d1aa3e 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,10 +46,10 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, - print_args, print_mutation, strip_optimizer, yaml_save) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, + check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, + increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import GenericLogger from utils.plots import plot_evolve, plot_labels from utils.segment.dataloaders import create_dataloader @@ -277,7 +277,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio LOGGER.info(('\n' + '%11s' * 8) % ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ # callbacks.run('on_train_batch_start') diff --git a/segment/val.py b/segment/val.py index a875b3b79907..9bb8f9e4cf54 100644 --- a/segment/val.py +++ b/segment/val.py @@ -42,9 +42,9 @@ from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks -from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml, - coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, + check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, + non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader @@ -237,7 +237,7 @@ def run( loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: diff --git a/train.py b/train.py index 1fe6cf4d9ebd..bbbd6d07db00 100644 --- a/train.py +++ b/train.py @@ -47,10 +47,11 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, 
get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, + check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, + increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, + yaml_save) from utils.loggers import Loggers from utils.loggers.comet.comet_utils import check_comet_resume from utils.loss import ComputeLoss @@ -275,7 +276,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio pbar = enumerate(train_loader) LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- callbacks.run('on_train_batch_start') diff --git a/utils/autoanchor.py b/utils/autoanchor.py index cfc4c276e3aa..bb5cf6e6965e 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -11,7 +11,7 @@ from tqdm import tqdm from utils import TryExcept -from utils.general import LOGGER, colorstr +from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr PREFIX = colorstr('AutoAnchor: ') @@ -153,7 +153,7 @@ def print_results(k, verbose=True): # Evolve f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 39db3c0dfd21..e107d1a2bccf 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -29,17 +29,16 @@ from tqdm import tqdm from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, - cutout, letterbox, mixup, random_perspective) -from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, - xyxy2xywhn) + letterbox, mixup, random_perspective) +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, + check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, + xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes -BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) PIN_MEMORY = str(os.getenv('PIN_MEMORY', 
True)).lower() == 'true' # global pin_memory for dataloaders @@ -494,7 +493,7 @@ def __init__(self, nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" - tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' @@ -576,7 +575,7 @@ def __init__(self, self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': b += self.npy_files[i].stat().st_size @@ -612,7 +611,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.im_files), - bar_format=BAR_FORMAT) + bar_format=TQDM_BAR_FORMAT) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f diff --git a/utils/general.py b/utils/general.py index c543a237d25b..58181f00568d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -50,6 +50,7 @@ DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') diff --git a/val.py b/val.py index 127acf810029..ef282e37bdc1 100644 --- a/val.py +++ b/val.py @@ -38,9 +38,9 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, - coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, + check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, + print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, smart_inference_mode @@ -193,7 +193,7 @@ def run( loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): callbacks.run('on_val_batch_start') with dt[0]: From 
6f377f9d8a7f24a0766d2cfdef6d1e18873d33f9 Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Fri, 18 Nov 2022 14:05:45 -0700 Subject: [PATCH 144/277] Feature/classification tutorial refactor (#10039) * Added a tutorial notebook for classification. * Split a cell so that there is less room for error when a user pastes their own code snippet. Also added an active learning section at the end. * Added a section to the classification tutorial notebook about the various methods of input for `classify/predict.py`. * Updated link to colab * WIP commit to show some of the errors when trying to match the main tutorial. * Refactored the classification tutorial to be closer to the main tutorial. * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Ayush Chaurasia Co-authored-by: Glenn Jocher --- classify/tutorial.ipynb | 1843 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 1843 insertions(+) create mode 100644 classify/tutorial.ipynb diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb new file mode 100644 index 000000000000..8ed8b5db8a35 --- /dev/null +++ b/classify/tutorial.ipynb @@ -0,0 +1,1843 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 152.0/196.6 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x640 minibus 0.01, recreational vehicle 0.01, ambulance 0.01, tram 0.01, trolleybus 0.01, 2.6ms\n", + "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 640x640 suit 0.05, bow tie 0.01, ping-pong ball 0.01, microphone 0.01, bassoon 0.01, 2.8ms\n", + "Speed: 1.2ms pre-process, 2.7ms inference, 0.1ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", + "2 labels saved to runs/predict-cls/exp/labels\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 640 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + ] + }, + "id": "WQPtK1QYVaD_", + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + }, + "outputs": [], + "source": [ + "# Download Imagenet val\n", + "!bash data/scripts/get_imagenet.sh --val" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=320, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100%|██████████| 391/391 [02:36<00:00, 2.49it/s] \n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.734 0.914\n", + " tench 50 0.92 0.98\n", + " goldfish 50 0.86 0.98\n", + " great white shark 50 0.76 0.94\n", + " tiger shark 50 0.84 0.96\n", + " hammerhead shark 50 0.88 0.98\n", + " electric ray 50 0.76 0.88\n", + " stingray 50 0.74 0.94\n", + " cock 50 0.74 0.94\n", + " hen 50 0.86 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.98\n", + " goldfinch 50 0.92 1\n", + " house finch 50 0.92 1\n", + " junco 50 0.98 1\n", + " indigo bunting 50 0.86 0.94\n", + " American robin 50 0.94 1\n", + " bulbul 50 0.88 0.92\n", + " jay 50 0.92 0.98\n", + " magpie 50 0.9 0.98\n", + " chickadee 50 0.96 1\n", + " American dipper 50 0.86 0.92\n", + " kite 50 0.8 0.94\n", + " bald eagle 50 0.9 0.98\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.96 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.66 0.98\n", + " newt 50 0.74 0.84\n", + " spotted salamander 50 0.9 0.98\n", + " axolotl 50 0.9 0.98\n", + " American bullfrog 50 0.8 0.92\n", + " tree frog 50 0.8 0.94\n", + " tailed frog 50 0.5 0.82\n", + " loggerhead sea turtle 50 0.7 0.92\n", + " leatherback sea turtle 50 0.58 0.8\n", + " mud turtle 50 0.58 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.88 1\n", + " banded gecko 50 0.78 0.9\n", + " green iguana 50 0.78 0.92\n", + " Carolina anole 50 0.62 0.98\n", + "desert grassland whiptail lizard 50 0.88 0.96\n", + " agama 50 0.78 0.96\n", + " frilled-necked lizard 50 0.82 0.94\n", + " alligator lizard 50 0.64 0.84\n", + " Gila monster 50 0.76 0.86\n", + " European green lizard 50 0.5 0.96\n", + " chameleon 50 0.78 0.9\n", + " Komodo dragon 50 0.9 1\n", + " Nile crocodile 50 0.66 0.92\n", + " American alligator 50 0.78 0.98\n", + " triceratops 50 0.96 0.98\n", + " worm snake 50 0.76 0.9\n", + " ring-necked snake 50 0.84 0.96\n", + " eastern hog-nosed snake 50 0.62 0.86\n", + " smooth green snake 50 0.64 0.96\n", + " kingsnake 50 0.78 0.94\n", + " garter snake 50 0.86 0.98\n", + " water snake 50 0.78 0.92\n", + " vine snake 50 0.72 0.86\n", + " night snake 50 0.34 0.86\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.52 0.82\n", + " Indian cobra 50 0.8 0.94\n", + " green mamba 50 0.56 0.92\n", + " sea snake 50 0.76 0.94\n", + " Saharan horned viper 50 0.48 0.88\n", + "eastern diamondback rattlesnake 50 0.72 0.92\n", + " sidewinder 50 0.38 0.92\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.88 0.94\n", + " yellow garden spider 50 0.88 0.96\n", + " barn spider 50 0.38 0.96\n", + " European garden spider 50 0.6 0.98\n", + " southern black widow 50 0.84 0.98\n", + " tarantula 50 0.94 0.98\n", + " wolf spider 50 0.7 0.92\n", + " tick 50 0.76 0.82\n", + " centipede 50 0.74 0.86\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.84 0.98\n", + " ruffed grouse 50 0.9 1\n", + " prairie grouse 50 0.9 0.96\n", + " peacock 50 0.9 0.9\n", + " quail 50 0.88 0.94\n", + " partridge 50 0.66 0.94\n", + " grey parrot 50 0.94 0.98\n", + " macaw 50 0.92 0.98\n", + "sulphur-crested cockatoo 50 0.94 0.98\n", + " lorikeet 50 0.98 1\n", + " coucal 50 0.9 0.92\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.86 0.98\n", + " hummingbird 50 0.9 0.98\n", + " jacamar 50 0.94 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.78 0.94\n", + " red-breasted merganser 50 0.94 0.98\n", + " goose 50 0.76 0.98\n", + " black swan 50 
0.94 1\n", + " tusker 50 0.58 0.92\n", + " echidna 50 1 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.86 0.92\n", + " koala 50 0.84 0.98\n", + " wombat 50 0.82 0.86\n", + " jellyfish 50 0.94 0.96\n", + " sea anemone 50 0.66 0.98\n", + " brain coral 50 0.9 0.96\n", + " flatworm 50 0.76 1\n", + " nematode 50 0.9 0.92\n", + " conch 50 0.74 0.92\n", + " snail 50 0.78 0.86\n", + " slug 50 0.78 0.9\n", + " sea slug 50 0.94 0.98\n", + " chiton 50 0.86 0.96\n", + " chambered nautilus 50 0.86 0.94\n", + " Dungeness crab 50 0.86 0.96\n", + " rock crab 50 0.66 0.88\n", + " fiddler crab 50 0.64 0.88\n", + " red king crab 50 0.78 0.92\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.78 0.88\n", + " crayfish 50 0.56 0.84\n", + " hermit crab 50 0.82 0.96\n", + " isopod 50 0.62 0.74\n", + " white stork 50 0.88 0.94\n", + " black stork 50 0.86 0.96\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.98\n", + " bittern 50 0.9 0.92\n", + " crane (bird) 50 0.64 0.94\n", + " limpkin 50 0.96 0.98\n", + " common gallinule 50 0.96 0.96\n", + " American coot 50 0.94 1\n", + " bustard 50 0.96 0.98\n", + " ruddy turnstone 50 0.96 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.92 0.96\n", + " dowitcher 50 0.9 1\n", + " oystercatcher 50 0.9 0.96\n", + " pelican 50 0.96 1\n", + " king penguin 50 0.88 0.92\n", + " albatross 50 0.9 0.98\n", + " grey whale 50 0.86 0.94\n", + " killer whale 50 0.9 0.98\n", + " dugong 50 0.88 0.94\n", + " sea lion 50 0.78 0.98\n", + " Chihuahua 50 0.56 0.82\n", + " Japanese Chin 50 0.7 0.98\n", + " Maltese 50 0.86 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.68 0.94\n", + " King Charles Spaniel 50 0.92 0.98\n", + " Papillon 50 0.92 0.94\n", + " toy terrier 50 0.48 0.96\n", + " Rhodesian Ridgeback 50 0.76 0.94\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Afghan Hound 50 0.9 0.98\n", + " Basset Hound 50 0.78 0.9\n", + " Beagle 50 0.82 0.98\n", + " Bloodhound 50 0.5 0.78\n", + " Bluetick Coonhound 50 0.84 0.94\n", + " Black and Tan Coonhound 50 0.46 0.8\n", + "Treeing Walker Coonhound 50 0.58 0.98\n", + " English foxhound 50 0.24 0.8\n", + " Redbone Coonhound 50 0.66 0.92\n", + " borzoi 50 0.94 1\n", + " Irish Wolfhound 50 0.64 0.9\n", + " Italian Greyhound 50 0.8 0.98\n", + " Whippet 50 0.82 0.98\n", + " Ibizan Hound 50 0.64 0.92\n", + " Norwegian Elkhound 50 0.88 1\n", + " Otterhound 50 0.58 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 1\n", + " Weimaraner 50 0.88 0.96\n", + "Staffordshire Bull Terrier 50 0.62 0.92\n", + "American Staffordshire Terrier 50 0.66 0.92\n", + " Bedlington Terrier 50 0.82 0.96\n", + " Border Terrier 50 0.9 0.98\n", + " Kerry Blue Terrier 50 0.82 1\n", + " Irish Terrier 50 0.74 0.94\n", + " Norfolk Terrier 50 0.74 0.92\n", + " Norwich Terrier 50 0.68 0.98\n", + " Yorkshire Terrier 50 0.66 0.88\n", + " Wire Fox Terrier 50 0.66 0.96\n", + " Lakeland Terrier 50 0.82 0.94\n", + " Sealyham Terrier 50 0.74 0.9\n", + " Airedale Terrier 50 0.82 0.9\n", + " Cairn Terrier 50 0.82 0.94\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.84 0.9\n", + " Boston Terrier 50 0.88 1\n", + " Miniature Schnauzer 50 0.7 0.92\n", + " Giant Schnauzer 50 0.82 1\n", + " Standard Schnauzer 50 0.72 0.98\n", + " Scottish Terrier 50 0.78 0.94\n", + " Tibetan Terrier 50 0.64 0.98\n", + "Australian Silky Terrier 50 0.72 0.96\n", + "Soft-coated Wheaten Terrier 50 0.86 0.98\n", + "West Highland White 
Terrier 50 0.94 0.98\n", + " Lhasa Apso 50 0.66 0.96\n", + " Flat-Coated Retriever 50 0.78 1\n", + " Curly-coated Retriever 50 0.84 0.96\n", + " Golden Retriever 50 0.88 0.96\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.86 0.98\n", + "German Shorthaired Pointer 50 0.84 0.96\n", + " Vizsla 50 0.7 0.94\n", + " English Setter 50 0.8 1\n", + " Irish Setter 50 0.78 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.86 0.98\n", + " Clumber Spaniel 50 0.9 0.96\n", + "English Springer Spaniel 50 0.96 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.96\n", + " Sussex Spaniel 50 0.7 0.88\n", + " Irish Water Spaniel 50 0.86 0.94\n", + " Kuvasz 50 0.7 0.92\n", + " Schipperke 50 0.94 0.98\n", + " Groenendael 50 0.78 0.92\n", + " Malinois 50 0.92 0.98\n", + " Briard 50 0.6 0.84\n", + " Australian Kelpie 50 0.74 0.96\n", + " Komondor 50 0.9 0.96\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.72 0.94\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.82 0.96\n", + " Bouvier des Flandres 50 0.78 0.96\n", + " Rottweiler 50 0.94 0.98\n", + " German Shepherd Dog 50 0.76 0.98\n", + " Dobermann 50 0.74 1\n", + " Miniature Pinscher 50 0.76 0.96\n", + "Greater Swiss Mountain Dog 50 0.66 0.94\n", + " Bernese Mountain Dog 50 0.94 1\n", + " Appenzeller Sennenhund 50 0.3 1\n", + " Entlebucher Sennenhund 50 0.72 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.8 0.98\n", + " Tibetan Mastiff 50 0.92 0.98\n", + " French Bulldog 50 0.86 0.98\n", + " Great Dane 50 0.6 0.92\n", + " St. Bernard 50 0.94 1\n", + " husky 50 0.5 0.94\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.56 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.76 0.92\n", + " Basenji 50 0.9 1\n", + " pug 50 0.96 0.98\n", + " Leonberger 50 0.98 1\n", + " Newfoundland 50 0.82 0.96\n", + " Pyrenean Mountain Dog 50 0.76 0.94\n", + " Samoyed 50 0.9 0.98\n", + " Pomeranian 50 0.96 1\n", + " Chow Chow 50 0.88 0.96\n", + " Keeshond 50 0.94 1\n", + " Griffon Bruxellois 50 0.92 0.98\n", + " Pembroke Welsh Corgi 50 0.9 0.98\n", + " Cardigan Welsh Corgi 50 0.7 0.94\n", + " Toy Poodle 50 0.52 0.96\n", + " Miniature Poodle 50 0.56 0.92\n", + " Standard Poodle 50 0.78 0.96\n", + " Mexican hairless dog 50 0.86 0.98\n", + " grey wolf 50 0.74 0.92\n", + " Alaskan tundra wolf 50 0.86 0.98\n", + " red wolf 50 0.54 0.92\n", + " coyote 50 0.62 0.82\n", + " dingo 50 0.76 0.94\n", + " dhole 50 0.9 0.96\n", + " African wild dog 50 1 1\n", + " hyena 50 0.9 0.94\n", + " red fox 50 0.62 0.92\n", + " kit fox 50 0.7 0.98\n", + " Arctic fox 50 0.92 0.98\n", + " grey fox 50 0.66 0.96\n", + " tabby cat 50 0.58 0.92\n", + " tiger cat 50 0.2 0.94\n", + " Persian cat 50 0.92 1\n", + " Siamese cat 50 0.94 0.98\n", + " Egyptian Mau 50 0.52 0.84\n", + " cougar 50 0.94 0.96\n", + " lynx 50 0.74 0.9\n", + " leopard 50 0.86 1\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.72 0.92\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.96 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.9 0.98\n", + " American black bear 50 0.9 0.98\n", + " polar bear 50 0.86 0.94\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.86\n", + " meerkat 50 0.82 0.98\n", + " tiger beetle 50 0.9 0.94\n", + " ladybug 50 0.78 0.98\n", + " ground beetle 50 0.62 0.94\n", + " longhorn beetle 50 0.58 0.9\n", + " leaf beetle 50 0.66 0.98\n", + " dung beetle 50 0.88 0.98\n", + " rhinoceros beetle 50 0.88 1\n", + " weevil 50 0.92 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.8 0.96\n", + 
" ant 50 0.68 0.84\n", + " grasshopper 50 0.48 0.9\n", + " cricket 50 0.66 0.94\n", + " stick insect 50 0.7 0.94\n", + " cockroach 50 0.72 0.84\n", + " mantis 50 0.72 0.9\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.9 0.96\n", + " lacewing 50 0.8 0.94\n", + " dragonfly 50 0.76 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.96 0.96\n", + " ringlet 50 0.88 1\n", + " monarch butterfly 50 0.9 0.96\n", + " small white 50 0.88 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.9 1\n", + " starfish 50 0.82 0.94\n", + " sea urchin 50 0.84 0.98\n", + " sea cucumber 50 0.76 0.92\n", + " cottontail rabbit 50 0.7 0.98\n", + " hare 50 0.9 1\n", + " Angora rabbit 50 0.92 0.98\n", + " hamster 50 1 1\n", + " porcupine 50 0.9 0.98\n", + " fox squirrel 50 0.82 0.96\n", + " marmot 50 0.94 0.96\n", + " beaver 50 0.78 0.96\n", + " guinea pig 50 0.78 0.92\n", + " common sorrel 50 0.98 0.98\n", + " zebra 50 0.96 0.98\n", + " pig 50 0.54 0.82\n", + " wild boar 50 0.86 0.96\n", + " warthog 50 0.96 0.96\n", + " hippopotamus 50 0.9 1\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ox 50 0.52 0.94\n", + " water buffalo 50 0.86 0.94\n", + " bison 50 0.9 0.98\n", + " ram 50 0.62 0.98\n", + " bighorn sheep 50 0.72 1\n", + " Alpine ibex 50 0.96 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.86 0.98\n", + " gazelle 50 0.74 0.96\n", + " dromedary 50 0.94 1\n", + " llama 50 0.86 0.94\n", + " weasel 50 0.42 0.96\n", + " mink 50 0.78 0.92\n", + " European polecat 50 0.54 0.88\n", + " black-footed ferret 50 0.74 0.96\n", + " otter 50 0.68 0.9\n", + " skunk 50 0.94 0.96\n", + " badger 50 0.88 0.92\n", + " armadillo 50 0.88 0.96\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.82 0.9\n", + " gorilla 50 0.78 0.94\n", + " chimpanzee 50 0.86 0.94\n", + " gibbon 50 0.74 0.9\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.82 0.96\n", + " patas monkey 50 0.66 0.86\n", + " baboon 50 0.88 0.96\n", + " macaque 50 0.72 0.84\n", + " langur 50 0.56 0.78\n", + " black-and-white colobus 50 0.84 0.92\n", + " proboscis monkey 50 0.98 1\n", + " marmoset 50 0.7 0.92\n", + " white-headed capuchin 50 0.82 0.94\n", + " howler monkey 50 0.9 0.96\n", + " titi 50 0.54 0.9\n", + "Geoffroy's spider monkey 50 0.36 0.86\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.7 0.94\n", + " indri 50 0.86 0.98\n", + " Asian elephant 50 0.54 0.96\n", + " African bush elephant 50 0.62 0.96\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.92 0.98\n", + " snoek 50 0.76 0.9\n", + " eel 50 0.58 0.86\n", + " coho salmon 50 0.8 0.98\n", + " rock beauty 50 0.8 0.96\n", + " clownfish 50 0.8 0.98\n", + " sturgeon 50 0.76 0.96\n", + " garfish 50 0.7 0.82\n", + " lionfish 50 0.94 0.98\n", + " pufferfish 50 0.86 0.98\n", + " abacus 50 0.8 0.88\n", + " abaya 50 0.72 0.94\n", + " academic gown 50 0.44 0.94\n", + " accordion 50 0.78 0.96\n", + " acoustic guitar 50 0.54 0.78\n", + " aircraft carrier 50 0.7 0.98\n", + " airliner 50 0.92 1\n", + " airship 50 0.8 0.88\n", + " altar 50 0.6 0.94\n", + " ambulance 50 0.84 0.98\n", + " amphibious vehicle 50 0.68 0.9\n", + " analog clock 50 0.5 0.88\n", + " apiary 50 0.9 1\n", + " apron 50 0.68 0.86\n", + " waste container 50 0.6 0.86\n", + " assault rifle 50 0.36 0.9\n", + " backpack 50 0.36 0.72\n", + " bakery 50 0.38 0.64\n", + " balance beam 50 0.84 0.98\n", + " balloon 50 0.88 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.68 0.96\n", + " banjo 50 0.9 1\n", + " baluster 50 0.74 0.94\n", + " barbell 
50 0.58 0.9\n", + " barber chair 50 0.72 0.9\n", + " barbershop 50 0.64 0.9\n", + " barn 50 0.96 0.96\n", + " barometer 50 0.86 0.96\n", + " barrel 50 0.64 0.86\n", + " wheelbarrow 50 0.64 0.92\n", + " baseball 50 0.76 0.96\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.8 0.94\n", + " bassoon 50 0.84 0.98\n", + " swimming cap 50 0.7 0.88\n", + " bath towel 50 0.56 0.84\n", + " bathtub 50 0.34 0.86\n", + " station wagon 50 0.68 0.9\n", + " lighthouse 50 0.74 0.96\n", + " beaker 50 0.46 0.7\n", + " military cap 50 0.88 0.98\n", + " beer bottle 50 0.72 0.9\n", + " beer glass 50 0.72 0.9\n", + " bell-cot 50 0.6 0.96\n", + " bib 50 0.58 0.86\n", + " tandem bicycle 50 0.76 0.96\n", + " bikini 50 0.52 0.88\n", + " ring binder 50 0.7 0.86\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.96\n", + " boathouse 50 0.78 0.96\n", + " bobsleigh 50 0.94 0.96\n", + " bolo tie 50 0.86 0.88\n", + " poke bonnet 50 0.68 0.88\n", + " bookcase 50 0.68 0.92\n", + " bookstore 50 0.58 0.88\n", + " bottle cap 50 0.62 0.8\n", + " bow 50 0.74 0.84\n", + " bow tie 50 0.68 0.92\n", + " brass 50 0.92 0.98\n", + " bra 50 0.52 0.76\n", + " breakwater 50 0.64 0.94\n", + " breastplate 50 0.36 0.9\n", + " broom 50 0.58 0.84\n", + " bucket 50 0.58 0.88\n", + " buckle 50 0.5 0.76\n", + " bulletproof vest 50 0.52 0.76\n", + " high-speed train 50 0.94 0.98\n", + " butcher shop 50 0.76 0.94\n", + " taxicab 50 0.7 0.92\n", + " cauldron 50 0.5 0.72\n", + " candle 50 0.5 0.76\n", + " cannon 50 0.88 0.96\n", + " canoe 50 0.94 1\n", + " can opener 50 0.72 0.88\n", + " cardigan 50 0.66 0.88\n", + " car mirror 50 0.94 0.98\n", + " carousel 50 0.96 0.96\n", + " tool kit 50 0.68 0.84\n", + " carton 50 0.44 0.78\n", + " car wheel 50 0.4 0.78\n", + "automated teller machine 50 0.82 0.94\n", + " cassette 50 0.62 0.84\n", + " cassette player 50 0.3 0.92\n", + " castle 50 0.74 0.9\n", + " catamaran 50 0.74 0.98\n", + " CD player 50 0.52 0.8\n", + " cello 50 0.84 1\n", + " mobile phone 50 0.72 0.86\n", + " chain 50 0.34 0.78\n", + " chain-link fence 50 0.7 0.86\n", + " chain mail 50 0.68 0.86\n", + " chainsaw 50 0.88 0.96\n", + " chest 50 0.7 0.88\n", + " chiffonier 50 0.32 0.64\n", + " chime 50 0.64 0.84\n", + " china cabinet 50 0.78 0.94\n", + " Christmas stocking 50 0.92 0.98\n", + " church 50 0.6 0.86\n", + " movie theater 50 0.68 0.9\n", + " cleaver 50 0.36 0.68\n", + " cliff dwelling 50 0.86 1\n", + " cloak 50 0.28 0.7\n", + " clogs 50 0.6 0.88\n", + " cocktail shaker 50 0.62 0.76\n", + " coffee mug 50 0.48 0.78\n", + " coffeemaker 50 0.62 0.92\n", + " coil 50 0.64 0.86\n", + " combination lock 50 0.62 0.92\n", + " computer keyboard 50 0.72 0.92\n", + " confectionery store 50 0.56 0.84\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 1\n", + " corkscrew 50 0.84 0.98\n", + " cornet 50 0.56 0.98\n", + " cowboy boot 50 0.66 0.78\n", + " cowboy hat 50 0.66 0.88\n", + " cradle 50 0.34 0.8\n", + " crane (machine) 50 0.8 0.92\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.6 0.86\n", + " infant bed 50 0.8 0.96\n", + " Crock Pot 50 0.78 0.88\n", + " croquet ball 50 0.9 1\n", + " crutch 50 0.42 0.7\n", + " cuirass 50 0.54 0.92\n", + " dam 50 0.78 0.92\n", + " desk 50 0.68 0.88\n", + " desktop computer 50 0.54 0.9\n", + " rotary dial telephone 50 0.92 0.96\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.6 0.8\n", + " digital watch 50 0.56 0.82\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " dining table 50 0.78 0.88\n", + " dishcloth 50 0.98 1\n", + " dishwasher 50 0.52 
0.74\n", + " disc brake 50 0.96 1\n", + " dock 50 0.56 0.96\n", + " dog sled 50 0.9 0.98\n", + " dome 50 0.74 0.96\n", + " doormat 50 0.6 0.82\n", + " drilling rig 50 0.82 0.94\n", + " drum 50 0.4 0.72\n", + " drumstick 50 0.56 0.82\n", + " dumbbell 50 0.6 0.92\n", + " Dutch oven 50 0.66 0.88\n", + " electric fan 50 0.82 0.84\n", + " electric guitar 50 0.66 0.92\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.92 1\n", + " envelope 50 0.58 0.88\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.76 0.92\n", + " feather boa 50 0.8 0.88\n", + " filing cabinet 50 0.84 0.98\n", + " fireboat 50 0.96 0.96\n", + " fire engine 50 0.82 0.92\n", + " fire screen sheet 50 0.52 0.78\n", + " flagpole 50 0.76 0.92\n", + " flute 50 0.4 0.76\n", + " folding chair 50 0.68 0.9\n", + " football helmet 50 0.9 0.96\n", + " forklift 50 0.8 0.94\n", + " fountain 50 0.88 0.92\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.82 0.92\n", + " freight car 50 0.98 0.98\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.48 0.82\n", + " fur coat 50 0.86 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.82 0.92\n", + " gas pump 50 0.82 0.98\n", + " goblet 50 0.64 0.9\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.86 0.96\n", + " golf cart 50 0.76 0.9\n", + " gondola 50 0.94 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.72 0.94\n", + " grand piano 50 0.74 0.96\n", + " greenhouse 50 0.84 1\n", + " grille 50 0.72 0.88\n", + " grocery store 50 0.68 0.9\n", + " guillotine 50 0.84 0.94\n", + " barrette 50 0.48 0.68\n", + " hair spray 50 0.4 0.76\n", + " half-track 50 0.76 0.96\n", + " hammer 50 0.54 0.78\n", + " hamper 50 0.72 0.9\n", + " hair dryer 50 0.7 0.8\n", + " hand-held computer 50 0.52 0.88\n", + " handkerchief 50 0.8 0.96\n", + " hard disk drive 50 0.78 0.86\n", + " harmonica 50 0.68 0.96\n", + " harp 50 0.9 0.96\n", + " harvester 50 0.86 1\n", + " hatchet 50 0.6 0.84\n", + " holster 50 0.7 0.84\n", + " home theater 50 0.72 0.96\n", + " honeycomb 50 0.74 0.86\n", + " hook 50 0.28 0.62\n", + " hoop skirt 50 0.68 0.8\n", + " horizontal bar 50 0.76 0.98\n", + " horse-drawn vehicle 50 0.9 0.9\n", + " hourglass 50 0.92 0.98\n", + " iPod 50 0.9 0.94\n", + " clothes iron 50 0.72 0.9\n", + " jack-o'-lantern 50 0.94 0.98\n", + " jeans 50 0.7 0.82\n", + " jeep 50 0.76 0.9\n", + " T-shirt 50 0.72 0.94\n", + " jigsaw puzzle 50 0.92 0.96\n", + " pulled rickshaw 50 0.88 0.96\n", + " joystick 50 0.74 0.98\n", + " kimono 50 0.78 0.94\n", + " knee pad 50 0.7 0.86\n", + " knot 50 0.8 0.86\n", + " lab coat 50 0.82 0.98\n", + " ladle 50 0.26 0.64\n", + " lampshade 50 0.62 0.8\n", + " laptop computer 50 0.2 0.88\n", + " lawn mower 50 0.8 0.96\n", + " lens cap 50 0.5 0.8\n", + " paper knife 50 0.3 0.58\n", + " library 50 0.62 0.92\n", + " lifeboat 50 0.94 0.98\n", + " lighter 50 0.56 0.8\n", + " limousine 50 0.74 0.92\n", + " ocean liner 50 0.88 0.96\n", + " lipstick 50 0.7 0.88\n", + " slip-on shoe 50 0.82 0.94\n", + " lotion 50 0.56 0.9\n", + " speaker 50 0.58 0.64\n", + " loupe 50 0.32 0.54\n", + " sawmill 50 0.74 0.9\n", + " magnetic compass 50 0.48 0.78\n", + " mail bag 50 0.64 0.94\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.28 0.9\n", + " tank suit 50 0.3 0.88\n", + " manhole cover 50 0.94 0.98\n", + " maraca 50 0.72 0.86\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.48 0.78\n", + " match 50 0.74 0.92\n", + " maypole 50 0.96 1\n", + " maze 50 0.82 1\n", + " measuring cup 50 0.66 0.82\n", + " medicine chest 50 0.6 0.9\n", + " megalith 50 0.84 0.92\n", + " 
microphone 50 0.56 0.74\n", + " microwave oven 50 0.56 0.8\n", + " military uniform 50 0.62 0.86\n", + " milk can 50 0.7 0.82\n", + " minibus 50 0.68 1\n", + " miniskirt 50 0.58 0.84\n", + " minivan 50 0.48 0.8\n", + " missile 50 0.34 0.82\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.82 0.98\n", + " mobile home 50 0.58 0.8\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.7 0.9\n", + " monastery 50 0.52 0.86\n", + " monitor 50 0.34 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.72 0.88\n", + " square academic cap 50 0.48 0.82\n", + " mosque 50 0.98 1\n", + " mosquito net 50 0.96 0.98\n", + " scooter 50 0.88 0.98\n", + " mountain bike 50 0.74 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.38 0.82\n", + " mousetrap 50 0.82 0.9\n", + " moving van 50 0.48 0.8\n", + " muzzle 50 0.5 0.74\n", + " nail 50 0.68 0.76\n", + " neck brace 50 0.62 0.72\n", + " necklace 50 0.92 1\n", + " nipple 50 0.8 0.92\n", + " notebook computer 50 0.34 0.88\n", + " obelisk 50 0.82 0.94\n", + " oboe 50 0.62 0.84\n", + " ocarina 50 0.82 0.88\n", + " odometer 50 0.98 1\n", + " oil filter 50 0.6 0.82\n", + " organ 50 0.84 0.94\n", + " oscilloscope 50 0.94 0.96\n", + " overskirt 50 0.2 0.62\n", + " bullock cart 50 0.76 0.94\n", + " oxygen mask 50 0.48 0.8\n", + " packet 50 0.54 0.74\n", + " paddle 50 0.7 0.94\n", + " paddle wheel 50 0.92 0.98\n", + " padlock 50 0.64 0.78\n", + " paintbrush 50 0.66 0.78\n", + " pajamas 50 0.68 0.94\n", + " palace 50 0.66 0.94\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.68 0.86\n", + " parachute 50 0.92 0.96\n", + " parallel bars 50 0.68 0.96\n", + " park bench 50 0.82 0.94\n", + " parking meter 50 0.86 0.98\n", + " passenger car 50 0.48 0.86\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " patio 50 0.6 0.84\n", + " payphone 50 0.78 0.94\n", + " pedestal 50 0.66 0.86\n", + " pencil case 50 0.74 0.98\n", + " pencil sharpener 50 0.6 0.76\n", + " perfume 50 0.66 0.96\n", + " Petri dish 50 0.64 0.82\n", + " photocopier 50 0.94 1\n", + " plectrum 50 0.72 0.92\n", + " Pickelhaube 50 0.78 0.88\n", + " picket fence 50 0.86 0.94\n", + " pickup truck 50 0.72 0.94\n", + " pier 50 0.54 0.92\n", + " piggy bank 50 0.8 0.94\n", + " pill bottle 50 0.72 0.9\n", + " pillow 50 0.76 0.88\n", + " ping-pong ball 50 0.78 0.88\n", + " pinwheel 50 0.8 0.94\n", + " pirate ship 50 0.76 0.92\n", + " pitcher 50 0.48 0.86\n", + " hand plane 50 0.9 0.92\n", + " planetarium 50 0.9 0.98\n", + " plastic bag 50 0.42 0.66\n", + " plate rack 50 0.52 0.82\n", + " plow 50 0.8 0.94\n", + " plunger 50 0.42 0.72\n", + " Polaroid camera 50 0.84 0.94\n", + " pole 50 0.4 0.76\n", + " police van 50 0.84 0.94\n", + " poncho 50 0.64 0.88\n", + " billiard table 50 0.84 0.92\n", + " soda bottle 50 0.58 0.9\n", + " pot 50 0.86 0.94\n", + " potter's wheel 50 0.92 0.94\n", + " power drill 50 0.38 0.7\n", + " prayer rug 50 0.7 0.88\n", + " printer 50 0.52 0.86\n", + " prison 50 0.66 0.9\n", + " projectile 50 0.34 0.96\n", + " projector 50 0.6 0.82\n", + " hockey puck 50 0.9 0.98\n", + " punching bag 50 0.62 0.72\n", + " purse 50 0.48 0.88\n", + " quill 50 0.78 0.86\n", + " quilt 50 0.6 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.78 0.94\n", + " radiator 50 0.7 0.84\n", + " radio 50 0.68 0.9\n", + " radio telescope 50 0.88 0.94\n", + " rain barrel 50 0.8 0.96\n", + " recreational vehicle 50 0.84 0.96\n", + " reel 50 0.72 0.8\n", + " reflex camera 50 0.76 0.96\n", + " refrigerator 50 0.76 0.92\n", + " remote control 50 0.72 0.94\n", + " restaurant 50 0.52 0.62\n", + " 
revolver 50 0.8 0.98\n", + " rifle 50 0.46 0.76\n", + " rocking chair 50 0.72 0.9\n", + " rotisserie 50 0.88 0.96\n", + " eraser 50 0.62 0.76\n", + " rugby ball 50 0.84 0.94\n", + " ruler 50 0.72 0.86\n", + " running shoe 50 0.84 0.94\n", + " safe 50 0.9 0.94\n", + " safety pin 50 0.48 0.8\n", + " salt shaker 50 0.62 0.8\n", + " sandal 50 0.7 0.82\n", + " sarong 50 0.62 0.8\n", + " saxophone 50 0.66 0.9\n", + " scabbard 50 0.78 0.92\n", + " weighing scale 50 0.62 0.84\n", + " school bus 50 0.92 1\n", + " schooner 50 0.8 1\n", + " scoreboard 50 0.86 0.98\n", + " CRT screen 50 0.16 0.8\n", + " screw 50 0.96 0.98\n", + " screwdriver 50 0.4 0.58\n", + " seat belt 50 0.9 0.92\n", + " sewing machine 50 0.74 0.94\n", + " shield 50 0.64 0.78\n", + " shoe store 50 0.84 0.98\n", + " shoji 50 0.76 0.92\n", + " shopping basket 50 0.52 0.84\n", + " shopping cart 50 0.76 0.9\n", + " shovel 50 0.7 0.84\n", + " shower cap 50 0.74 0.88\n", + " shower curtain 50 0.72 0.9\n", + " ski 50 0.68 0.94\n", + " ski mask 50 0.66 0.9\n", + " sleeping bag 50 0.66 0.8\n", + " slide rule 50 0.7 0.86\n", + " sliding door 50 0.54 0.76\n", + " slot machine 50 0.92 0.96\n", + " snorkel 50 0.86 1\n", + " snowmobile 50 0.86 0.96\n", + " snowplow 50 0.9 1\n", + " soap dispenser 50 0.52 0.9\n", + " soccer ball 50 0.84 0.98\n", + " sock 50 0.66 0.78\n", + " solar thermal collector 50 0.72 0.9\n", + " sombrero 50 0.7 0.84\n", + " soup bowl 50 0.6 0.94\n", + " space bar 50 0.32 0.84\n", + " space heater 50 0.64 0.74\n", + " space shuttle 50 0.86 0.98\n", + " spatula 50 0.28 0.6\n", + " motorboat 50 0.94 1\n", + " spider web 50 0.76 0.96\n", + " spindle 50 0.92 1\n", + " sports car 50 0.5 0.96\n", + " spotlight 50 0.34 0.66\n", + " stage 50 0.76 0.92\n", + " steam locomotive 50 0.96 1\n", + " through arch bridge 50 0.82 0.96\n", + " steel drum 50 0.8 0.94\n", + " stethoscope 50 0.52 0.84\n", + " scarf 50 0.54 0.92\n", + " stone wall 50 0.8 0.92\n", + " stopwatch 50 0.54 0.9\n", + " stove 50 0.46 0.78\n", + " strainer 50 0.58 0.84\n", + " tram 50 0.9 0.96\n", + " stretcher 50 0.46 0.74\n", + " couch 50 0.72 0.94\n", + " stupa 50 0.84 0.9\n", + " submarine 50 0.78 0.9\n", + " suit 50 0.62 0.88\n", + " sundial 50 0.46 0.78\n", + " sunglass 50 0.18 0.6\n", + " sunglasses 50 0.32 0.64\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.64 0.94\n", + " mop 50 0.8 0.96\n", + " sweatshirt 50 0.26 0.68\n", + " swimsuit 50 0.6 0.84\n", + " swing 50 0.78 0.88\n", + " switch 50 0.62 0.8\n", + " syringe 50 0.68 0.8\n", + " table lamp 50 0.54 0.88\n", + " tank 50 0.78 0.94\n", + " tape player 50 0.38 0.88\n", + " teapot 50 0.82 1\n", + " teddy bear 50 0.82 0.92\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.86 0.94\n", + " front curtain 50 0.76 0.94\n", + " thimble 50 0.68 0.82\n", + " threshing machine 50 0.64 0.9\n", + " throne 50 0.68 0.82\n", + " tile roof 50 0.84 0.96\n", + " toaster 50 0.64 0.82\n", + " tobacco shop 50 0.44 0.74\n", + " toilet seat 50 0.64 0.88\n", + " torch 50 0.62 0.86\n", + " totem pole 50 0.9 1\n", + " tow truck 50 0.64 0.92\n", + " toy store 50 0.64 0.9\n", + " tractor 50 0.86 0.98\n", + " semi-trailer truck 50 0.76 0.96\n", + " tray 50 0.54 0.76\n", + " trench coat 50 0.6 0.78\n", + " tricycle 50 0.78 0.96\n", + " trimaran 50 0.78 0.98\n", + " tripod 50 0.66 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.98 1\n", + " trombone 50 0.66 0.94\n", + " tub 50 0.3 0.86\n", + " turnstile 50 0.8 0.9\n", + " typewriter keyboard 50 0.74 0.98\n", + " umbrella 
50 0.6 0.78\n", + " unicycle 50 0.78 0.96\n", + " upright piano 50 0.84 0.94\n", + " vacuum cleaner 50 0.84 0.92\n", + " vase 50 0.56 0.74\n", + " vault 50 0.78 0.9\n", + " velvet 50 0.22 0.5\n", + " vending machine 50 0.94 1\n", + " vestment 50 0.62 0.86\n", + " viaduct 50 0.78 0.88\n", + " violin 50 0.64 0.88\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " volleyball 50 0.96 1\n", + " waffle iron 50 0.72 0.84\n", + " wall clock 50 0.58 0.86\n", + " wallet 50 0.58 0.94\n", + " wardrobe 50 0.7 0.9\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.74 0.94\n", + " washing machine 50 0.82 0.94\n", + " water bottle 50 0.54 0.68\n", + " water jug 50 0.3 0.78\n", + " water tower 50 0.94 0.96\n", + " whiskey jug 50 0.64 0.76\n", + " whistle 50 0.7 0.82\n", + " wig 50 0.86 0.88\n", + " window screen 50 0.7 0.82\n", + " window shade 50 0.54 0.9\n", + " Windsor tie 50 0.32 0.64\n", + " wine bottle 50 0.46 0.76\n", + " wing 50 0.52 0.96\n", + " wok 50 0.54 0.92\n", + " wooden spoon 50 0.62 0.86\n", + " wool 50 0.42 0.84\n", + " split-rail fence 50 0.7 0.92\n", + " shipwreck 50 0.86 0.98\n", + " yawl 50 0.76 0.92\n", + " yurt 50 0.86 0.96\n", + " website 50 0.98 1\n", + " comic book 50 0.72 0.88\n", + " crossword 50 0.8 0.88\n", + " traffic sign 50 0.72 0.9\n", + " traffic light 50 0.8 0.96\n", + " dust jacket 50 0.78 0.94\n", + " menu 50 0.8 0.96\n", + " plate 50 0.44 0.86\n", + " guacamole 50 0.76 0.96\n", + " consomme 50 0.52 0.92\n", + " hot pot 50 0.78 1\n", + " trifle 50 0.9 1\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.68 0.8\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.86\n", + " pretzel 50 0.68 0.9\n", + " cheeseburger 50 0.92 0.96\n", + " hot dog 50 0.74 0.96\n", + " mashed potato 50 0.72 0.88\n", + " cabbage 50 0.88 0.98\n", + " broccoli 50 0.88 0.96\n", + " cauliflower 50 0.84 0.98\n", + " zucchini 50 0.68 0.98\n", + " spaghetti squash 50 0.82 0.96\n", + " acorn squash 50 0.8 1\n", + " butternut squash 50 0.72 0.94\n", + " cucumber 50 0.66 0.94\n", + " artichoke 50 0.86 0.96\n", + " bell pepper 50 0.86 0.94\n", + " cardoon 50 0.92 0.94\n", + " mushroom 50 0.38 0.96\n", + " Granny Smith 50 0.9 0.98\n", + " strawberry 50 0.64 0.88\n", + " orange 50 0.74 0.94\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.84 0.94\n", + " pineapple 50 0.9 1\n", + " banana 50 0.88 0.98\n", + " jackfruit 50 0.96 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.8 0.96\n", + " hay 50 0.84 0.96\n", + " carbonara 50 0.88 0.96\n", + " chocolate syrup 50 0.58 0.94\n", + " dough 50 0.36 0.68\n", + " meatloaf 50 0.64 0.88\n", + " pizza 50 0.78 0.9\n", + " pot pie 50 0.66 0.92\n", + " burrito 50 0.88 0.98\n", + " red wine 50 0.66 0.84\n", + " espresso 50 0.66 0.9\n", + " cup 50 0.42 0.78\n", + " eggnog 50 0.36 0.64\n", + " alp 50 0.54 0.94\n", + " bubble 50 0.86 0.96\n", + " cliff 50 0.66 1\n", + " coral reef 50 0.74 0.94\n", + " geyser 50 0.92 1\n", + " lakeshore 50 0.52 0.86\n", + " promontory 50 0.58 0.92\n", + " shoal 50 0.66 0.98\n", + " seashore 50 0.44 0.86\n", + " valley 50 0.72 0.98\n", + " volcano 50 0.72 0.94\n", + " baseball player 50 0.74 0.96\n", + " bridegroom 50 0.78 0.92\n", + " scuba diver 50 0.82 1\n", + " rapeseed 50 0.98 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.42 0.86\n", + " acorn 50 0.96 0.98\n", + " rose hip 50 0.9 0.96\n", + " horse chestnut seed 50 1 1\n", + " coral fungus 50 0.98 0.98\n", + " agaric 50 0.84 0.94\n", + " gyromitra 50 0.98 0.98\n", + " stinkhorn mushroom 50 0.84 
0.92\n", + " earth star 50 1 1\n", + " hen-of-the-woods 50 0.9 0.96\n", + " bolete 50 0.8 0.94\n", + " ear 50 0.54 0.94\n", + " toilet paper 50 0.44 0.68\n", + "Speed: 0.1ms pre-process, 0.2ms inference, 0.0ms post-process per image at shape (1, 3, 320, 320)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s on Imagenet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 320 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml && clearml-init" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=160, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 7 commits. Use `git pull ultralytics master` or `git clone https://github.com/ultralytics/yolov5` to update.\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0m⚠️ not found, install with `pip install albumentations` (recommended)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 160 train, 160 test\n", + "Using 3 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/3 0.369G 1.05 0.935 0.837 0.985: 100%|█████\n", + " 2/3 0.369G 0.767 0.873 0.859 0.982: 100%|█████\n", + " 3/3 0.369G 0.626 0.713 0.927 0.992: 100%|█████\n", + "\n", + "Training complete (0.025 hours)\n", + "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /home/paguerrie/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] + } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --img 160 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. 
Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "0856bea36ec148b68522ff9c9eb258d8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0ace3934ec6f4d36a1b3a9e086390926": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "35e03ce5090346c9ae602891470fc555": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "max": 818322941, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "value": 818322941 + } + }, + "574140e4c4bc48c9a171541a02cd0211": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "placeholder": "​", + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "value": "100%" + } + }, + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "60b913d755b34d638478e30705a2dde1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + 
"model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "65881db1db8a4e9c930fab9172d45143": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "76879f6f2aa54637a7a07faeea2bd684": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + 
"top": null, + "visibility": null, + "width": null + } + }, + "9b8caa3522fc4cbab31e13b5dfc7808d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + ], + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + } + }, + "c942c208e72d46568b476bb0f2d75496": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "placeholder": "​", + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" + } + }, + "d6b7a2243e0c4beca714d99dceec23d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} From 005161514f0db7203195dae99caa94a617ac09f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 22:10:35 +0100 Subject: [PATCH 145/277] Remove Colab notebook High-Memory notices (#10212) * Remove Colab notebook High-Memory notices Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/tutorial.ipynb | 5 ++--- segment/tutorial.ipynb | 7 +++---- tutorial.ipynb | 3 +-- 3 files changed, 6 
insertions(+), 9 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 8ed8b5db8a35..f235b754d7b4 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -14,7 +14,7 @@ "\n", "
\n", " \"Run\n", - " \"Open\n", + " \"Open\n", " \"Open\n", "
\n", "\n", @@ -1469,8 +1469,7 @@ "accelerator": "GPU", "colab": { "collapsed_sections": [], - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", + "name": "YOLOv5 Classification Tutorial", "provenance": [], "toc_visible": true }, diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index c26878fb0dbf..f3f978d43d93 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -14,7 +14,7 @@ "\n", "
\n", " \"Run\n", - " \"Open\n", + " \"Open\n", " \"Open\n", "
\n", "\n", @@ -572,8 +572,7 @@ "metadata": { "accelerator": "GPU", "colab": { - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", + "name": "YOLOv5 Segmentation Tutorial", "provenance": [], "toc_visible": true }, @@ -597,4 +596,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 07a6625a1491..eb5b675db2be 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -5,7 +5,6 @@ "colab": { "name": "YOLOv5 Tutorial", "provenance": [], - "machine_shape": "hm", "toc_visible": true }, "kernelspec": { @@ -973,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 2ecaa96c847c2b117bf1057d6caec54520fd592a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 22:17:52 +0100 Subject: [PATCH 146/277] Created using Colaboratory --- tutorial.ipynb | 134 ++++++++++++++++++++++++------------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index eb5b675db2be..9d5aa9c85c51 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -14,7 +14,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "13e0e8b77bf54b25b8893f0b4164315f": { + "300b4d5355ef4967bd5246afeef6eef5": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,14 +29,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_48037f2f7fea4012b9b341f6aee75297", - "IPY_MODEL_3f3b925287274893baf5ed7bb0cf6635", - "IPY_MODEL_c44bdca7c9784b20ba2146250ee744d6" + "IPY_MODEL_84e6829bb88845a8a4f42700b8496925", + "IPY_MODEL_c038e52d41bf4d5b9602930c3d074087", + "IPY_MODEL_2667604641764341b0bc8c6afea438fd" ], - "layout": "IPY_MODEL_5b0ed23cd32c4c7d8d9467b7425684ad" + "layout": "IPY_MODEL_98b3a4806ed14102b0d75e6c571d6134" } }, - "48037f2f7fea4012b9b341f6aee75297": { + "84e6829bb88845a8a4f42700b8496925": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -51,13 +51,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_1e10b4db5d644cb78bd6e005bb34038a", + "layout": "IPY_MODEL_c66a77395e42424d904699edcbb67291", "placeholder": "​", - "style": "IPY_MODEL_a58728093ecb4eafb826bee11a84c549", + "style": "IPY_MODEL_c4bbc15bf853439399dbcf1d40a5a407", "value": "100%" } }, - "3f3b925287274893baf5ed7bb0cf6635": { + "c038e52d41bf4d5b9602930c3d074087": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -73,15 +73,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_9ce169fe4b8543c0b26d745daa230f18", + "layout": "IPY_MODEL_0aaabfac395b43afbdd6d752c502bbf6", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_d5da01aca8fb400c96e76f44c9403581", + "style": "IPY_MODEL_3786d970492b4aa38f886f2572fd958c", "value": 818322941 } }, - "c44bdca7c9784b20ba2146250ee744d6": { + "2667604641764341b0bc8c6afea438fd": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -96,13 +96,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_98cbaa572fdd4c42975f52015672b3a5", + "layout": "IPY_MODEL_b86d0f2d7be74cebbcaa884b53123eeb", "placeholder": "​", - "style": "IPY_MODEL_a636aa81f5cc453099c9e552f0986e63", - "value": " 780M/780M [01:27<00:00, 6.98MB/s]" + "style": 
"IPY_MODEL_fa7b1497925a457f89286a71f073f416", + "value": " 780M/780M [00:57<00:00, 10.1MB/s]" } }, - "5b0ed23cd32c4c7d8d9467b7425684ad": { + "98b3a4806ed14102b0d75e6c571d6134": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -154,7 +154,7 @@ "width": null } }, - "1e10b4db5d644cb78bd6e005bb34038a": { + "c66a77395e42424d904699edcbb67291": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -206,7 +206,7 @@ "width": null } }, - "a58728093ecb4eafb826bee11a84c549": { + "c4bbc15bf853439399dbcf1d40a5a407": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -221,7 +221,7 @@ "description_width": "" } }, - "9ce169fe4b8543c0b26d745daa230f18": { + "0aaabfac395b43afbdd6d752c502bbf6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -273,7 +273,7 @@ "width": null } }, - "d5da01aca8fb400c96e76f44c9403581": { + "3786d970492b4aa38f886f2572fd958c": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -289,7 +289,7 @@ "description_width": "" } }, - "98cbaa572fdd4c42975f52015672b3a5": { + "b86d0f2d7be74cebbcaa884b53123eeb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -341,7 +341,7 @@ "width": null } }, - "a636aa81f5cc453099c9e552f0986e63": { + "fa7b1497925a457f89286a71f073f416": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -401,7 +401,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "bcb6db4a-fc21-4258-9b53-4a760a534656" + "outputId": "32e3bc15-6d02-4352-f0a3-912059d134a5" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -418,7 +418,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -459,7 +459,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "de684b46-7623-4836-ee44-49cdb320cbf3" + "outputId": "8e81d6e9-0360-4212-cd61-9a5a58d3f703" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -472,16 +472,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 162MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 19.5MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 13.3ms\n", - "Speed: 0.5ms pre-process, 15.2ms inference, 19.5ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.0ms\n", + "Speed: 0.5ms pre-process, 17.8ms inference, 17.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,20 +515,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "13e0e8b77bf54b25b8893f0b4164315f", - "48037f2f7fea4012b9b341f6aee75297", - "3f3b925287274893baf5ed7bb0cf6635", - "c44bdca7c9784b20ba2146250ee744d6", - "5b0ed23cd32c4c7d8d9467b7425684ad", - "1e10b4db5d644cb78bd6e005bb34038a", - "a58728093ecb4eafb826bee11a84c549", - "9ce169fe4b8543c0b26d745daa230f18", - "d5da01aca8fb400c96e76f44c9403581", - "98cbaa572fdd4c42975f52015672b3a5", - "a636aa81f5cc453099c9e552f0986e63" + "300b4d5355ef4967bd5246afeef6eef5", + "84e6829bb88845a8a4f42700b8496925", + "c038e52d41bf4d5b9602930c3d074087", + "2667604641764341b0bc8c6afea438fd", + "98b3a4806ed14102b0d75e6c571d6134", + "c66a77395e42424d904699edcbb67291", + "c4bbc15bf853439399dbcf1d40a5a407", + "0aaabfac395b43afbdd6d752c502bbf6", + "3786d970492b4aa38f886f2572fd958c", + "b86d0f2d7be74cebbcaa884b53123eeb", + "fa7b1497925a457f89286a71f073f416" ] }, - "outputId": "b1e02a1f-981f-4739-e75d-10d0204cc32d" + "outputId": "61ffec5e-90ea-44f6-c0ea-b006e6e7072f" }, "source": [ "# Download COCO val\n", @@ -546,7 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "13e0e8b77bf54b25b8893f0b4164315f" + "model_id": "300b4d5355ef4967bd5246afeef6eef5" } }, "metadata": {} @@ -560,7 +560,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "9c2f755f-f383-4a9e-cd19-f73a0c763a9c" + "outputId": "aa5d5cea-14c1-4a19-bfdf-95b7164962cf" }, "source": [ "# Validate YOLOv5s on COCO val\n", @@ -573,30 +573,30 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2019.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2066.57it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.25it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.26it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.2ms pre-process, 2.7ms inference, 2.1ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 2.7ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.82s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=6.19s)\n", + "DONE (t=5.49s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=75.81s).\n", + "DONE (t=74.26s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.26s).\n", + "DONE (t=13.46s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -676,7 +676,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "7d03d4d2-9a6e-47de-88f4-c673b55c73c5" + "outputId": "f0fcdc77-5326-41e1-bacc-be5432eefa2a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -690,7 +690,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -699,8 +699,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 26.1MB/s]\n", - "Dataset download success ✅ (0.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 39.8MB/s]\n", + "Dataset download success ✅ (0.8s), saved to 
\u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -734,11 +734,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1989.66it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 2084.63it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 246.25it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 22:41:46 +0100 Subject: [PATCH 147/277] Created using Colaboratory --- segment/tutorial.ipynb | 70 +++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 38 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index f3f978d43d93..4192c69da628 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "d1e33dfc-9ad4-436e-f1e5-01acee40c029" + "outputId": "664f49fa-554a-4dca-8d0e-5c9dd60f6d28" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,7 +100,7 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "e206fcec-cf42-4754-8a42-39bc3603eba8" + "outputId": "6392c9ff-0863-4665-faf9-b3af9881c305" }, "outputs": [ { @@ -108,16 +108,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:03<00:00, 3.93MB/s]\n", + "100% 14.9M/14.9M [00:01<00:00, 9.09MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.7ms\n", - "Speed: 0.4ms pre-process, 15.5ms inference, 22.2ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.0ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.5ms\n", + "Speed: 0.5ms pre-process, 15.7ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" ] } @@ -155,7 +155,7 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "f7eba0ae-49d1-405b-a1cf-169212fadc2c" + "outputId": "4707734e-00c7-43da-d642-32c3c3fe3090" }, "outputs": [ { @@ -182,26 +182,23 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "73533135-6995-4f2d-adb0-3acb5ef9b300" + "outputId": "f96b700d-c779-4a34-930b-e85be4e58974" }, "outputs": [ { - "metadata": { - "tags": null - }, - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1420.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1409.04it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:53<00:00, 1.38it/s]\n", " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.9ms pre-process, 3.9ms inference, 3.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.8ms pre-process, 4.0ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n", "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" ] } @@ -273,27 +270,24 @@ "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "8e349df5-9910-4a91-a845-748def15d3d7" + "outputId": "2cdb19cc-69af-4c90-f8de-af02dfedba91" }, "outputs": [ { - "metadata": { - "tags": null - }, - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 4.42MB/s]\n", - "Dataset download success ✅ (2.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.79M/6.79M [00:01<00:00, 5.87MB/s]\n", + "Dataset download success ✅ (2.1s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -327,11 +321,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017' 
images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1383.68it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1439.54it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 241.77it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 23:12:09 +0100 Subject: [PATCH 148/277] Created using Colaboratory --- classify/tutorial.ipynb | 3254 +++++++++++++++++---------------------- 1 file changed, 1445 insertions(+), 1809 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index f235b754d7b4..e035a7bda40d 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1,1842 +1,1478 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] }, - "id": "wbvMlHd_QwMG", - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" - }, - "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 152.0/196.6 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", - "\n", - "```shell\n", - "python classify/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "43b2e1b5-78d9-4e1d-8530-ee9779bba160" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] }, - "id": "zR9ZbuQCH7FX", - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x640 minibus 0.01, recreational vehicle 0.01, ambulance 0.01, tram 0.01, trolleybus 0.01, 2.6ms\n", - "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 640x640 suit 0.05, bow tie 0.01, ping-pong ball 0.01, microphone 0.01, bassoon 0.01, 2.8ms\n", - "Speed: 1.2ms pre-process, 2.7ms inference, 0.1ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", - "2 labels saved to runs/predict-cls/exp/labels\n" - ] - } - ], - "source": [ - "!python classify/predict.py --weights yolov5s-cls.pt --img 640 --source data/images\n", - "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" - ] + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. 
Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] }, - "id": "WQPtK1QYVaD_", - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" - }, - "outputs": [], - "source": [ - "# Download Imagenet val\n", - "!bash data/scripts/get_imagenet.sh --val" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "1b610787-7cf7-4c33-aac2-aa50fbb84a94" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:03<00:00, 2.94MB/s]\n", + "\n", + "Fusing layers... \n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", + "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", + "2 labels saved to runs/predict-cls/exp/labels\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] }, - "id": "X58w8JLpMnjH", - "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=320, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "validating: 100%|██████████| 391/391 [02:36<00:00, 2.49it/s] \n", - " Class Images top1_acc top5_acc\n", - " all 50000 0.734 0.914\n", - " tench 50 0.92 0.98\n", - " goldfish 50 0.86 0.98\n", - " great white shark 50 0.76 0.94\n", - " tiger shark 50 0.84 0.96\n", - " hammerhead shark 50 0.88 0.98\n", - " electric ray 50 0.76 0.88\n", - " stingray 50 0.74 0.94\n", - " cock 50 0.74 0.94\n", - " hen 50 0.86 0.96\n", - " ostrich 50 0.98 1\n", - " brambling 50 0.9 0.98\n", - " goldfinch 50 0.92 1\n", - " house finch 50 0.92 1\n", - " junco 50 0.98 1\n", - " indigo bunting 50 0.86 0.94\n", - " American robin 50 0.94 1\n", - " bulbul 50 0.88 0.92\n", - " jay 50 0.92 0.98\n", - " magpie 50 0.9 0.98\n", - " chickadee 50 0.96 1\n", - " American dipper 50 0.86 0.92\n", - " kite 50 0.8 0.94\n", - " bald eagle 50 0.9 0.98\n", - " vulture 50 0.96 1\n", - " great grey owl 50 0.96 0.98\n", - " fire salamander 50 0.96 0.98\n", - " smooth newt 50 0.66 0.98\n", - " newt 50 0.74 0.84\n", - " spotted salamander 50 0.9 0.98\n", - " axolotl 50 0.9 0.98\n", - " American bullfrog 50 0.8 0.92\n", - " tree frog 50 0.8 0.94\n", - " tailed frog 50 0.5 0.82\n", - " loggerhead sea turtle 50 0.7 0.92\n", - " leatherback sea turtle 50 0.58 0.8\n", - " mud turtle 50 0.58 0.84\n", - " terrapin 50 0.52 0.98\n", - " box turtle 50 0.88 1\n", - " banded gecko 50 0.78 0.9\n", - " green iguana 50 0.78 0.92\n", - " Carolina anole 50 0.62 0.98\n", - "desert grassland whiptail lizard 50 0.88 0.96\n", - " agama 50 0.78 0.96\n", - " frilled-necked lizard 50 0.82 0.94\n", - " alligator lizard 50 0.64 0.84\n", - " Gila monster 50 0.76 0.86\n", - " European green lizard 50 0.5 0.96\n", - " chameleon 50 0.78 0.9\n", - " Komodo dragon 50 0.9 1\n", - " Nile crocodile 50 0.66 0.92\n", - " American alligator 50 0.78 0.98\n", - " triceratops 50 0.96 0.98\n", - " worm snake 50 0.76 0.9\n", - " ring-necked snake 50 0.84 0.96\n", - " eastern hog-nosed snake 50 0.62 0.86\n", - " smooth green snake 50 0.64 0.96\n", - " kingsnake 50 0.78 0.94\n", - " garter snake 50 0.86 0.98\n", - " water snake 50 0.78 0.92\n", - " vine snake 50 0.72 0.86\n", - " night snake 50 0.34 0.86\n", - " boa constrictor 50 0.8 0.96\n", - " African rock python 50 0.52 0.82\n", - " Indian cobra 50 0.8 0.94\n", - " green mamba 50 0.56 0.92\n", - " sea snake 50 0.76 0.94\n", - " Saharan horned viper 50 0.48 0.88\n", - "eastern diamondback rattlesnake 50 0.72 0.92\n", - " sidewinder 50 0.38 0.92\n", - " trilobite 50 0.98 0.98\n", - " harvestman 50 0.86 0.94\n", - " scorpion 50 0.88 0.94\n", - " yellow garden spider 50 0.88 0.96\n", - " barn spider 50 0.38 0.96\n", - " European garden spider 50 0.6 0.98\n", - " southern black widow 50 0.84 0.98\n", - " tarantula 50 0.94 0.98\n", - " wolf spider 50 0.7 0.92\n", - " tick 50 0.76 0.82\n", - " centipede 50 0.74 0.86\n", - " black grouse 50 0.88 0.98\n", - " ptarmigan 50 0.84 0.98\n", - " ruffed grouse 50 0.9 1\n", - " prairie grouse 50 0.9 0.96\n", - " peacock 50 0.9 0.9\n", - " quail 50 0.88 0.94\n", - " partridge 50 0.66 0.94\n", - " grey parrot 50 0.94 0.98\n", - " macaw 50 0.92 0.98\n", - "sulphur-crested cockatoo 50 0.94 0.98\n", - " lorikeet 50 0.98 1\n", - " coucal 50 0.9 0.92\n", - " bee eater 50 0.96 0.98\n", - " hornbill 50 0.86 0.98\n", - " hummingbird 50 0.9 0.98\n", - " jacamar 50 0.94 0.94\n", - " toucan 50 0.84 0.94\n", - " duck 50 0.78 0.94\n", - " red-breasted merganser 50 0.94 0.98\n", - " goose 50 0.76 0.98\n", - " black swan 50 
0.94 1\n", - " tusker 50 0.58 0.92\n", - " echidna 50 1 1\n", - " platypus 50 0.72 0.84\n", - " wallaby 50 0.86 0.92\n", - " koala 50 0.84 0.98\n", - " wombat 50 0.82 0.86\n", - " jellyfish 50 0.94 0.96\n", - " sea anemone 50 0.66 0.98\n", - " brain coral 50 0.9 0.96\n", - " flatworm 50 0.76 1\n", - " nematode 50 0.9 0.92\n", - " conch 50 0.74 0.92\n", - " snail 50 0.78 0.86\n", - " slug 50 0.78 0.9\n", - " sea slug 50 0.94 0.98\n", - " chiton 50 0.86 0.96\n", - " chambered nautilus 50 0.86 0.94\n", - " Dungeness crab 50 0.86 0.96\n", - " rock crab 50 0.66 0.88\n", - " fiddler crab 50 0.64 0.88\n", - " red king crab 50 0.78 0.92\n", - " American lobster 50 0.78 0.96\n", - " spiny lobster 50 0.78 0.88\n", - " crayfish 50 0.56 0.84\n", - " hermit crab 50 0.82 0.96\n", - " isopod 50 0.62 0.74\n", - " white stork 50 0.88 0.94\n", - " black stork 50 0.86 0.96\n", - " spoonbill 50 0.96 1\n", - " flamingo 50 0.94 1\n", - " little blue heron 50 0.92 0.98\n", - " great egret 50 0.9 0.98\n", - " bittern 50 0.9 0.92\n", - " crane (bird) 50 0.64 0.94\n", - " limpkin 50 0.96 0.98\n", - " common gallinule 50 0.96 0.96\n", - " American coot 50 0.94 1\n", - " bustard 50 0.96 0.98\n", - " ruddy turnstone 50 0.96 1\n", - " dunlin 50 0.86 0.94\n", - " common redshank 50 0.92 0.96\n", - " dowitcher 50 0.9 1\n", - " oystercatcher 50 0.9 0.96\n", - " pelican 50 0.96 1\n", - " king penguin 50 0.88 0.92\n", - " albatross 50 0.9 0.98\n", - " grey whale 50 0.86 0.94\n", - " killer whale 50 0.9 0.98\n", - " dugong 50 0.88 0.94\n", - " sea lion 50 0.78 0.98\n", - " Chihuahua 50 0.56 0.82\n", - " Japanese Chin 50 0.7 0.98\n", - " Maltese 50 0.86 0.94\n", - " Pekingese 50 0.84 0.94\n", - " Shih Tzu 50 0.68 0.94\n", - " King Charles Spaniel 50 0.92 0.98\n", - " Papillon 50 0.92 0.94\n", - " toy terrier 50 0.48 0.96\n", - " Rhodesian Ridgeback 50 0.76 0.94\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " Afghan Hound 50 0.9 0.98\n", - " Basset Hound 50 0.78 0.9\n", - " Beagle 50 0.82 0.98\n", - " Bloodhound 50 0.5 0.78\n", - " Bluetick Coonhound 50 0.84 0.94\n", - " Black and Tan Coonhound 50 0.46 0.8\n", - "Treeing Walker Coonhound 50 0.58 0.98\n", - " English foxhound 50 0.24 0.8\n", - " Redbone Coonhound 50 0.66 0.92\n", - " borzoi 50 0.94 1\n", - " Irish Wolfhound 50 0.64 0.9\n", - " Italian Greyhound 50 0.8 0.98\n", - " Whippet 50 0.82 0.98\n", - " Ibizan Hound 50 0.64 0.92\n", - " Norwegian Elkhound 50 0.88 1\n", - " Otterhound 50 0.58 0.9\n", - " Saluki 50 0.72 0.92\n", - " Scottish Deerhound 50 0.86 1\n", - " Weimaraner 50 0.88 0.96\n", - "Staffordshire Bull Terrier 50 0.62 0.92\n", - "American Staffordshire Terrier 50 0.66 0.92\n", - " Bedlington Terrier 50 0.82 0.96\n", - " Border Terrier 50 0.9 0.98\n", - " Kerry Blue Terrier 50 0.82 1\n", - " Irish Terrier 50 0.74 0.94\n", - " Norfolk Terrier 50 0.74 0.92\n", - " Norwich Terrier 50 0.68 0.98\n", - " Yorkshire Terrier 50 0.66 0.88\n", - " Wire Fox Terrier 50 0.66 0.96\n", - " Lakeland Terrier 50 0.82 0.94\n", - " Sealyham Terrier 50 0.74 0.9\n", - " Airedale Terrier 50 0.82 0.9\n", - " Cairn Terrier 50 0.82 0.94\n", - " Australian Terrier 50 0.48 0.84\n", - " Dandie Dinmont Terrier 50 0.84 0.9\n", - " Boston Terrier 50 0.88 1\n", - " Miniature Schnauzer 50 0.7 0.92\n", - " Giant Schnauzer 50 0.82 1\n", - " Standard Schnauzer 50 0.72 0.98\n", - " Scottish Terrier 50 0.78 0.94\n", - " Tibetan Terrier 50 0.64 0.98\n", - 
"Australian Silky Terrier 50 0.72 0.96\n", - "Soft-coated Wheaten Terrier 50 0.86 0.98\n", - "West Highland White Terrier 50 0.94 0.98\n", - " Lhasa Apso 50 0.66 0.96\n", - " Flat-Coated Retriever 50 0.78 1\n", - " Curly-coated Retriever 50 0.84 0.96\n", - " Golden Retriever 50 0.88 0.96\n", - " Labrador Retriever 50 0.82 0.94\n", - "Chesapeake Bay Retriever 50 0.86 0.98\n", - "German Shorthaired Pointer 50 0.84 0.96\n", - " Vizsla 50 0.7 0.94\n", - " English Setter 50 0.8 1\n", - " Irish Setter 50 0.78 0.9\n", - " Gordon Setter 50 0.84 0.92\n", - " Brittany 50 0.86 0.98\n", - " Clumber Spaniel 50 0.9 0.96\n", - "English Springer Spaniel 50 0.96 1\n", - " Welsh Springer Spaniel 50 0.92 1\n", - " Cocker Spaniels 50 0.7 0.96\n", - " Sussex Spaniel 50 0.7 0.88\n", - " Irish Water Spaniel 50 0.86 0.94\n", - " Kuvasz 50 0.7 0.92\n", - " Schipperke 50 0.94 0.98\n", - " Groenendael 50 0.78 0.92\n", - " Malinois 50 0.92 0.98\n", - " Briard 50 0.6 0.84\n", - " Australian Kelpie 50 0.74 0.96\n", - " Komondor 50 0.9 0.96\n", - " Old English Sheepdog 50 0.94 0.98\n", - " Shetland Sheepdog 50 0.72 0.94\n", - " collie 50 0.6 0.96\n", - " Border Collie 50 0.82 0.96\n", - " Bouvier des Flandres 50 0.78 0.96\n", - " Rottweiler 50 0.94 0.98\n", - " German Shepherd Dog 50 0.76 0.98\n", - " Dobermann 50 0.74 1\n", - " Miniature Pinscher 50 0.76 0.96\n", - "Greater Swiss Mountain Dog 50 0.66 0.94\n", - " Bernese Mountain Dog 50 0.94 1\n", - " Appenzeller Sennenhund 50 0.3 1\n", - " Entlebucher Sennenhund 50 0.72 0.98\n", - " Boxer 50 0.7 0.92\n", - " Bullmastiff 50 0.8 0.98\n", - " Tibetan Mastiff 50 0.92 0.98\n", - " French Bulldog 50 0.86 0.98\n", - " Great Dane 50 0.6 0.92\n", - " St. Bernard 50 0.94 1\n", - " husky 50 0.5 0.94\n", - " Alaskan Malamute 50 0.76 0.96\n", - " Siberian Husky 50 0.56 0.98\n", - " Dalmatian 50 0.94 0.98\n", - " Affenpinscher 50 0.76 0.92\n", - " Basenji 50 0.9 1\n", - " pug 50 0.96 0.98\n", - " Leonberger 50 0.98 1\n", - " Newfoundland 50 0.82 0.96\n", - " Pyrenean Mountain Dog 50 0.76 0.94\n", - " Samoyed 50 0.9 0.98\n", - " Pomeranian 50 0.96 1\n", - " Chow Chow 50 0.88 0.96\n", - " Keeshond 50 0.94 1\n", - " Griffon Bruxellois 50 0.92 0.98\n", - " Pembroke Welsh Corgi 50 0.9 0.98\n", - " Cardigan Welsh Corgi 50 0.7 0.94\n", - " Toy Poodle 50 0.52 0.96\n", - " Miniature Poodle 50 0.56 0.92\n", - " Standard Poodle 50 0.78 0.96\n", - " Mexican hairless dog 50 0.86 0.98\n", - " grey wolf 50 0.74 0.92\n", - " Alaskan tundra wolf 50 0.86 0.98\n", - " red wolf 50 0.54 0.92\n", - " coyote 50 0.62 0.82\n", - " dingo 50 0.76 0.94\n", - " dhole 50 0.9 0.96\n", - " African wild dog 50 1 1\n", - " hyena 50 0.9 0.94\n", - " red fox 50 0.62 0.92\n", - " kit fox 50 0.7 0.98\n", - " Arctic fox 50 0.92 0.98\n", - " grey fox 50 0.66 0.96\n", - " tabby cat 50 0.58 0.92\n", - " tiger cat 50 0.2 0.94\n", - " Persian cat 50 0.92 1\n", - " Siamese cat 50 0.94 0.98\n", - " Egyptian Mau 50 0.52 0.84\n", - " cougar 50 0.94 0.96\n", - " lynx 50 0.74 0.9\n", - " leopard 50 0.86 1\n", - " snow leopard 50 0.9 0.98\n", - " jaguar 50 0.72 0.92\n", - " lion 50 0.9 0.98\n", - " tiger 50 0.96 0.98\n", - " cheetah 50 0.94 0.98\n", - " brown bear 50 0.9 0.98\n", - " American black bear 50 0.9 0.98\n", - " polar bear 50 0.86 0.94\n", - " sloth bear 50 0.72 0.92\n", - " mongoose 50 0.7 0.86\n", - " meerkat 50 0.82 0.98\n", - " tiger beetle 50 0.9 0.94\n", - " ladybug 50 0.78 0.98\n", - " ground beetle 50 0.62 0.94\n", - " longhorn beetle 50 0.58 0.9\n", - " leaf beetle 50 0.66 0.98\n", - " dung beetle 50 0.88 
0.98\n", - " rhinoceros beetle 50 0.88 1\n", - " weevil 50 0.92 1\n", - " fly 50 0.78 0.94\n", - " bee 50 0.8 0.96\n", - " ant 50 0.68 0.84\n", - " grasshopper 50 0.48 0.9\n", - " cricket 50 0.66 0.94\n", - " stick insect 50 0.7 0.94\n", - " cockroach 50 0.72 0.84\n", - " mantis 50 0.72 0.9\n", - " cicada 50 0.9 0.96\n", - " leafhopper 50 0.9 0.96\n", - " lacewing 50 0.8 0.94\n", - " dragonfly 50 0.76 0.98\n", - " damselfly 50 0.82 1\n", - " red admiral 50 0.96 0.96\n", - " ringlet 50 0.88 1\n", - " monarch butterfly 50 0.9 0.96\n", - " small white 50 0.88 1\n", - " sulphur butterfly 50 0.92 1\n", - "gossamer-winged butterfly 50 0.9 1\n", - " starfish 50 0.82 0.94\n", - " sea urchin 50 0.84 0.98\n", - " sea cucumber 50 0.76 0.92\n", - " cottontail rabbit 50 0.7 0.98\n", - " hare 50 0.9 1\n", - " Angora rabbit 50 0.92 0.98\n", - " hamster 50 1 1\n", - " porcupine 50 0.9 0.98\n", - " fox squirrel 50 0.82 0.96\n", - " marmot 50 0.94 0.96\n", - " beaver 50 0.78 0.96\n", - " guinea pig 50 0.78 0.92\n", - " common sorrel 50 0.98 0.98\n", - " zebra 50 0.96 0.98\n", - " pig 50 0.54 0.82\n", - " wild boar 50 0.86 0.96\n", - " warthog 50 0.96 0.96\n", - " hippopotamus 50 0.9 1\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " ox 50 0.52 0.94\n", - " water buffalo 50 0.86 0.94\n", - " bison 50 0.9 0.98\n", - " ram 50 0.62 0.98\n", - " bighorn sheep 50 0.72 1\n", - " Alpine ibex 50 0.96 0.98\n", - " hartebeest 50 0.94 1\n", - " impala 50 0.86 0.98\n", - " gazelle 50 0.74 0.96\n", - " dromedary 50 0.94 1\n", - " llama 50 0.86 0.94\n", - " weasel 50 0.42 0.96\n", - " mink 50 0.78 0.92\n", - " European polecat 50 0.54 0.88\n", - " black-footed ferret 50 0.74 0.96\n", - " otter 50 0.68 0.9\n", - " skunk 50 0.94 0.96\n", - " badger 50 0.88 0.92\n", - " armadillo 50 0.88 0.96\n", - " three-toed sloth 50 0.96 1\n", - " orangutan 50 0.82 0.9\n", - " gorilla 50 0.78 0.94\n", - " chimpanzee 50 0.86 0.94\n", - " gibbon 50 0.74 0.9\n", - " siamang 50 0.68 0.94\n", - " guenon 50 0.82 0.96\n", - " patas monkey 50 0.66 0.86\n", - " baboon 50 0.88 0.96\n", - " macaque 50 0.72 0.84\n", - " langur 50 0.56 0.78\n", - " black-and-white colobus 50 0.84 0.92\n", - " proboscis monkey 50 0.98 1\n", - " marmoset 50 0.7 0.92\n", - " white-headed capuchin 50 0.82 0.94\n", - " howler monkey 50 0.9 0.96\n", - " titi 50 0.54 0.9\n", - "Geoffroy's spider monkey 50 0.36 0.86\n", - " common squirrel monkey 50 0.76 0.92\n", - " ring-tailed lemur 50 0.7 0.94\n", - " indri 50 0.86 0.98\n", - " Asian elephant 50 0.54 0.96\n", - " African bush elephant 50 0.62 0.96\n", - " red panda 50 0.94 0.94\n", - " giant panda 50 0.92 0.98\n", - " snoek 50 0.76 0.9\n", - " eel 50 0.58 0.86\n", - " coho salmon 50 0.8 0.98\n", - " rock beauty 50 0.8 0.96\n", - " clownfish 50 0.8 0.98\n", - " sturgeon 50 0.76 0.96\n", - " garfish 50 0.7 0.82\n", - " lionfish 50 0.94 0.98\n", - " pufferfish 50 0.86 0.98\n", - " abacus 50 0.8 0.88\n", - " abaya 50 0.72 0.94\n", - " academic gown 50 0.44 0.94\n", - " accordion 50 0.78 0.96\n", - " acoustic guitar 50 0.54 0.78\n", - " aircraft carrier 50 0.7 0.98\n", - " airliner 50 0.92 1\n", - " airship 50 0.8 
0.88\n", - " altar 50 0.6 0.94\n", - " ambulance 50 0.84 0.98\n", - " amphibious vehicle 50 0.68 0.9\n", - " analog clock 50 0.5 0.88\n", - " apiary 50 0.9 1\n", - " apron 50 0.68 0.86\n", - " waste container 50 0.6 0.86\n", - " assault rifle 50 0.36 0.9\n", - " backpack 50 0.36 0.72\n", - " bakery 50 0.38 0.64\n", - " balance beam 50 0.84 0.98\n", - " balloon 50 0.88 0.96\n", - " ballpoint pen 50 0.52 0.96\n", - " Band-Aid 50 0.68 0.96\n", - " banjo 50 0.9 1\n", - " baluster 50 0.74 0.94\n", - " barbell 50 0.58 0.9\n", - " barber chair 50 0.72 0.9\n", - " barbershop 50 0.64 0.9\n", - " barn 50 0.96 0.96\n", - " barometer 50 0.86 0.96\n", - " barrel 50 0.64 0.86\n", - " wheelbarrow 50 0.64 0.92\n", - " baseball 50 0.76 0.96\n", - " basketball 50 0.88 0.98\n", - " bassinet 50 0.8 0.94\n", - " bassoon 50 0.84 0.98\n", - " swimming cap 50 0.7 0.88\n", - " bath towel 50 0.56 0.84\n", - " bathtub 50 0.34 0.86\n", - " station wagon 50 0.68 0.9\n", - " lighthouse 50 0.74 0.96\n", - " beaker 50 0.46 0.7\n", - " military cap 50 0.88 0.98\n", - " beer bottle 50 0.72 0.9\n", - " beer glass 50 0.72 0.9\n", - " bell-cot 50 0.6 0.96\n", - " bib 50 0.58 0.86\n", - " tandem bicycle 50 0.76 0.96\n", - " bikini 50 0.52 0.88\n", - " ring binder 50 0.7 0.86\n", - " binoculars 50 0.54 0.78\n", - " birdhouse 50 0.86 0.96\n", - " boathouse 50 0.78 0.96\n", - " bobsleigh 50 0.94 0.96\n", - " bolo tie 50 0.86 0.88\n", - " poke bonnet 50 0.68 0.88\n", - " bookcase 50 0.68 0.92\n", - " bookstore 50 0.58 0.88\n", - " bottle cap 50 0.62 0.8\n", - " bow 50 0.74 0.84\n", - " bow tie 50 0.68 0.92\n", - " brass 50 0.92 0.98\n", - " bra 50 0.52 0.76\n", - " breakwater 50 0.64 0.94\n", - " breastplate 50 0.36 0.9\n", - " broom 50 0.58 0.84\n", - " bucket 50 0.58 0.88\n", - " buckle 50 0.5 0.76\n", - " bulletproof vest 50 0.52 0.76\n", - " high-speed train 50 0.94 0.98\n", - " butcher shop 50 0.76 0.94\n", - " taxicab 50 0.7 0.92\n", - " cauldron 50 0.5 0.72\n", - " candle 50 0.5 0.76\n", - " cannon 50 0.88 0.96\n", - " canoe 50 0.94 1\n", - " can opener 50 0.72 0.88\n", - " cardigan 50 0.66 0.88\n", - " car mirror 50 0.94 0.98\n", - " carousel 50 0.96 0.96\n", - " tool kit 50 0.68 0.84\n", - " carton 50 0.44 0.78\n", - " car wheel 50 0.4 0.78\n", - "automated teller machine 50 0.82 0.94\n", - " cassette 50 0.62 0.84\n", - " cassette player 50 0.3 0.92\n", - " castle 50 0.74 0.9\n", - " catamaran 50 0.74 0.98\n", - " CD player 50 0.52 0.8\n", - " cello 50 0.84 1\n", - " mobile phone 50 0.72 0.86\n", - " chain 50 0.34 0.78\n", - " chain-link fence 50 0.7 0.86\n", - " chain mail 50 0.68 0.86\n", - " chainsaw 50 0.88 0.96\n", - " chest 50 0.7 0.88\n", - " chiffonier 50 0.32 0.64\n", - " chime 50 0.64 0.84\n", - " china cabinet 50 0.78 0.94\n", - " Christmas stocking 50 0.92 0.98\n", - " church 50 0.6 0.86\n", - " movie theater 50 0.68 0.9\n", - " cleaver 50 0.36 0.68\n", - " cliff dwelling 50 0.86 1\n", - " cloak 50 0.28 0.7\n", - " clogs 50 0.6 0.88\n", - " cocktail shaker 50 0.62 0.76\n", - " coffee mug 50 0.48 0.78\n", - " coffeemaker 50 0.62 0.92\n", - " coil 50 0.64 0.86\n", - " combination lock 50 0.62 0.92\n", - " computer keyboard 50 0.72 0.92\n", - " confectionery store 50 0.56 0.84\n", - " container ship 50 0.82 0.98\n", - " convertible 50 0.78 1\n", - " corkscrew 50 0.84 0.98\n", - " cornet 50 0.56 0.98\n", - " cowboy boot 50 0.66 0.78\n", - " cowboy hat 50 0.66 0.88\n", - " cradle 50 0.34 0.8\n", - " crane (machine) 50 0.8 0.92\n", - " crash helmet 50 0.92 0.96\n", - " crate 50 0.6 0.86\n", - " infant bed 50 0.8 
0.96\n", - " Crock Pot 50 0.78 0.88\n", - " croquet ball 50 0.9 1\n", - " crutch 50 0.42 0.7\n", - " cuirass 50 0.54 0.92\n", - " dam 50 0.78 0.92\n", - " desk 50 0.68 0.88\n", - " desktop computer 50 0.54 0.9\n", - " rotary dial telephone 50 0.92 0.96\n", - " diaper 50 0.68 0.84\n", - " digital clock 50 0.6 0.8\n", - " digital watch 50 0.56 0.82\n" - ] + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "92de5f34-cf41-49e7-b679-41db94e995ac" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2022-11-18 21:48:38-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "Resolving image-net.org (image-net.org)... 171.64.68.16\n", + "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 6744924160 (6.3G) [application/x-tar]\n", + "Saving to: ‘ILSVRC2012_img_val.tar’\n", + "\n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 7.15MB/s in 11m 13s \n", + "\n", + "2022-11-18 21:59:52 (9.55 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "\n" + ] + } + ], + "source": [ + "# Download Imagenet val (6.3G, 50000 images)\n", + "!bash data/scripts/get_imagenet.sh --val" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " dining table 50 0.78 0.88\n", - " dishcloth 50 0.98 1\n", - " dishwasher 50 0.52 0.74\n", - " disc brake 50 0.96 1\n", - " dock 50 0.56 0.96\n", - " dog sled 50 0.9 0.98\n", - " dome 50 0.74 0.96\n", - " doormat 50 0.6 0.82\n", - " drilling rig 50 0.82 0.94\n", - " drum 50 0.4 0.72\n", - " drumstick 50 0.56 0.82\n", - " dumbbell 50 0.6 0.92\n", - " Dutch oven 50 0.66 0.88\n", - " electric fan 50 0.82 0.84\n", - " electric guitar 50 0.66 0.92\n", - " electric locomotive 50 0.92 0.98\n", - " entertainment center 50 0.92 1\n", - " envelope 50 0.58 0.88\n", - " espresso machine 50 0.72 0.94\n", - " face powder 50 0.76 0.92\n", - " feather boa 50 0.8 0.88\n", - " filing cabinet 50 0.84 0.98\n", - " fireboat 50 0.96 0.96\n", - " fire engine 50 0.82 0.92\n", - " fire screen sheet 50 0.52 0.78\n", - " flagpole 50 0.76 0.92\n", - " flute 50 0.4 0.76\n", - " folding chair 50 0.68 0.9\n", - " football helmet 50 0.9 0.96\n", - " forklift 50 0.8 0.94\n", - " fountain 50 0.88 0.92\n", - " fountain pen 50 0.76 0.92\n", - " four-poster bed 50 0.82 0.92\n", - " freight car 50 0.98 0.98\n", - " French horn 50 0.76 0.92\n", - " frying pan 50 0.48 0.82\n", - " fur coat 50 0.86 0.96\n", - " garbage truck 50 0.9 0.98\n", - " gas mask 50 0.82 0.92\n", - " gas pump 50 0.82 0.98\n", - " goblet 50 0.64 0.9\n", - " go-kart 50 0.9 1\n", - " golf ball 50 0.86 0.96\n", - " golf cart 50 0.76 0.9\n", - " gondola 50 0.94 0.98\n", - " gong 50 0.74 0.92\n", - " gown 50 0.72 0.94\n", - " grand piano 50 0.74 0.96\n", - " greenhouse 50 0.84 1\n", - " grille 50 0.72 0.88\n", - " grocery store 50 0.68 0.9\n", - " guillotine 50 0.84 0.94\n", - " barrette 50 0.48 0.68\n", - " hair spray 50 0.4 0.76\n", - " half-track 50 0.76 0.96\n", - " hammer 50 0.54 0.78\n", - " hamper 50 0.72 0.9\n", - " hair dryer 50 0.7 0.8\n", - " hand-held computer 50 0.52 0.88\n", - " handkerchief 50 0.8 0.96\n", - " hard disk drive 50 0.78 0.86\n", - " harmonica 50 0.68 0.96\n", - " harp 50 0.9 0.96\n", - " harvester 50 0.86 1\n", - " hatchet 50 0.6 0.84\n", - " holster 50 0.7 0.84\n", - " home theater 50 0.72 0.96\n", - " honeycomb 50 0.74 0.86\n", - " hook 
50 0.28 0.62\n", - " hoop skirt 50 0.68 0.8\n", - " horizontal bar 50 0.76 0.98\n", - " horse-drawn vehicle 50 0.9 0.9\n", - " hourglass 50 0.92 0.98\n", - " iPod 50 0.9 0.94\n", - " clothes iron 50 0.72 0.9\n", - " jack-o'-lantern 50 0.94 0.98\n", - " jeans 50 0.7 0.82\n", - " jeep 50 0.76 0.9\n", - " T-shirt 50 0.72 0.94\n", - " jigsaw puzzle 50 0.92 0.96\n", - " pulled rickshaw 50 0.88 0.96\n", - " joystick 50 0.74 0.98\n", - " kimono 50 0.78 0.94\n", - " knee pad 50 0.7 0.86\n", - " knot 50 0.8 0.86\n", - " lab coat 50 0.82 0.98\n", - " ladle 50 0.26 0.64\n", - " lampshade 50 0.62 0.8\n", - " laptop computer 50 0.2 0.88\n", - " lawn mower 50 0.8 0.96\n", - " lens cap 50 0.5 0.8\n", - " paper knife 50 0.3 0.58\n", - " library 50 0.62 0.92\n", - " lifeboat 50 0.94 0.98\n", - " lighter 50 0.56 0.8\n", - " limousine 50 0.74 0.92\n", - " ocean liner 50 0.88 0.96\n", - " lipstick 50 0.7 0.88\n", - " slip-on shoe 50 0.82 0.94\n", - " lotion 50 0.56 0.9\n", - " speaker 50 0.58 0.64\n", - " loupe 50 0.32 0.54\n", - " sawmill 50 0.74 0.9\n", - " magnetic compass 50 0.48 0.78\n", - " mail bag 50 0.64 0.94\n", - " mailbox 50 0.82 0.92\n", - " tights 50 0.28 0.9\n", - " tank suit 50 0.3 0.88\n", - " manhole cover 50 0.94 0.98\n", - " maraca 50 0.72 0.86\n", - " marimba 50 0.84 0.94\n", - " mask 50 0.48 0.78\n", - " match 50 0.74 0.92\n", - " maypole 50 0.96 1\n", - " maze 50 0.82 1\n", - " measuring cup 50 0.66 0.82\n", - " medicine chest 50 0.6 0.9\n", - " megalith 50 0.84 0.92\n", - " microphone 50 0.56 0.74\n", - " microwave oven 50 0.56 0.8\n", - " military uniform 50 0.62 0.86\n", - " milk can 50 0.7 0.82\n", - " minibus 50 0.68 1\n", - " miniskirt 50 0.58 0.84\n", - " minivan 50 0.48 0.8\n", - " missile 50 0.34 0.82\n", - " mitten 50 0.76 0.88\n", - " mixing bowl 50 0.82 0.98\n", - " mobile home 50 0.58 0.8\n", - " Model T 50 0.92 0.96\n", - " modem 50 0.7 0.9\n", - " monastery 50 0.52 0.86\n", - " monitor 50 0.34 0.86\n", - " moped 50 0.56 0.94\n", - " mortar 50 0.72 0.88\n", - " square academic cap 50 0.48 0.82\n", - " mosque 50 0.98 1\n", - " mosquito net 50 0.96 0.98\n", - " scooter 50 0.88 0.98\n", - " mountain bike 50 0.74 0.96\n", - " tent 50 0.88 0.96\n", - " computer mouse 50 0.38 0.82\n", - " mousetrap 50 0.82 0.9\n", - " moving van 50 0.48 0.8\n", - " muzzle 50 0.5 0.74\n", - " nail 50 0.68 0.76\n", - " neck brace 50 0.62 0.72\n", - " necklace 50 0.92 1\n", - " nipple 50 0.8 0.92\n", - " notebook computer 50 0.34 0.88\n", - " obelisk 50 0.82 0.94\n", - " oboe 50 0.62 0.84\n", - " ocarina 50 0.82 0.88\n", - " odometer 50 0.98 1\n", - " oil filter 50 0.6 0.82\n", - " organ 50 0.84 0.94\n", - " oscilloscope 50 0.94 0.96\n", - " overskirt 50 0.2 0.62\n", - " bullock cart 50 0.76 0.94\n", - " oxygen mask 50 0.48 0.8\n", - " packet 50 0.54 0.74\n", - " paddle 50 0.7 0.94\n", - " paddle wheel 50 0.92 0.98\n", - " padlock 50 0.64 0.78\n", - " paintbrush 50 0.66 0.78\n", - " pajamas 50 0.68 0.94\n", - " palace 50 0.66 0.94\n", - " pan flute 50 0.84 0.86\n", - " paper towel 50 0.68 0.86\n", - " parachute 50 0.92 0.96\n", - " parallel bars 50 0.68 0.96\n", - " park bench 50 0.82 0.94\n", - " parking meter 50 0.86 0.98\n", - " passenger car 50 0.48 0.86\n" - ] + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "9961ad87-d639-4489-b578-0a0578fefaab" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/val: 
\u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... \n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100% 391/391 [04:48<00:00, 1.35it/s]\n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.715 0.902\n", + " tench 50 0.94 0.98\n", + " goldfish 50 0.88 0.92\n", + " great white shark 50 0.78 0.96\n", + " tiger shark 50 0.68 0.96\n", + " hammerhead shark 50 0.82 0.92\n", + " electric ray 50 0.76 0.9\n", + " stingray 50 0.7 0.9\n", + " cock 50 0.78 0.92\n", + " hen 50 0.84 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.96\n", + " goldfinch 50 0.92 0.98\n", + " house finch 50 0.88 0.96\n", + " junco 50 0.94 0.98\n", + " indigo bunting 50 0.86 0.88\n", + " American robin 50 0.9 0.96\n", + " bulbul 50 0.84 0.96\n", + " jay 50 0.9 0.96\n", + " magpie 50 0.84 0.96\n", + " chickadee 50 0.9 1\n", + " American dipper 50 0.82 0.92\n", + " kite 50 0.76 0.94\n", + " bald eagle 50 0.92 1\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.94 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.58 0.94\n", + " newt 50 0.74 0.9\n", + " spotted salamander 50 0.86 0.94\n", + " axolotl 50 0.86 0.96\n", + " American bullfrog 50 0.78 0.92\n", + " tree frog 50 0.84 0.96\n", + " tailed frog 50 0.48 0.8\n", + " loggerhead sea turtle 50 0.68 0.94\n", + " leatherback sea turtle 50 0.5 0.8\n", + " mud turtle 50 0.64 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.84 0.98\n", + " banded gecko 50 0.7 0.88\n", + " green iguana 50 0.76 0.94\n", + " Carolina anole 50 0.58 0.96\n", + "desert grassland whiptail lizard 50 0.82 0.94\n", + " agama 50 0.74 0.92\n", + " frilled-necked lizard 50 0.84 0.86\n", + " alligator lizard 50 0.58 0.78\n", + " Gila monster 50 0.72 0.8\n", + " European green lizard 50 0.42 0.9\n", + " chameleon 50 0.76 0.84\n", + " Komodo dragon 50 0.86 0.96\n", + " Nile crocodile 50 0.7 0.84\n", + " American alligator 50 0.76 0.96\n", + " triceratops 50 0.9 0.94\n", + " worm snake 50 0.76 0.88\n", + " ring-necked snake 50 0.8 0.92\n", + " eastern hog-nosed snake 50 0.58 0.88\n", + " smooth green snake 50 0.6 0.94\n", + " kingsnake 50 0.82 0.9\n", + " garter snake 50 0.88 0.94\n", + " water snake 50 0.7 0.94\n", + " vine snake 50 0.66 0.76\n", + " night snake 50 0.34 0.82\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.48 0.76\n", + " Indian cobra 50 0.82 0.94\n", + " green mamba 50 0.54 0.86\n", + " sea snake 50 0.62 0.9\n", + " Saharan horned viper 50 0.56 0.86\n", + "eastern diamondback rattlesnake 50 0.6 0.86\n", + " sidewinder 50 0.28 0.86\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.86 0.94\n", + " yellow garden spider 50 0.92 0.96\n", + " barn spider 50 0.38 0.98\n", + " European garden spider 50 0.62 0.98\n", + " southern black widow 50 0.88 0.94\n", + " tarantula 50 0.94 1\n", + " wolf spider 50 0.82 0.92\n", + " tick 50 0.74 0.84\n", + " centipede 50 0.68 0.82\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.78 0.94\n", + " ruffed grouse 50 0.88 1\n", + " prairie grouse 50 0.92 1\n", + " peacock 50 0.88 0.9\n", + " quail 50 0.9 0.94\n", + " partridge 50 0.74 0.96\n", + " grey parrot 50 0.9 0.96\n", + " macaw 50 0.88 0.98\n", + "sulphur-crested cockatoo 50 0.86 0.92\n", + " 
lorikeet 50 0.96 1\n", + " coucal 50 0.82 0.88\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.9 0.96\n", + " hummingbird 50 0.88 0.96\n", + " jacamar 50 0.92 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.76 0.94\n", + " red-breasted merganser 50 0.86 0.96\n", + " goose 50 0.74 0.96\n", + " black swan 50 0.94 0.98\n", + " tusker 50 0.54 0.92\n", + " echidna 50 0.98 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.78 0.88\n", + " koala 50 0.84 0.92\n", + " wombat 50 0.78 0.84\n", + " jellyfish 50 0.88 0.96\n", + " sea anemone 50 0.72 0.9\n", + " brain coral 50 0.88 0.96\n", + " flatworm 50 0.8 0.98\n", + " nematode 50 0.86 0.9\n", + " conch 50 0.74 0.88\n", + " snail 50 0.78 0.88\n", + " slug 50 0.74 0.82\n", + " sea slug 50 0.88 0.98\n", + " chiton 50 0.88 0.98\n", + " chambered nautilus 50 0.88 0.92\n", + " Dungeness crab 50 0.78 0.94\n", + " rock crab 50 0.68 0.86\n", + " fiddler crab 50 0.64 0.86\n", + " red king crab 50 0.76 0.96\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.74 0.88\n", + " crayfish 50 0.56 0.86\n", + " hermit crab 50 0.78 0.96\n", + " isopod 50 0.66 0.78\n", + " white stork 50 0.88 0.96\n", + " black stork 50 0.84 0.98\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.96\n", + " bittern 50 0.86 0.94\n", + " crane (bird) 50 0.62 0.9\n", + " limpkin 50 0.98 1\n", + " common gallinule 50 0.92 0.96\n", + " American coot 50 0.9 0.98\n", + " bustard 50 0.92 0.96\n", + " ruddy turnstone 50 0.94 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.9 0.96\n", + " dowitcher 50 0.84 0.96\n", + " oystercatcher 50 0.86 0.94\n", + " pelican 50 0.92 0.96\n", + " king penguin 50 0.88 0.96\n", + " albatross 50 0.9 1\n", + " grey whale 50 0.84 0.92\n", + " killer whale 50 0.92 1\n", + " dugong 50 0.84 0.96\n", + " sea lion 50 0.82 0.92\n", + " Chihuahua 50 0.66 0.84\n", + " Japanese Chin 50 0.72 0.98\n", + " Maltese 50 0.76 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.74 0.96\n", + " King Charles Spaniel 50 0.88 0.98\n", + " Papillon 50 0.86 0.94\n", + " toy terrier 50 0.48 0.94\n", + " Rhodesian Ridgeback 50 0.76 0.98\n", + " Afghan Hound 50 0.84 1\n", + " Basset Hound 50 0.8 0.92\n", + " Beagle 50 0.82 0.96\n", + " Bloodhound 50 0.48 0.72\n", + " Bluetick Coonhound 50 0.86 0.94\n", + " Black and Tan Coonhound 50 0.54 0.8\n", + "Treeing Walker Coonhound 50 0.66 0.98\n", + " English foxhound 50 0.32 0.84\n", + " Redbone Coonhound 50 0.62 0.94\n", + " borzoi 50 0.92 1\n", + " Irish Wolfhound 50 0.48 0.88\n", + " Italian Greyhound 50 0.76 0.98\n", + " Whippet 50 0.74 0.92\n", + " Ibizan Hound 50 0.6 0.86\n", + " Norwegian Elkhound 50 0.88 0.98\n", + " Otterhound 50 0.62 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 0.98\n", + " Weimaraner 50 0.88 0.94\n", + "Staffordshire Bull Terrier 50 0.66 0.98\n", + "American Staffordshire Terrier 50 0.64 0.92\n", + " Bedlington Terrier 50 0.9 0.92\n", + " Border Terrier 50 0.86 0.92\n", + " Kerry Blue Terrier 50 0.78 0.98\n", + " Irish Terrier 50 0.7 0.96\n", + " Norfolk Terrier 50 0.68 0.9\n", + " Norwich Terrier 50 0.72 1\n", + " Yorkshire Terrier 50 0.66 0.9\n", + " Wire Fox Terrier 50 0.64 0.98\n", + " Lakeland Terrier 50 0.74 0.92\n", + " Sealyham Terrier 50 0.76 0.9\n", + " Airedale Terrier 50 0.82 0.92\n", + " Cairn Terrier 50 0.76 0.9\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.82 0.92\n", + " Boston Terrier 50 0.92 1\n", + " Miniature Schnauzer 50 0.68 0.9\n", + " 
Giant Schnauzer 50 0.72 0.98\n", + " Standard Schnauzer 50 0.74 1\n", + " Scottish Terrier 50 0.76 0.96\n", + " Tibetan Terrier 50 0.48 1\n", + "Australian Silky Terrier 50 0.66 0.96\n", + "Soft-coated Wheaten Terrier 50 0.74 0.96\n", + "West Highland White Terrier 50 0.88 0.96\n", + " Lhasa Apso 50 0.68 0.96\n", + " Flat-Coated Retriever 50 0.72 0.94\n", + " Curly-coated Retriever 50 0.82 0.94\n", + " Golden Retriever 50 0.86 0.94\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.76 0.96\n", + "German Shorthaired Pointer 50 0.8 0.96\n", + " Vizsla 50 0.68 0.96\n", + " English Setter 50 0.7 1\n", + " Irish Setter 50 0.8 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.84 0.96\n", + " Clumber Spaniel 50 0.92 0.96\n", + "English Springer Spaniel 50 0.88 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.94\n", + " Sussex Spaniel 50 0.72 0.92\n", + " Irish Water Spaniel 50 0.88 0.98\n", + " Kuvasz 50 0.66 0.9\n", + " Schipperke 50 0.9 0.98\n", + " Groenendael 50 0.8 0.94\n", + " Malinois 50 0.86 0.98\n", + " Briard 50 0.52 0.8\n", + " Australian Kelpie 50 0.6 0.88\n", + " Komondor 50 0.88 0.94\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.74 0.9\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.74 0.96\n", + " Bouvier des Flandres 50 0.78 0.94\n", + " Rottweiler 50 0.88 0.96\n", + " German Shepherd Dog 50 0.8 0.98\n", + " Dobermann 50 0.68 0.96\n", + " Miniature Pinscher 50 0.76 0.88\n", + "Greater Swiss Mountain Dog 50 0.68 0.94\n", + " Bernese Mountain Dog 50 0.96 1\n", + " Appenzeller Sennenhund 50 0.22 1\n", + " Entlebucher Sennenhund 50 0.64 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.78 0.98\n", + " Tibetan Mastiff 50 0.88 0.96\n", + " French Bulldog 50 0.84 0.94\n", + " Great Dane 50 0.54 0.9\n", + " St. 
Bernard 50 0.92 1\n", + " husky 50 0.46 0.98\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.46 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.78 0.9\n", + " Basenji 50 0.92 0.94\n", + " pug 50 0.94 0.98\n", + " Leonberger 50 1 1\n", + " Newfoundland 50 0.78 0.96\n", + " Pyrenean Mountain Dog 50 0.78 0.96\n", + " Samoyed 50 0.96 1\n", + " Pomeranian 50 0.98 1\n", + " Chow Chow 50 0.9 0.96\n", + " Keeshond 50 0.88 0.94\n", + " Griffon Bruxellois 50 0.84 0.98\n", + " Pembroke Welsh Corgi 50 0.82 0.94\n", + " Cardigan Welsh Corgi 50 0.66 0.98\n", + " Toy Poodle 50 0.52 0.88\n", + " Miniature Poodle 50 0.52 0.92\n", + " Standard Poodle 50 0.8 1\n", + " Mexican hairless dog 50 0.88 0.98\n", + " grey wolf 50 0.82 0.92\n", + " Alaskan tundra wolf 50 0.78 0.98\n", + " red wolf 50 0.48 0.9\n", + " coyote 50 0.64 0.86\n", + " dingo 50 0.76 0.88\n", + " dhole 50 0.9 0.98\n", + " African wild dog 50 0.98 1\n", + " hyena 50 0.88 0.96\n", + " red fox 50 0.54 0.92\n", + " kit fox 50 0.72 0.98\n", + " Arctic fox 50 0.94 1\n", + " grey fox 50 0.7 0.94\n", + " tabby cat 50 0.54 0.92\n", + " tiger cat 50 0.22 0.94\n", + " Persian cat 50 0.9 0.98\n", + " Siamese cat 50 0.96 1\n", + " Egyptian Mau 50 0.54 0.8\n", + " cougar 50 0.9 1\n", + " lynx 50 0.72 0.88\n", + " leopard 50 0.78 0.98\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.7 0.94\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.92 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.94 0.98\n", + " American black bear 50 0.8 1\n", + " polar bear 50 0.84 0.96\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.92\n", + " meerkat 50 0.82 0.92\n", + " tiger beetle 50 0.92 0.94\n", + " ladybug 50 0.86 0.94\n", + " ground beetle 50 0.64 0.94\n", + " longhorn beetle 50 0.62 0.88\n", + " leaf beetle 50 0.64 0.98\n", + " dung beetle 50 0.86 0.98\n", + " rhinoceros beetle 50 0.86 0.94\n", + " weevil 50 0.9 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.68 0.94\n", + " ant 50 0.68 0.78\n", + " grasshopper 50 0.5 0.92\n", + " cricket 50 0.64 0.92\n", + " stick insect 50 0.64 0.92\n", + " cockroach 50 0.72 0.8\n", + " mantis 50 0.64 0.86\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.88 0.94\n", + " lacewing 50 0.78 0.92\n", + " dragonfly 50 0.82 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.94 0.96\n", + " ringlet 50 0.86 0.98\n", + " monarch butterfly 50 0.9 0.92\n", + " small white 50 0.9 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.88 1\n", + " starfish 50 0.88 0.92\n", + " sea urchin 50 0.84 0.94\n", + " sea cucumber 50 0.66 0.84\n", + " cottontail rabbit 50 0.72 0.94\n", + " hare 50 0.84 0.96\n", + " Angora rabbit 50 0.94 0.98\n", + " hamster 50 0.96 1\n", + " porcupine 50 0.88 0.98\n", + " fox squirrel 50 0.76 0.94\n", + " marmot 50 0.92 0.96\n", + " beaver 50 0.78 0.94\n", + " guinea pig 50 0.78 0.94\n", + " common sorrel 50 0.96 0.98\n", + " zebra 50 0.94 0.96\n", + " pig 50 0.5 0.76\n", + " wild boar 50 0.84 0.96\n", + " warthog 50 0.84 0.96\n", + " hippopotamus 50 0.88 0.96\n", + " ox 50 0.48 0.94\n", + " water buffalo 50 0.78 0.94\n", + " bison 50 0.88 0.96\n", + " ram 50 0.58 0.92\n", + " bighorn sheep 50 0.66 1\n", + " Alpine ibex 50 0.92 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.82 0.96\n", + " gazelle 50 0.7 0.96\n", + " dromedary 50 0.9 1\n", + " llama 50 0.82 0.94\n", + " weasel 50 0.44 0.92\n", + " mink 50 0.78 0.96\n", + " European polecat 50 0.46 0.9\n", + " black-footed ferret 50 0.68 0.96\n", + " otter 50 0.66 0.88\n", + " skunk 50 0.96 
0.96\n", + " badger 50 0.86 0.92\n", + " armadillo 50 0.88 0.9\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.78 0.92\n", + " gorilla 50 0.82 0.94\n", + " chimpanzee 50 0.84 0.94\n", + " gibbon 50 0.76 0.86\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.8 0.94\n", + " patas monkey 50 0.62 0.82\n", + " baboon 50 0.9 0.98\n", + " macaque 50 0.8 0.86\n", + " langur 50 0.6 0.82\n", + " black-and-white colobus 50 0.86 0.9\n", + " proboscis monkey 50 1 1\n", + " marmoset 50 0.74 0.98\n", + " white-headed capuchin 50 0.72 0.9\n", + " howler monkey 50 0.86 0.94\n", + " titi 50 0.5 0.9\n", + "Geoffroy's spider monkey 50 0.42 0.8\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.72 0.94\n", + " indri 50 0.9 0.96\n", + " Asian elephant 50 0.58 0.92\n", + " African bush elephant 50 0.7 0.98\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.94 0.98\n", + " snoek 50 0.74 0.9\n", + " eel 50 0.6 0.84\n", + " coho salmon 50 0.84 0.96\n", + " rock beauty 50 0.88 0.98\n", + " clownfish 50 0.78 0.98\n", + " sturgeon 50 0.68 0.94\n", + " garfish 50 0.62 0.8\n", + " lionfish 50 0.96 0.96\n", + " pufferfish 50 0.88 0.96\n", + " abacus 50 0.74 0.88\n", + " abaya 50 0.84 0.92\n", + " academic gown 50 0.42 0.86\n", + " accordion 50 0.8 0.9\n", + " acoustic guitar 50 0.5 0.76\n", + " aircraft carrier 50 0.8 0.96\n", + " airliner 50 0.92 1\n", + " airship 50 0.76 0.82\n", + " altar 50 0.64 0.98\n", + " ambulance 50 0.88 0.98\n", + " amphibious vehicle 50 0.64 0.94\n", + " analog clock 50 0.52 0.92\n", + " apiary 50 0.82 0.96\n", + " apron 50 0.7 0.84\n", + " waste container 50 0.4 0.8\n", + " assault rifle 50 0.42 0.84\n", + " backpack 50 0.34 0.64\n", + " bakery 50 0.4 0.68\n", + " balance beam 50 0.8 0.98\n", + " balloon 50 0.86 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.7 0.9\n", + " banjo 50 0.84 1\n", + " baluster 50 0.68 0.94\n", + " barbell 50 0.56 0.9\n", + " barber chair 50 0.7 0.92\n", + " barbershop 50 0.54 0.86\n", + " barn 50 0.96 0.96\n", + " barometer 50 0.84 0.98\n", + " barrel 50 0.56 0.88\n", + " wheelbarrow 50 0.66 0.88\n", + " baseball 50 0.74 0.98\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.66 0.92\n", + " bassoon 50 0.74 0.98\n", + " swimming cap 50 0.62 0.88\n", + " bath towel 50 0.54 0.78\n", + " bathtub 50 0.4 0.88\n", + " station wagon 50 0.66 0.84\n", + " lighthouse 50 0.78 0.94\n", + " beaker 50 0.52 0.68\n", + " military cap 50 0.84 0.96\n", + " beer bottle 50 0.66 0.88\n", + " beer glass 50 0.6 0.84\n", + " bell-cot 50 0.56 0.96\n", + " bib 50 0.58 0.82\n", + " tandem bicycle 50 0.86 0.96\n", + " bikini 50 0.56 0.88\n", + " ring binder 50 0.64 0.84\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.94\n", + " boathouse 50 0.74 0.92\n", + " bobsleigh 50 0.92 0.96\n", + " bolo tie 50 0.8 0.94\n", + " poke bonnet 50 0.64 0.86\n", + " bookcase 50 0.66 0.92\n", + " bookstore 50 0.62 0.88\n", + " bottle cap 50 0.58 0.7\n", + " bow 50 0.72 0.86\n", + " bow tie 50 0.7 0.9\n", + " brass 50 0.92 0.96\n", + " bra 50 0.5 0.7\n", + " breakwater 50 0.62 0.86\n", + " breastplate 50 0.4 0.9\n", + " broom 50 0.6 0.86\n", + " bucket 50 0.66 0.8\n", + " buckle 50 0.5 0.68\n", + " bulletproof vest 50 0.5 0.78\n", + " high-speed train 50 0.94 0.96\n", + " butcher shop 50 0.74 0.94\n", + " taxicab 50 0.64 0.86\n", + " cauldron 50 0.44 0.66\n", + " candle 50 0.48 0.74\n", + " cannon 50 0.88 0.94\n", + " canoe 50 0.94 1\n", + " can opener 50 0.66 0.86\n", + " cardigan 50 0.68 0.8\n", + " car mirror 50 0.94 0.96\n", + " carousel 50 
0.94 0.98\n", + " tool kit 50 0.56 0.78\n", + " carton 50 0.42 0.7\n", + " car wheel 50 0.38 0.74\n", + "automated teller machine 50 0.76 0.94\n", + " cassette 50 0.52 0.8\n", + " cassette player 50 0.28 0.9\n", + " castle 50 0.78 0.88\n", + " catamaran 50 0.78 1\n", + " CD player 50 0.52 0.82\n", + " cello 50 0.82 1\n", + " mobile phone 50 0.68 0.86\n", + " chain 50 0.38 0.66\n", + " chain-link fence 50 0.7 0.84\n", + " chain mail 50 0.64 0.9\n", + " chainsaw 50 0.84 0.92\n", + " chest 50 0.68 0.92\n", + " chiffonier 50 0.26 0.64\n", + " chime 50 0.62 0.84\n", + " china cabinet 50 0.82 0.96\n", + " Christmas stocking 50 0.92 0.94\n", + " church 50 0.62 0.9\n", + " movie theater 50 0.58 0.88\n", + " cleaver 50 0.32 0.62\n", + " cliff dwelling 50 0.88 1\n", + " cloak 50 0.32 0.64\n", + " clogs 50 0.58 0.88\n", + " cocktail shaker 50 0.62 0.7\n", + " coffee mug 50 0.44 0.72\n", + " coffeemaker 50 0.64 0.92\n", + " coil 50 0.66 0.84\n", + " combination lock 50 0.64 0.84\n", + " computer keyboard 50 0.7 0.82\n", + " confectionery store 50 0.54 0.86\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 0.98\n", + " corkscrew 50 0.82 0.92\n", + " cornet 50 0.46 0.88\n", + " cowboy boot 50 0.64 0.8\n", + " cowboy hat 50 0.64 0.82\n", + " cradle 50 0.38 0.8\n", + " crane (machine) 50 0.78 0.94\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.52 0.82\n", + " infant bed 50 0.74 1\n", + " Crock Pot 50 0.78 0.9\n", + " croquet ball 50 0.9 0.96\n", + " crutch 50 0.46 0.7\n", + " cuirass 50 0.54 0.86\n", + " dam 50 0.74 0.92\n", + " desk 50 0.6 0.86\n", + " desktop computer 50 0.54 0.94\n", + " rotary dial telephone 50 0.88 0.94\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.54 0.76\n", + " digital watch 50 0.58 0.86\n", + " dining table 50 0.76 0.9\n", + " dishcloth 50 0.94 1\n", + " dishwasher 50 0.44 0.78\n", + " disc brake 50 0.98 1\n", + " dock 50 0.54 0.94\n", + " dog sled 50 0.84 1\n", + " dome 50 0.72 0.92\n", + " doormat 50 0.56 0.82\n", + " drilling rig 50 0.84 0.96\n", + " drum 50 0.38 0.68\n", + " drumstick 50 0.56 0.72\n", + " dumbbell 50 0.62 0.9\n", + " Dutch oven 50 0.7 0.84\n", + " electric fan 50 0.82 0.86\n", + " electric guitar 50 0.62 0.84\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.9 0.98\n", + " envelope 50 0.44 0.86\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.7 0.92\n", + " feather boa 50 0.7 0.84\n", + " filing cabinet 50 0.88 0.98\n", + " fireboat 50 0.94 0.98\n", + " fire engine 50 0.84 0.9\n", + " fire screen sheet 50 0.62 0.76\n", + " flagpole 50 0.74 0.88\n", + " flute 50 0.36 0.72\n", + " folding chair 50 0.62 0.84\n", + " football helmet 50 0.86 0.94\n", + " forklift 50 0.8 0.92\n", + " fountain 50 0.84 0.94\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.78 0.94\n", + " freight car 50 0.96 1\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.36 0.78\n", + " fur coat 50 0.84 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.84 0.92\n", + " gas pump 50 0.9 0.98\n", + " goblet 50 0.68 0.82\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.84 0.9\n", + " golf cart 50 0.78 0.86\n", + " gondola 50 0.98 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.62 0.96\n", + " grand piano 50 0.7 0.96\n", + " greenhouse 50 0.8 0.98\n", + " grille 50 0.72 0.9\n", + " grocery store 50 0.66 0.94\n", + " guillotine 50 0.86 0.92\n", + " barrette 50 0.52 0.66\n", + " hair spray 50 0.5 0.74\n", + " half-track 50 0.78 0.9\n", + " hammer 50 0.56 0.76\n", + " hamper 50 0.64 0.84\n", + " hair dryer 
50 0.56 0.74\n", + " hand-held computer 50 0.42 0.86\n", + " handkerchief 50 0.78 0.94\n", + " hard disk drive 50 0.76 0.84\n", + " harmonica 50 0.7 0.88\n", + " harp 50 0.88 0.96\n", + " harvester 50 0.78 1\n", + " hatchet 50 0.54 0.74\n", + " holster 50 0.66 0.84\n", + " home theater 50 0.64 0.94\n", + " honeycomb 50 0.56 0.88\n", + " hook 50 0.3 0.6\n", + " hoop skirt 50 0.64 0.86\n", + " horizontal bar 50 0.68 0.98\n", + " horse-drawn vehicle 50 0.88 0.94\n", + " hourglass 50 0.88 0.96\n", + " iPod 50 0.76 0.94\n", + " clothes iron 50 0.82 0.88\n", + " jack-o'-lantern 50 0.98 0.98\n", + " jeans 50 0.68 0.84\n", + " jeep 50 0.72 0.9\n", + " T-shirt 50 0.72 0.96\n", + " jigsaw puzzle 50 0.84 0.94\n", + " pulled rickshaw 50 0.86 0.94\n", + " joystick 50 0.8 0.9\n", + " kimono 50 0.84 0.96\n", + " knee pad 50 0.62 0.88\n", + " knot 50 0.66 0.8\n", + " lab coat 50 0.8 0.96\n", + " ladle 50 0.36 0.64\n", + " lampshade 50 0.48 0.84\n", + " laptop computer 50 0.26 0.88\n", + " lawn mower 50 0.78 0.96\n", + " lens cap 50 0.46 0.72\n", + " paper knife 50 0.26 0.5\n", + " library 50 0.54 0.9\n", + " lifeboat 50 0.92 0.98\n", + " lighter 50 0.56 0.78\n", + " limousine 50 0.76 0.92\n", + " ocean liner 50 0.88 0.94\n", + " lipstick 50 0.74 0.9\n", + " slip-on shoe 50 0.74 0.92\n", + " lotion 50 0.5 0.86\n", + " speaker 50 0.52 0.68\n", + " loupe 50 0.32 0.52\n", + " sawmill 50 0.72 0.9\n", + " magnetic compass 50 0.52 0.82\n", + " mail bag 50 0.68 0.92\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.22 0.94\n", + " tank suit 50 0.24 0.9\n", + " manhole cover 50 0.96 0.98\n", + " maraca 50 0.74 0.9\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.44 0.82\n", + " match 50 0.66 0.9\n", + " maypole 50 0.96 1\n", + " maze 50 0.8 0.96\n", + " measuring cup 50 0.54 0.76\n", + " medicine chest 50 0.6 0.84\n", + " megalith 50 0.8 0.92\n", + " microphone 50 0.52 0.7\n", + " microwave oven 50 0.48 0.72\n", + " military uniform 50 0.62 0.84\n", + " milk can 50 0.68 0.82\n", + " minibus 50 0.7 1\n", + " miniskirt 50 0.46 0.76\n", + " minivan 50 0.38 0.8\n", + " missile 50 0.4 0.84\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.8 0.92\n", + " mobile home 50 0.54 0.78\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.58 0.86\n", + " monastery 50 0.44 0.9\n", + " monitor 50 0.4 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.68 0.94\n", + " square academic cap 50 0.5 0.84\n", + " mosque 50 0.9 1\n", + " mosquito net 50 0.9 0.98\n", + " scooter 50 0.9 0.98\n", + " mountain bike 50 0.78 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.42 0.82\n", + " mousetrap 50 0.76 0.88\n", + " moving van 50 0.4 0.72\n", + " muzzle 50 0.5 0.72\n", + " nail 50 0.68 0.74\n", + " neck brace 50 0.56 0.68\n", + " necklace 50 0.86 1\n", + " nipple 50 0.7 0.88\n", + " notebook computer 50 0.34 0.84\n", + " obelisk 50 0.8 0.92\n", + " oboe 50 0.6 0.84\n", + " ocarina 50 0.8 0.86\n", + " odometer 50 0.96 1\n", + " oil filter 50 0.58 0.82\n", + " organ 50 0.82 0.9\n", + " oscilloscope 50 0.9 0.96\n", + " overskirt 50 0.2 0.7\n", + " bullock cart 50 0.7 0.94\n", + " oxygen mask 50 0.46 0.84\n", + " packet 50 0.5 0.78\n", + " paddle 50 0.56 0.94\n", + " paddle wheel 50 0.86 0.96\n", + " padlock 50 0.74 0.78\n", + " paintbrush 50 0.62 0.8\n", + " pajamas 50 0.56 0.92\n", + " palace 50 0.64 0.96\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.66 0.84\n", + " parachute 50 0.92 0.94\n", + " parallel bars 50 0.62 0.96\n", + " park bench 50 0.74 0.9\n", + " parking meter 50 0.84 0.92\n", + " passenger car 50 0.5 0.82\n", + 
" patio 50 0.58 0.84\n", + " payphone 50 0.74 0.92\n", + " pedestal 50 0.52 0.9\n", + " pencil case 50 0.64 0.92\n", + " pencil sharpener 50 0.52 0.78\n", + " perfume 50 0.7 0.9\n", + " Petri dish 50 0.6 0.8\n", + " photocopier 50 0.88 0.98\n", + " plectrum 50 0.7 0.84\n", + " Pickelhaube 50 0.72 0.86\n", + " picket fence 50 0.84 0.94\n", + " pickup truck 50 0.64 0.92\n", + " pier 50 0.52 0.82\n", + " piggy bank 50 0.82 0.94\n", + " pill bottle 50 0.76 0.86\n", + " pillow 50 0.76 0.9\n", + " ping-pong ball 50 0.84 0.88\n", + " pinwheel 50 0.76 0.88\n", + " pirate ship 50 0.76 0.94\n", + " pitcher 50 0.46 0.84\n", + " hand plane 50 0.84 0.94\n", + " planetarium 50 0.88 0.98\n", + " plastic bag 50 0.36 0.62\n", + " plate rack 50 0.52 0.78\n", + " plow 50 0.78 0.88\n", + " plunger 50 0.42 0.7\n", + " Polaroid camera 50 0.84 0.92\n", + " pole 50 0.38 0.74\n", + " police van 50 0.76 0.94\n", + " poncho 50 0.58 0.86\n", + " billiard table 50 0.8 0.88\n", + " soda bottle 50 0.56 0.94\n", + " pot 50 0.78 0.92\n", + " potter's wheel 50 0.9 0.94\n", + " power drill 50 0.42 0.72\n", + " prayer rug 50 0.7 0.86\n", + " printer 50 0.54 0.86\n", + " prison 50 0.7 0.9\n", + " projectile 50 0.28 0.9\n", + " projector 50 0.62 0.84\n", + " hockey puck 50 0.92 0.96\n", + " punching bag 50 0.6 0.68\n", + " purse 50 0.42 0.78\n", + " quill 50 0.68 0.84\n", + " quilt 50 0.64 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.72 0.9\n", + " radiator 50 0.66 0.76\n", + " radio 50 0.64 0.92\n", + " radio telescope 50 0.9 0.96\n", + " rain barrel 50 0.8 0.98\n", + " recreational vehicle 50 0.84 0.94\n", + " reel 50 0.72 0.82\n", + " reflex camera 50 0.72 0.92\n", + " refrigerator 50 0.7 0.9\n", + " remote control 50 0.7 0.88\n", + " restaurant 50 0.5 0.66\n", + " revolver 50 0.82 1\n", + " rifle 50 0.38 0.7\n", + " rocking chair 50 0.62 0.84\n", + " rotisserie 50 0.88 0.92\n", + " eraser 50 0.54 0.76\n", + " rugby ball 50 0.86 0.94\n", + " ruler 50 0.68 0.86\n", + " running shoe 50 0.78 0.94\n", + " safe 50 0.82 0.92\n", + " safety pin 50 0.4 0.62\n", + " salt shaker 50 0.66 0.9\n", + " sandal 50 0.66 0.86\n", + " sarong 50 0.64 0.86\n", + " saxophone 50 0.66 0.88\n", + " scabbard 50 0.76 0.92\n", + " weighing scale 50 0.58 0.78\n", + " school bus 50 0.92 1\n", + " schooner 50 0.84 1\n", + " scoreboard 50 0.9 0.96\n", + " CRT screen 50 0.14 0.7\n", + " screw 50 0.9 0.98\n", + " screwdriver 50 0.3 0.58\n", + " seat belt 50 0.88 0.94\n", + " sewing machine 50 0.76 0.9\n", + " shield 50 0.56 0.82\n", + " shoe store 50 0.78 0.96\n", + " shoji 50 0.8 0.92\n", + " shopping basket 50 0.52 0.88\n", + " shopping cart 50 0.76 0.92\n", + " shovel 50 0.62 0.84\n", + " shower cap 50 0.7 0.84\n", + " shower curtain 50 0.64 0.82\n", + " ski 50 0.74 0.92\n", + " ski mask 50 0.72 0.88\n", + " sleeping bag 50 0.68 0.8\n", + " slide rule 50 0.72 0.88\n", + " sliding door 50 0.44 0.78\n", + " slot machine 50 0.94 0.98\n", + " snorkel 50 0.86 0.98\n", + " snowmobile 50 0.88 1\n", + " snowplow 50 0.84 0.98\n", + " soap dispenser 50 0.56 0.86\n", + " soccer ball 50 0.86 0.96\n", + " sock 50 0.62 0.76\n", + " solar thermal collector 50 0.72 0.96\n", + " sombrero 50 0.6 0.84\n", + " soup bowl 50 0.56 0.94\n", + " space bar 50 0.34 0.88\n", + " space heater 50 0.52 0.74\n", + " space shuttle 50 0.82 0.96\n", + " spatula 50 0.3 0.6\n", + " motorboat 50 0.86 1\n", + " spider web 50 0.7 0.9\n", + " spindle 50 0.86 0.98\n", + " sports car 50 0.6 0.94\n", + " spotlight 50 0.26 0.6\n", + " stage 50 0.68 0.86\n", + " steam locomotive 50 0.94 
1\n", + " through arch bridge 50 0.84 0.96\n", + " steel drum 50 0.82 0.9\n", + " stethoscope 50 0.6 0.82\n", + " scarf 50 0.5 0.92\n", + " stone wall 50 0.76 0.9\n", + " stopwatch 50 0.58 0.9\n", + " stove 50 0.46 0.74\n", + " strainer 50 0.64 0.84\n", + " tram 50 0.88 0.96\n", + " stretcher 50 0.6 0.8\n", + " couch 50 0.8 0.96\n", + " stupa 50 0.88 0.88\n", + " submarine 50 0.72 0.92\n", + " suit 50 0.4 0.78\n", + " sundial 50 0.58 0.74\n", + " sunglass 50 0.14 0.58\n", + " sunglasses 50 0.28 0.58\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.6 0.94\n", + " mop 50 0.74 0.92\n", + " sweatshirt 50 0.28 0.66\n", + " swimsuit 50 0.52 0.82\n", + " swing 50 0.76 0.84\n", + " switch 50 0.56 0.76\n", + " syringe 50 0.62 0.82\n", + " table lamp 50 0.6 0.88\n", + " tank 50 0.8 0.96\n", + " tape player 50 0.46 0.76\n", + " teapot 50 0.84 1\n", + " teddy bear 50 0.82 0.94\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.88 0.9\n", + " front curtain 50 0.8 0.92\n", + " thimble 50 0.6 0.8\n", + " threshing machine 50 0.56 0.88\n", + " throne 50 0.72 0.82\n", + " tile roof 50 0.72 0.94\n", + " toaster 50 0.66 0.84\n", + " tobacco shop 50 0.42 0.7\n", + " toilet seat 50 0.62 0.88\n", + " torch 50 0.64 0.84\n", + " totem pole 50 0.92 0.98\n", + " tow truck 50 0.62 0.88\n", + " toy store 50 0.6 0.94\n", + " tractor 50 0.76 0.98\n", + " semi-trailer truck 50 0.78 0.92\n", + " tray 50 0.46 0.64\n", + " trench coat 50 0.54 0.72\n", + " tricycle 50 0.72 0.94\n", + " trimaran 50 0.7 0.98\n", + " tripod 50 0.58 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.9 1\n", + " trombone 50 0.54 0.88\n", + " tub 50 0.24 0.82\n", + " turnstile 50 0.84 0.94\n", + " typewriter keyboard 50 0.68 0.98\n", + " umbrella 50 0.52 0.7\n", + " unicycle 50 0.74 0.96\n", + " upright piano 50 0.76 0.9\n", + " vacuum cleaner 50 0.62 0.9\n", + " vase 50 0.5 0.78\n", + " vault 50 0.76 0.92\n", + " velvet 50 0.2 0.42\n", + " vending machine 50 0.9 1\n", + " vestment 50 0.54 0.82\n", + " viaduct 50 0.78 0.86\n", + " violin 50 0.68 0.78\n", + " volleyball 50 0.86 1\n", + " waffle iron 50 0.72 0.88\n", + " wall clock 50 0.54 0.88\n", + " wallet 50 0.52 0.9\n", + " wardrobe 50 0.68 0.88\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.72 0.96\n", + " washing machine 50 0.78 0.94\n", + " water bottle 50 0.54 0.74\n", + " water jug 50 0.22 0.74\n", + " water tower 50 0.9 0.96\n", + " whiskey jug 50 0.64 0.74\n", + " whistle 50 0.72 0.84\n", + " wig 50 0.84 0.9\n", + " window screen 50 0.68 0.8\n", + " window shade 50 0.52 0.76\n", + " Windsor tie 50 0.22 0.66\n", + " wine bottle 50 0.42 0.82\n", + " wing 50 0.54 0.96\n", + " wok 50 0.46 0.82\n", + " wooden spoon 50 0.58 0.8\n", + " wool 50 0.32 0.82\n", + " split-rail fence 50 0.74 0.9\n", + " shipwreck 50 0.84 0.96\n", + " yawl 50 0.78 0.96\n", + " yurt 50 0.84 1\n", + " website 50 0.98 1\n", + " comic book 50 0.62 0.9\n", + " crossword 50 0.84 0.88\n", + " traffic sign 50 0.78 0.9\n", + " traffic light 50 0.8 0.94\n", + " dust jacket 50 0.72 0.94\n", + " menu 50 0.82 0.96\n", + " plate 50 0.44 0.88\n", + " guacamole 50 0.8 0.92\n", + " consomme 50 0.54 0.88\n", + " hot pot 50 0.86 0.98\n", + " trifle 50 0.92 0.98\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.62 0.84\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.92\n", + " pretzel 50 0.72 0.88\n", + " cheeseburger 50 0.9 1\n", + " hot dog 50 0.74 0.94\n", + " mashed potato 50 0.74 0.9\n", + " cabbage 50 0.84 0.96\n", + " broccoli 50 0.9 0.96\n", + " 
cauliflower 50 0.82 1\n", + " zucchini 50 0.74 0.9\n", + " spaghetti squash 50 0.8 0.96\n", + " acorn squash 50 0.82 0.96\n", + " butternut squash 50 0.7 0.94\n", + " cucumber 50 0.6 0.96\n", + " artichoke 50 0.84 0.94\n", + " bell pepper 50 0.84 0.98\n", + " cardoon 50 0.88 0.94\n", + " mushroom 50 0.38 0.92\n", + " Granny Smith 50 0.9 0.96\n", + " strawberry 50 0.6 0.88\n", + " orange 50 0.7 0.92\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.82 0.96\n", + " pineapple 50 0.86 0.96\n", + " banana 50 0.84 0.96\n", + " jackfruit 50 0.9 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.82 0.98\n", + " hay 50 0.8 0.92\n", + " carbonara 50 0.88 0.94\n", + " chocolate syrup 50 0.46 0.84\n", + " dough 50 0.4 0.6\n", + " meatloaf 50 0.58 0.84\n", + " pizza 50 0.84 0.96\n", + " pot pie 50 0.68 0.9\n", + " burrito 50 0.8 0.98\n", + " red wine 50 0.54 0.82\n", + " espresso 50 0.64 0.88\n", + " cup 50 0.38 0.7\n", + " eggnog 50 0.38 0.7\n", + " alp 50 0.54 0.88\n", + " bubble 50 0.8 0.96\n", + " cliff 50 0.64 1\n", + " coral reef 50 0.72 0.96\n", + " geyser 50 0.94 1\n", + " lakeshore 50 0.54 0.88\n", + " promontory 50 0.58 0.94\n", + " shoal 50 0.6 0.96\n", + " seashore 50 0.44 0.78\n", + " valley 50 0.72 0.94\n", + " volcano 50 0.78 0.96\n", + " baseball player 50 0.72 0.94\n", + " bridegroom 50 0.72 0.88\n", + " scuba diver 50 0.8 1\n", + " rapeseed 50 0.94 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.4 0.88\n", + " acorn 50 0.92 0.98\n", + " rose hip 50 0.92 0.98\n", + " horse chestnut seed 50 0.94 0.98\n", + " coral fungus 50 0.96 0.96\n", + " agaric 50 0.82 0.94\n", + " gyromitra 50 0.98 1\n", + " stinkhorn mushroom 50 0.8 0.94\n", + " earth star 50 0.98 1\n", + " hen-of-the-woods 50 0.8 0.96\n", + " bolete 50 0.74 0.94\n", + " ear 50 0.48 0.94\n", + " toilet paper 50 0.36 0.68\n", + "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s on Imagenet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " patio 50 0.6 0.84\n", - " payphone 50 0.78 0.94\n", - " pedestal 50 0.66 0.86\n", - " pencil case 50 0.74 0.98\n", - " pencil sharpener 50 0.6 0.76\n", - " perfume 50 0.66 0.96\n", - " Petri dish 50 0.64 0.82\n", - " photocopier 50 0.94 1\n", - " plectrum 50 0.72 0.92\n", - " Pickelhaube 50 0.78 0.88\n", - " picket fence 50 0.86 0.94\n", - " pickup truck 50 0.72 0.94\n", - " pier 50 0.54 0.92\n", - " piggy bank 50 0.8 0.94\n", - " pill bottle 50 0.72 0.9\n", - " pillow 50 0.76 0.88\n", - " ping-pong ball 50 0.78 0.88\n", - " pinwheel 50 0.8 0.94\n", - " pirate ship 50 0.76 0.92\n", - " pitcher 50 0.48 0.86\n", - " hand plane 50 0.9 0.92\n", - " planetarium 50 0.9 0.98\n", - " plastic bag 50 0.42 0.66\n", - " plate rack 50 0.52 0.82\n", - " plow 50 0.8 0.94\n", - " plunger 50 0.42 0.72\n", - " Polaroid camera 50 0.84 0.94\n", - " pole 50 0.4 0.76\n", - " police van 50 0.84 0.94\n", - " poncho 50 0.64 0.88\n", - " billiard table 50 0.84 0.92\n", - " soda bottle 50 0.58 0.9\n", - " pot 50 0.86 0.94\n", - " potter's wheel 50 0.92 0.94\n", - " power drill 50 0.38 0.7\n", - " prayer rug 50 0.7 0.88\n", - " printer 50 0.52 0.86\n", - " prison 50 0.66 0.9\n", - " projectile 50 0.34 0.96\n", - " projector 50 0.6 0.82\n", - " hockey puck 50 0.9 0.98\n", - " punching bag 50 0.62 
0.72\n", - " purse 50 0.48 0.88\n", - " quill 50 0.78 0.86\n", - " quilt 50 0.6 0.9\n", - " race car 50 0.72 0.92\n", - " racket 50 0.78 0.94\n", - " radiator 50 0.7 0.84\n", - " radio 50 0.68 0.9\n", - " radio telescope 50 0.88 0.94\n", - " rain barrel 50 0.8 0.96\n", - " recreational vehicle 50 0.84 0.96\n", - " reel 50 0.72 0.8\n", - " reflex camera 50 0.76 0.96\n", - " refrigerator 50 0.76 0.92\n", - " remote control 50 0.72 0.94\n", - " restaurant 50 0.52 0.62\n", - " revolver 50 0.8 0.98\n", - " rifle 50 0.46 0.76\n", - " rocking chair 50 0.72 0.9\n", - " rotisserie 50 0.88 0.96\n", - " eraser 50 0.62 0.76\n", - " rugby ball 50 0.84 0.94\n", - " ruler 50 0.72 0.86\n", - " running shoe 50 0.84 0.94\n", - " safe 50 0.9 0.94\n", - " safety pin 50 0.48 0.8\n", - " salt shaker 50 0.62 0.8\n", - " sandal 50 0.7 0.82\n", - " sarong 50 0.62 0.8\n", - " saxophone 50 0.66 0.9\n", - " scabbard 50 0.78 0.92\n", - " weighing scale 50 0.62 0.84\n", - " school bus 50 0.92 1\n", - " schooner 50 0.8 1\n", - " scoreboard 50 0.86 0.98\n", - " CRT screen 50 0.16 0.8\n", - " screw 50 0.96 0.98\n", - " screwdriver 50 0.4 0.58\n", - " seat belt 50 0.9 0.92\n", - " sewing machine 50 0.74 0.94\n", - " shield 50 0.64 0.78\n", - " shoe store 50 0.84 0.98\n", - " shoji 50 0.76 0.92\n", - " shopping basket 50 0.52 0.84\n", - " shopping cart 50 0.76 0.9\n", - " shovel 50 0.7 0.84\n", - " shower cap 50 0.74 0.88\n", - " shower curtain 50 0.72 0.9\n", - " ski 50 0.68 0.94\n", - " ski mask 50 0.66 0.9\n", - " sleeping bag 50 0.66 0.8\n", - " slide rule 50 0.7 0.86\n", - " sliding door 50 0.54 0.76\n", - " slot machine 50 0.92 0.96\n", - " snorkel 50 0.86 1\n", - " snowmobile 50 0.86 0.96\n", - " snowplow 50 0.9 1\n", - " soap dispenser 50 0.52 0.9\n", - " soccer ball 50 0.84 0.98\n", - " sock 50 0.66 0.78\n", - " solar thermal collector 50 0.72 0.9\n", - " sombrero 50 0.7 0.84\n", - " soup bowl 50 0.6 0.94\n", - " space bar 50 0.32 0.84\n", - " space heater 50 0.64 0.74\n", - " space shuttle 50 0.86 0.98\n", - " spatula 50 0.28 0.6\n", - " motorboat 50 0.94 1\n", - " spider web 50 0.76 0.96\n", - " spindle 50 0.92 1\n", - " sports car 50 0.5 0.96\n", - " spotlight 50 0.34 0.66\n", - " stage 50 0.76 0.92\n", - " steam locomotive 50 0.96 1\n", - " through arch bridge 50 0.82 0.96\n", - " steel drum 50 0.8 0.94\n", - " stethoscope 50 0.52 0.84\n", - " scarf 50 0.54 0.92\n", - " stone wall 50 0.8 0.92\n", - " stopwatch 50 0.54 0.9\n", - " stove 50 0.46 0.78\n", - " strainer 50 0.58 0.84\n", - " tram 50 0.9 0.96\n", - " stretcher 50 0.46 0.74\n", - " couch 50 0.72 0.94\n", - " stupa 50 0.84 0.9\n", - " submarine 50 0.78 0.9\n", - " suit 50 0.62 0.88\n", - " sundial 50 0.46 0.78\n", - " sunglass 50 0.18 0.6\n", - " sunglasses 50 0.32 0.64\n", - " sunscreen 50 0.32 0.7\n", - " suspension bridge 50 0.64 0.94\n", - " mop 50 0.8 0.96\n", - " sweatshirt 50 0.26 0.68\n", - " swimsuit 50 0.6 0.84\n", - " swing 50 0.78 0.88\n", - " switch 50 0.62 0.8\n", - " syringe 50 0.68 0.8\n", - " table lamp 50 0.54 0.88\n", - " tank 50 0.78 0.94\n", - " tape player 50 0.38 0.88\n", - " teapot 50 0.82 1\n", - " teddy bear 50 0.82 0.92\n", - " television 50 0.6 0.9\n", - " tennis ball 50 0.7 0.94\n", - " thatched roof 50 0.86 0.94\n", - " front curtain 50 0.76 0.94\n", - " thimble 50 0.68 0.82\n", - " threshing machine 50 0.64 0.9\n", - " throne 50 0.68 0.82\n", - " tile roof 50 0.84 0.96\n", - " toaster 50 0.64 0.82\n", - " tobacco shop 50 0.44 0.74\n", - " toilet seat 50 0.64 0.88\n", - " torch 50 0.62 0.86\n", - " totem pole 50 0.9 1\n", - 
" tow truck 50 0.64 0.92\n", - " toy store 50 0.64 0.9\n", - " tractor 50 0.86 0.98\n", - " semi-trailer truck 50 0.76 0.96\n", - " tray 50 0.54 0.76\n", - " trench coat 50 0.6 0.78\n", - " tricycle 50 0.78 0.96\n", - " trimaran 50 0.78 0.98\n", - " tripod 50 0.66 0.86\n", - " triumphal arch 50 0.92 0.98\n", - " trolleybus 50 0.98 1\n", - " trombone 50 0.66 0.94\n", - " tub 50 0.3 0.86\n", - " turnstile 50 0.8 0.9\n", - " typewriter keyboard 50 0.74 0.98\n", - " umbrella 50 0.6 0.78\n", - " unicycle 50 0.78 0.96\n", - " upright piano 50 0.84 0.94\n", - " vacuum cleaner 50 0.84 0.92\n", - " vase 50 0.56 0.74\n", - " vault 50 0.78 0.9\n", - " velvet 50 0.22 0.5\n", - " vending machine 50 0.94 1\n", - " vestment 50 0.62 0.86\n", - " viaduct 50 0.78 0.88\n", - " violin 50 0.64 0.88\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " volleyball 50 0.96 1\n", - " waffle iron 50 0.72 0.84\n", - " wall clock 50 0.58 0.86\n", - " wallet 50 0.58 0.94\n", - " wardrobe 50 0.7 0.9\n", - " military aircraft 50 0.9 0.98\n", - " sink 50 0.74 0.94\n", - " washing machine 50 0.82 0.94\n", - " water bottle 50 0.54 0.68\n", - " water jug 50 0.3 0.78\n", - " water tower 50 0.94 0.96\n", - " whiskey jug 50 0.64 0.76\n", - " whistle 50 0.7 0.82\n", - " wig 50 0.86 0.88\n", - " window screen 50 0.7 0.82\n", - " window shade 50 0.54 0.9\n", - " Windsor tie 50 0.32 0.64\n", - " wine bottle 50 0.46 0.76\n", - " wing 50 0.52 0.96\n", - " wok 50 0.54 0.92\n", - " wooden spoon 50 0.62 0.86\n", - " wool 50 0.42 0.84\n", - " split-rail fence 50 0.7 0.92\n", - " shipwreck 50 0.86 0.98\n", - " yawl 50 0.76 0.92\n", - " yurt 50 0.86 0.96\n", - " website 50 0.98 1\n", - " comic book 50 0.72 0.88\n", - " crossword 50 0.8 0.88\n", - " traffic sign 50 0.72 0.9\n", - " traffic light 50 0.8 0.96\n", - " dust jacket 50 0.78 0.94\n", - " menu 50 0.8 0.96\n", - " plate 50 0.44 0.86\n", - " guacamole 50 0.76 0.96\n", - " consomme 50 0.52 0.92\n", - " hot pot 50 0.78 1\n", - " trifle 50 0.9 1\n", - " ice cream 50 0.68 0.94\n", - " ice pop 50 0.68 0.8\n", - " baguette 50 0.62 0.88\n", - " bagel 50 0.64 0.86\n", - " pretzel 50 0.68 0.9\n", - " cheeseburger 50 0.92 0.96\n", - " hot dog 50 0.74 0.96\n", - " mashed potato 50 0.72 0.88\n", - " cabbage 50 0.88 0.98\n", - " broccoli 50 0.88 0.96\n", - " cauliflower 50 0.84 0.98\n", - " zucchini 50 0.68 0.98\n", - " spaghetti squash 50 0.82 0.96\n", - " acorn squash 50 0.8 1\n", - " butternut squash 50 0.72 0.94\n", - " cucumber 50 0.66 0.94\n", - " artichoke 50 0.86 0.96\n", - " bell pepper 50 0.86 0.94\n", - " cardoon 50 0.92 0.94\n", - " mushroom 50 0.38 0.96\n", - " Granny Smith 50 0.9 0.98\n", - " strawberry 50 0.64 0.88\n", - " orange 50 0.74 0.94\n", - " lemon 50 0.78 0.98\n", - " fig 50 0.84 0.94\n", - " pineapple 50 0.9 1\n", - " banana 50 0.88 0.98\n", - " jackfruit 50 0.96 0.98\n", - " custard apple 50 0.86 0.96\n", - " pomegranate 50 0.8 0.96\n", - " hay 50 0.84 0.96\n", - " carbonara 50 0.88 0.96\n", - " chocolate syrup 50 0.58 0.94\n", - " dough 50 0.36 0.68\n", - " meatloaf 50 0.64 0.88\n", - " pizza 50 0.78 0.9\n", - " pot pie 50 0.66 0.92\n", - " burrito 50 0.88 0.98\n", - " red wine 50 0.66 0.84\n", - " espresso 50 0.66 0.9\n", - " cup 50 0.42 0.78\n", - " eggnog 50 0.36 0.64\n", - " alp 50 0.54 0.94\n", - " bubble 50 0.86 0.96\n", - " cliff 50 0.66 1\n", - " coral reef 50 0.74 0.94\n", - " geyser 50 0.92 1\n", - " lakeshore 50 0.52 0.86\n", - " promontory 50 0.58 0.92\n", - " shoal 50 0.66 0.98\n", - " seashore 50 0.44 0.86\n", - " valley 50 0.72 0.98\n", - " volcano 50 0.72 0.94\n", - " baseball player 50 0.74 0.96\n", - " bridegroom 50 0.78 0.92\n", - " scuba diver 50 0.82 1\n", - " rapeseed 50 0.98 0.98\n", - " daisy 50 0.96 0.98\n", - " yellow lady's slipper 50 1 1\n", - " corn 50 0.42 0.86\n", - " acorn 50 0.96 0.98\n", - " rose hip 50 0.9 0.96\n", - " horse chestnut seed 50 1 1\n", - " coral fungus 50 0.98 0.98\n", - " agaric 50 0.84 0.94\n", - " gyromitra 50 0.98 0.98\n", - " stinkhorn mushroom 50 0.84 0.92\n", - " earth star 50 1 1\n", - " hen-of-the-woods 50 0.9 0.96\n", - " bolete 50 0.8 0.94\n", - " ear 50 0.54 0.94\n", - " toilet paper 50 0.44 0.68\n", - "Speed: 0.1ms pre-process, 0.2ms inference, 0.0ms post-process per image at shape (1, 3, 320, 
320)\n", - "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Validate YOLOv5s on Imagenet val\n", - "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 320 --half" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY2VXXXu74w5" - }, - "source": [ - "# 3. Train\n", - "\n", - "

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] }, - "id": "1NcFxRcFdJ_O", - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=160, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", - "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 7 commits. 
Use `git pull ultralytics master` or `git clone https://github.com/ultralytics/yolov5` to update.\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0m⚠️ not found, install with `pip install albumentations` (recommended)\n", - "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", - "Image sizes 160 train, 160 test\n", - "Using 3 dataloader workers\n", - "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", - "\n", - " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", - " 1/3 0.369G 1.05 0.935 0.837 0.985: 100%|█████\n", - " 2/3 0.369G 0.767 0.873 0.859 0.982: 100%|█████\n", - " 3/3 0.369G 0.626 0.713 0.927 0.992: 100%|█████\n", - "\n", - "Training complete (0.025 hours)\n", - "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", - "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /home/paguerrie/datasets/imagenette160\n", - "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", - "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", - "Visualize: https://netron.app\n", - "\n" - ] - } - ], - "source": [ - "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", - "!python classify/train.py --img 160 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "15glLzbQx5u0" - }, - "source": [ - "# 4. Visualize" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nWOsI5wJR1o3" - }, - "source": [ - "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", - "\n", - "Getting started is easy:\n", - "```shell\n", - "pip install comet_ml # 1. install\n", - "export COMET_API_KEY= # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\"yolo-ui\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", - "\n", - "\n", - "\"ClearML" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "YOLOv5 Classification Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "0856bea36ec148b68522ff9c9eb258d8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "638c55b1-dc45-4eee-cabc-4921dc61faf5" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + 
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", + "100% 103M/103M [00:09<00:00, 11.1MB/s]\n", + "Unzipping /content/datasets/imagenette160.zip...\n", + "Dataset download success ✅ (13.2s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 224 train, 224 test\n", + "Using 1 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/3 0.348G 1.31 1.09 0.794 0.979: 100% 592/592 [01:02<00:00, 9.47it/s]\n", + " 2/3 0.415G 1.09 0.852 0.883 0.99: 100% 592/592 [00:59<00:00, 10.00it/s]\n", + " 3/3 0.415G 0.954 0.776 0.907 0.994: 100% 592/592 [00:59<00:00, 9.89it/s]\n", + "\n", + "Training complete (0.051 hours)\n", + "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] + } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --img 224 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + ] }, - "0ace3934ec6f4d36a1b3a9e086390926": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. 
Visualize" + ] }, - "35e03ce5090346c9ae602891470fc555": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", - "value": 818322941 - } + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] }, - "574140e4c4bc48c9a171541a02cd0211": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", - "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", - "value": "100%" - } + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] }, - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
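(A minimal sketch, not part of this patch: the CSV logger mentioned above writes a per-epoch `results.csv` into the run directory, so the logged metrics can be read back with pandas. The run path `runs/train-cls/exp` and the space-padded header are assumptions based on the default classification run directory, not something this diff changes.)

```python
# Sketch: read the per-epoch CSV log written alongside the TensorBoard events.
# 'runs/train-cls/exp/results.csv' is an assumed default path, not taken from this diff.
import pandas as pd

df = pd.read_csv('runs/train-cls/exp/results.csv')
df.columns = [c.strip() for c in df.columns]  # header cells are space-padded in the CSV
print(df.tail())  # per-epoch train/val loss and top-1 / top-5 accuracy
```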
\n", + "\n", + "\"Local\n" + ] }, - "60b913d755b34d638478e30705a2dde1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] }, - "65881db1db8a4e9c930fab9172d45143": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] }, - "76879f6f2aa54637a7a07faeea2bd684": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional 
content below." + ] }, - "9b8caa3522fc4cbab31e13b5dfc7808d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" - ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" - } + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "YOLOv5 Classification Tutorial", + "provenance": [] }, - "c942c208e72d46568b476bb0f2d75496": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", - "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - "value": " 780M/780M [02:19<00:00, 6.24MB/s]" - } + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "d6b7a2243e0c4beca714d99dceec23d6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" } - } - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 9bf18554c3e4b250ba7063876f0191f573ffb7a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 23:46:15 +0100 Subject: [PATCH 149/277] Revert `--save-txt` to default False (#10213) * Revert `--save-txt` to default False Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 2 +- classify/tutorial.ipynb | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 96508d633da8..a9104ed315ec 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -196,7 +196,7 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_false', help='save results to *.txt') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index e035a7bda40d..9e65e53d8736 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -118,8 +118,7 @@ "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", - "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", - "2 labels saved to runs/predict-cls/exp/labels\n" + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" ] } ], @@ -1475,4 +1474,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From 9bc60349b62500096832d78989336fcda200d286 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 23:48:47 +0100 Subject: [PATCH 150/277] Add `--source screen` Usage example (#10215) --- classify/predict.py | 1 + detect.py | 1 + segment/predict.py | 1 + 3 files changed, 3 insertions(+) diff --git a/classify/predict.py b/classify/predict.py index a9104ed315ec..9a6b00062932 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -6,6 +6,7 @@ $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/detect.py b/detect.py index 8e42fbe159d0..58b02802e6d9 100644 --- a/detect.py +++ b/detect.py @@ -6,6 +6,7 @@ $ python detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/segment/predict.py b/segment/predict.py index da1097c047c1..42389938cee7 100644 --- a/segment/predict.py +++ 
b/segment/predict.py @@ -6,6 +6,7 @@ $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube From 9286336cb49d577873b2113739788bbe3b90f83c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 03:16:17 +0100 Subject: [PATCH 151/277] Add `git` info to training checkpoints (#9655) * Add git status on train checkpoints * Update * Update * Update * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- requirements.txt | 1 + train.py | 3 ++- utils/general.py | 19 ++++++++++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 70dd7ce53ba3..85eb839df8a0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ +gitpython ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 diff --git a/train.py b/train.py index bbbd6d07db00..6fa33f47d100 100644 --- a/train.py +++ b/train.py @@ -47,7 +47,7 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, +from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, @@ -376,6 +376,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/utils/general.py b/utils/general.py index 58181f00568d..57b6e4e78166 100644 --- a/utils/general.py +++ b/utils/general.py @@ -29,6 +29,7 @@ from zipfile import ZipFile, is_zipfile import cv2 +import git import IPython import numpy as np import pandas as pd @@ -344,6 +345,22 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): LOGGER.info(s) +@WorkingDirectory(ROOT) +def check_git(path='.'): + # YOLOv5 git check, return git {remote, branch, commit} + try: + repo = git.Repo(path) + remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' + commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' + try: + branch = repo.active_branch.name # i.e. 'main' + except TypeError: # not on any branch + branch = None # i.e. 'detached HEAD' state + return {'remote': remote, 'branch': branch, 'commit': commit} + except git.exc.InvalidGitRepositoryError: # path is not a git dir + return {'remote': None, 'branch': None, 'commit': None} + + def check_python(minimum='3.7.0'): # Check current python version vs. 
required python version check_version(platform.python_version(), minimum, name='Python ', hard=True) @@ -1121,4 +1138,4 @@ def imshow(path, im): cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ -NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm +GIT = check_git() # repo, branch, commit From 0307954e4e17da66e6bf36950f02972d976ba621 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 03:32:21 +0100 Subject: [PATCH 152/277] Add git info to cls, seg checkpoints (#10217) --- classify/train.py | 3 ++- segment/train.py | 9 ++------- train.py | 2 +- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/classify/train.py b/classify/train.py index 4422ca26b0ae..5faef08e876c 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,7 +40,7 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, +from utils.general import (DATASETS_DIR, GIT, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls @@ -237,6 +237,7 @@ def train(opt, device): 'updates': ema.updates, 'optimizer': None, # optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/segment/train.py b/segment/train.py index 2a0793d1aa3e..5d9ed78f527c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,7 +46,7 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, +from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) @@ -390,6 +390,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete @@ -498,12 +499,6 @@ def parse_opt(known=False): parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory') parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP') - # Weights & Biases arguments - # parser.add_argument('--entity', default=None, help='W&B: Entity') - # parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - # parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - # parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') 
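(A minimal sketch, not part of these patches: checkpoints saved by the updated trainers now carry a `git` entry next to `date`, holding the `{remote, branch, commit}` dict returned by the helper above, with `None` values when training outside a git repo. The checkpoint path below is an illustrative assumption, and the snippet should be run from the YOLOv5 repo root so the pickled model classes can be resolved.)

```python
# Sketch: inspect the git metadata these patches store in a training checkpoint.
# 'runs/train/exp/weights/last.pt' is an assumed example path; run from the YOLOv5 repo root.
import torch

ckpt = torch.load('runs/train/exp/weights/last.pt', map_location='cpu')
print(ckpt.get('git'))   # e.g. {'remote': ..., 'branch': ..., 'commit': ...}, or None values
print(ckpt.get('date'))  # ISO-format save timestamp stored alongside
```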
- return parser.parse_known_args()[0] if known else parser.parse_args() diff --git a/train.py b/train.py index 6fa33f47d100..1ea5c5bbeddd 100644 --- a/train.py +++ b/train.py @@ -376,7 +376,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete From 6992dde4bd628f6bffe7d4c5025afadf79ed679b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 13:44:46 +0100 Subject: [PATCH 153/277] Update Comet preview image (#10220) * Update Comet preview image Pass through tinyjpg: 2.2MB -> 497kB :) Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/tutorial.ipynb | 2 +- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 4 ++-- utils/loggers/comet/README.md | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 9e65e53d8736..956452a5aeda 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1351,7 +1351,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ] }, { diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 4192c69da628..70bbf857d02b 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -466,7 +466,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ] }, { @@ -590,4 +590,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 9d5aa9c85c51..6cf99650ad45 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -872,7 +872,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ], "metadata": { "id": "nWOsI5wJR1o3" @@ -972,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 3a51cb9b5a25..8f206cd9830e 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -51,7 +51,7 @@ python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yo That's it! Comet will automatically log your hyperparameters, command line arguments, training and valiation metrics. You can visualize and analyze your runs in the Comet UI -yolo-ui +yolo-ui # Try out an Example! Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) From 40bb8030f8468eb7145ff648588aa5f96e32447c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 14:22:07 +0100 Subject: [PATCH 154/277] Scope gitpyhon import in `check_git_info()` (#10221) * Scope gitpyhon import in `check_git_info()` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/train.py | 5 +++-- segment/train.py | 9 +++++---- train.py | 9 +++++---- utils/general.py | 9 ++++----- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/classify/train.py b/classify/train.py index 5faef08e876c..a50845a4f781 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,7 +40,7 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, GIT, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, +from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status, check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls @@ -50,6 +50,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(opt, device): @@ -237,7 +238,7 @@ def train(opt, device): 'updates': ema.updates, 'optimizer': None, # optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/segment/train.py b/segment/train.py index 5d9ed78f527c..3f32d2100a75 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,9 +46,9 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, - check_img_size, check_requirements, check_suffix, check_yaml, colorstr, 
get_latest_run, - increment_path, init_seeds, intersect_dicts, labels_to_class_weights, +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import GenericLogger from utils.plots import plot_evolve, plot_labels @@ -62,6 +62,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary @@ -390,7 +391,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/train.py b/train.py index 1ea5c5bbeddd..8b5446e58f2d 100644 --- a/train.py +++ b/train.py @@ -47,9 +47,9 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, - check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, - increment_path, init_seeds, intersect_dicts, labels_to_class_weights, +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers @@ -63,6 +63,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary @@ -376,7 +377,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/utils/general.py b/utils/general.py index 57b6e4e78166..c5b738983719 100644 --- a/utils/general.py +++ b/utils/general.py @@ -13,7 +13,6 @@ import platform import random import re -import shutil import signal import sys import time @@ -29,7 +28,6 @@ from zipfile import ZipFile, is_zipfile import cv2 -import git import IPython import numpy as np import pandas as pd @@ -346,8 +344,10 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): @WorkingDirectory(ROOT) -def check_git(path='.'): - # YOLOv5 git check, return git {remote, branch, commit} +def check_git_info(path='.'): + # YOLOv5 git info check, return {remote, branch, commit} + check_requirements('gitpython') + import 
git try: repo = git.Repo(path) remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' @@ -1138,4 +1138,3 @@ def imshow(path, im): cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ -GIT = check_git() # repo, branch, commit From 72cad39854a7d9ebbd4d58994cefa966b0da8fc1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 16:44:56 +0100 Subject: [PATCH 155/277] Squeezenet reshape outputs fix (#10222) @AyushExel Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index fe934abf118c..77549b005ceb 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -82,7 +82,7 @@ def reshape_classifier_output(model, n=1000): elif nn.Conv2d in types: i = types.index(nn.Conv2d) # nn.Conv2d index if m[i].out_channels != n: - m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias) + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) @contextmanager From be348cc33925738825ab40dd6eacdfe4afd4e215 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Nov 2022 16:54:36 +0100 Subject: [PATCH 156/277] Validate --task speed CPU fix (#10244) --- segment/val.py | 2 +- val.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/val.py b/segment/val.py index 9bb8f9e4cf54..48bf28d4bf4f 100644 --- a/segment/val.py +++ b/segment/val.py @@ -444,7 +444,7 @@ def main(opt): else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = True # FP16 for fastest results + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False diff --git a/val.py b/val.py index ef282e37bdc1..7c610e83a856 100644 --- a/val.py +++ b/val.py @@ -380,7 +380,7 @@ def main(opt): else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = True # FP16 for fastest results + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... 
opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False From 915bbf294bb74c859f0b41f1c23bc395014ea679 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 16:23:47 +0100 Subject: [PATCH 157/277] YOLOv5 v7.0 release updates (#10245) * YOLOv5 v7.0 splash image update * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * readme segmentation section * readme segmentation section * readme segmentation section * readme segmentation section * readme segmentation section * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update download URLs to 7.0 assets Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 2 +- README.md | 114 +++++++++++++++++++++++++------ classify/tutorial.ipynb | 5 +- data/scripts/download_weights.sh | 5 +- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- utils/downloads.py | 8 +-- 7 files changed, 107 insertions(+), 31 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 65ecd31a3e69..0a2f61ee35b2 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,7 +1,7 @@

- +

[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index 0fa95f404117..298e14570860 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

English | [简体中文](.github/README_cn.md) @@ -50,6 +50,79 @@
+## Segmentation ⭐ NEW
+ +
+ + +
+ +Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+ Segmentation Checkpoints + +
+ +We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. + +| Model | size
(pixels) | mAPbox 50-95 | mAPmask 50-95 | Train time 300 epochs A100 (hours) | Speed ONNX CPU (ms) | Speed TRT A100 (ms) | params (M) | FLOPs
@640 (B) | +|----------------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|-----------------------------------------------|--------------------------------|--------------------------------|--------------------|------------------------| +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
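The reproduce commands in the notes above also have a programmatic equivalent. A minimal sketch, assuming it is run from the root of a local yolov5 clone (so the `segment` package resolves on the import path) and that COCO is prepared as in `data/coco.yaml`; `segment/val.py` exposes a `run()` helper mirroring these CLI flags:

```python
# Minimal sketch: Python equivalent of `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`.
# Assumes a local yolov5 clone on the import path and the COCO val segments split downloaded.
from segment import val

# Runs COCO val2017 through the segmentation model and logs the Box/Mask mAP tables
val.run(data='coco.yaml', weights='yolov5s-seg.pt', imgsz=640, half=True)
```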
+ +
+ Segmentation Usage Examples  Open In Colab + +### Train +YOLOv5 segmentation training supports auto-download of the COCO128-seg segmentation dataset with the `--data coco128-seg.yaml` argument and manual download of the COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 --device 0,1,2,3 +``` + +### Val +Validate YOLOv5s-seg mask mAP on the COCO dataset: +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### Predict +Use pretrained YOLOv5m-seg.pt to predict bus.jpg: +```bash +python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg +``` +```python +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) +--- |--- + +### Export +Export YOLOv5s-seg model to ONNX and TensorRT: +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
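One quick way to confirm that the ONNX file from the export command above is usable outside this repository is to load it with onnxruntime. A minimal sketch, assuming the export produced `yolov5s-seg.onnx` in the working directory and that `onnxruntime` is installed; exact output shapes depend on the export settings:

```python
# Minimal sketch: load the exported segmentation model and run a dummy forward pass.
# Assumes yolov5s-seg.onnx was produced by the export command above.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('yolov5s-seg.onnx', providers=['CPUExecutionProvider'])
input_name = session.get_inputs()[0].name             # typically 'images'
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)  # BCHW; 0-1 normalized pixels in real use
outputs = session.run(None, {input_name: dummy})
for out in outputs:
    print(out.shape)  # expect a detection tensor plus a 32-channel mask prototype tensor
```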
+
+
## Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. @@ -200,12 +273,12 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We

- YOLOv5-P5 640 Figure (click to expand) + YOLOv5-P5 640 Figure

- Figure Notes (click to expand) + Figure Notes - **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536. - **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32. @@ -216,22 +289,22 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We ### Pretrained Checkpoints -| Model | size
(pixels) | mAPval 0.5:0.95 | mAPval 0.5 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- | +| Model | size
(pixels) | mAPval 50-95 | mAPval 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs
@640 (B) | +|------------------------------------------------------------------------------------------------------|-----------------------|----------------------|-------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- |
- Table Notes (click to expand) + Table Notes - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` @@ -240,12 +313,13 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
+ ##
Classification ⭐ NEW
-YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials.
- Classification Checkpoints (click to expand) + Classification Checkpoints
@@ -280,7 +354,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x
- Classification Usage Examples (click to expand) + Classification Usage Examples  Open In Colab ### Train YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 956452a5aeda..a3da0dbd3231 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -1452,7 +1452,8 @@ "accelerator": "GPU", "colab": { "name": "YOLOv5 Classification Tutorial", - "provenance": [] + "provenance": [], + "toc_visible": true }, "kernelspec": { "display_name": "Python 3 (ipykernel)", diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index a4f3becfdbeb..31e0a15569f2 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -11,11 +11,12 @@ python - <\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index 6cf99650ad45..7d7f1649cc8d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -369,7 +369,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", diff --git a/utils/downloads.py b/utils/downloads.py index 21bb6608d5ba..72ea87340eb9 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -59,14 +59,14 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): LOGGER.info('') -def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): - # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. +def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. from utils.general import LOGGER def github_assets(repository, version='latest'): - # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.2 + version = f'tags/{version}' # i.e. tags/v7.0 response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets From b32f67f6beb4a921c98301fe7724003e23103728 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 20:30:14 +0100 Subject: [PATCH 158/277] `--single-cls` segments fix (#10260) --single-cls segments fix May resolve #10230 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index e107d1a2bccf..cc5f8843ef18 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -537,8 +537,6 @@ def __init__(self, self.segments[i] = segment[j] if single_cls: # single-class training, merge all classes into 0 self.labels[i][:, 0] = 0 - if segment: - self.segments[i][:, 0] = 0 # Rectangular Training if self.rect: From c9d47ae05632e2a42e560fbfeb22d3780224546c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 20:37:45 +0100 Subject: [PATCH 159/277] Created using Colaboratory --- tutorial.ipynb | 142 ++++++++++++++++++++++++------------------------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7d7f1649cc8d..657dc266da92 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -14,7 +14,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "300b4d5355ef4967bd5246afeef6eef5": { + "1f7df330663048998adcf8a45bc8f69b": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,14 +29,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_84e6829bb88845a8a4f42700b8496925", - "IPY_MODEL_c038e52d41bf4d5b9602930c3d074087", - "IPY_MODEL_2667604641764341b0bc8c6afea438fd" + "IPY_MODEL_e896e6096dd244c59d7955e2035cd729", + "IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430", + "IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479" ], - "layout": "IPY_MODEL_98b3a4806ed14102b0d75e6c571d6134" + "layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504" } }, - "84e6829bb88845a8a4f42700b8496925": { + "e896e6096dd244c59d7955e2035cd729": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -51,13 +51,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_c66a77395e42424d904699edcbb67291", + "layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5", "placeholder": "​", - 
"style": "IPY_MODEL_c4bbc15bf853439399dbcf1d40a5a407", + "style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca", "value": "100%" } }, - "c038e52d41bf4d5b9602930c3d074087": { + "a6ff238c29984b24bf6d0bd175c19430": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -73,15 +73,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_0aaabfac395b43afbdd6d752c502bbf6", + "layout": "IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_3786d970492b4aa38f886f2572fd958c", + "style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743", "value": 818322941 } }, - "2667604641764341b0bc8c6afea438fd": { + "3c085ba3f3fd4c3c8a6bb41b41ce1479": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -96,13 +96,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_b86d0f2d7be74cebbcaa884b53123eeb", + "layout": "IPY_MODEL_df554fb955c7454696beac5a82889386", "placeholder": "​", - "style": "IPY_MODEL_fa7b1497925a457f89286a71f073f416", - "value": " 780M/780M [00:57<00:00, 10.1MB/s]" + "style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333", + "value": " 780M/780M [00:05<00:00, 126MB/s]" } }, - "98b3a4806ed14102b0d75e6c571d6134": { + "16b0c8aa6e0f427e8a54d3791abb7504": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -154,7 +154,7 @@ "width": null } }, - "c66a77395e42424d904699edcbb67291": { + "c7b2dd0f78384cad8e400b282996cdf5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -206,7 +206,7 @@ "width": null } }, - "c4bbc15bf853439399dbcf1d40a5a407": { + "6a27e43b0e434edd82ee63f0a91036ca": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -221,7 +221,7 @@ "description_width": "" } }, - "0aaabfac395b43afbdd6d752c502bbf6": { + "cce0e6c0c4ec442cb47e65c674e02e92": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -273,7 +273,7 @@ "width": null } }, - "3786d970492b4aa38f886f2572fd958c": { + "c5b9f38e2f0d4f9aa97fe87265263743": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -289,7 +289,7 @@ "description_width": "" } }, - "b86d0f2d7be74cebbcaa884b53123eeb": { + "df554fb955c7454696beac5a82889386": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -341,7 +341,7 @@ "width": null } }, - "fa7b1497925a457f89286a71f073f416": { + "74e9112a87a242f4831b7d68c7da6333": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -401,7 +401,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "32e3bc15-6d02-4352-f0a3-912059d134a5" + "outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -418,7 +418,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -446,9 +446,9 @@ " vid.mp4 # video\n", " screen # screenshot\n", " path/ # directory\n", - " 
'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] }, @@ -459,7 +459,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "8e81d6e9-0360-4212-cd61-9a5a58d3f703" + "outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -472,16 +472,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 19.5MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.5ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.0ms\n", - "Speed: 0.5ms pre-process, 17.8ms inference, 17.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n", + "Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,20 +515,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "300b4d5355ef4967bd5246afeef6eef5", - "84e6829bb88845a8a4f42700b8496925", - "c038e52d41bf4d5b9602930c3d074087", - "2667604641764341b0bc8c6afea438fd", - "98b3a4806ed14102b0d75e6c571d6134", - "c66a77395e42424d904699edcbb67291", - "c4bbc15bf853439399dbcf1d40a5a407", - "0aaabfac395b43afbdd6d752c502bbf6", - "3786d970492b4aa38f886f2572fd958c", - "b86d0f2d7be74cebbcaa884b53123eeb", - "fa7b1497925a457f89286a71f073f416" + "1f7df330663048998adcf8a45bc8f69b", + "e896e6096dd244c59d7955e2035cd729", + "a6ff238c29984b24bf6d0bd175c19430", + "3c085ba3f3fd4c3c8a6bb41b41ce1479", + "16b0c8aa6e0f427e8a54d3791abb7504", + "c7b2dd0f78384cad8e400b282996cdf5", + "6a27e43b0e434edd82ee63f0a91036ca", + "cce0e6c0c4ec442cb47e65c674e02e92", + "c5b9f38e2f0d4f9aa97fe87265263743", + "df554fb955c7454696beac5a82889386", + "74e9112a87a242f4831b7d68c7da6333" ] }, - "outputId": "61ffec5e-90ea-44f6-c0ea-b006e6e7072f" + "outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad" }, "source": [ "# Download COCO val\n", @@ -546,7 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "300b4d5355ef4967bd5246afeef6eef5" + 
"model_id": "1f7df330663048998adcf8a45bc8f69b" } }, "metadata": {} @@ -560,7 +560,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "aa5d5cea-14c1-4a19-bfdf-95b7164962cf" + "outputId": "5fc61358-7bc5-4310-a310-9059f66c6322" }, "source": [ "# Validate YOLOv5s on COCO val\n", @@ -573,30 +573,30 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2066.57it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 1977.30it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.26it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:12<00:00, 2.17it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.1ms pre-process, 2.7ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 2.9ms inference, 2.0ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.82s)\n", + "Done (t=0.43s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.49s)\n", + "DONE (t=5.85s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=74.26s).\n", + "DONE (t=82.22s).\n", "Accumulating evaluation results...\n", - "DONE (t=13.46s).\n", + "DONE (t=14.92s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -676,7 +676,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f0fcdc77-5326-41e1-bacc-be5432eefa2a" + "outputId": "721b9028-767f-4a05-c964-692c245f7398" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -690,7 +690,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -699,8 +699,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 39.8MB/s]\n", - "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n", + "Dataset download success ✅ (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -734,11 +734,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 
126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 2084.63it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 255.09it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Tue, 22 Nov 2022 20:47:54 +0100 Subject: [PATCH 160/277] Created using Colaboratory --- segment/tutorial.ipynb | 62 +++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index ad44f31d3833..09ca963d4b98 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "664f49fa-554a-4dca-8d0e-5c9dd60f6d28" + "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,7 +100,7 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "6392c9ff-0863-4665-faf9-b3af9881c305" + "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad" }, "outputs": [ { @@ -108,16 +108,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:01<00:00, 9.09MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n", + "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.0ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.5ms\n", - "Speed: 0.5ms pre-process, 15.7ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n", + "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" ] } @@ -155,7 +155,7 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "4707734e-00c7-43da-d642-32c3c3fe3090" + "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449" }, "outputs": [ { @@ -182,7 +182,7 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "f96b700d-c779-4a34-930b-e85be4e58974" + "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a" }, "outputs": [ { @@ -190,15 +190,15 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1409.04it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:53<00:00, 1.38it/s]\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.8ms pre-process, 4.0ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n", "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" ] } @@ -270,7 +270,7 @@ "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "2cdb19cc-69af-4c90-f8de-af02dfedba91" + "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988" }, "outputs": [ { @@ -279,15 +279,15 @@ "text": [ "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 5.87MB/s]\n", - "Dataset download success ✅ (2.1s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", + "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -321,11 +321,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 
126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1439.54it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 253.53it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Tue, 22 Nov 2022 21:27:33 +0100 Subject: [PATCH 161/277] Created using Colaboratory --- classify/tutorial.ipynb | 63 +++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index a3da0dbd3231..c6f5d0d88a2d 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "43b2e1b5-78d9-4e1d-8530-ee9779bba160" + "outputId": "0806e375-610d-4ec0-c867-763dbb518279" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,24 +100,24 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "1b610787-7cf7-4c33-aac2-aa50fbb84a94" + "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt to yolov5s-cls.pt...\n", - "100% 10.5M/10.5M [00:03<00:00, 2.94MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n", "\n", "Fusing layers... 
\n", "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", - "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n", + "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" ] } @@ -155,23 +155,23 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "92de5f34-cf41-49e7-b679-41db94e995ac" + "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "--2022-11-18 21:48:38-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "--2022-11-22 19:53:40-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", "Resolving image-net.org (image-net.org)... 171.64.68.16\n", "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", "HTTP request sent, awaiting response... 200 OK\n", "Length: 6744924160 (6.3G) [application/x-tar]\n", "Saving to: ‘ILSVRC2012_img_val.tar’\n", "\n", - "ILSVRC2012_img_val. 100%[===================>] 6.28G 7.15MB/s in 11m 13s \n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 16.1MB/s in 10m 52s \n", "\n", - "2022-11-18 21:59:52 (9.55 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", "\n" ] } @@ -189,7 +189,7 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "9961ad87-d639-4489-b578-0a0578fefaab" + "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e" }, "outputs": [ { @@ -197,11 +197,11 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... 
\n", "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "validating: 100% 391/391 [04:48<00:00, 1.35it/s]\n", + "validating: 100% 391/391 [04:57<00:00, 1.31it/s]\n", " Class Images top1_acc top5_acc\n", " all 50000 0.715 0.902\n", " tench 50 0.94 0.98\n", @@ -1269,30 +1269,30 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 10, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "638c55b1-dc45-4eee-cabc-4921dc61faf5" + "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", - "100% 103M/103M [00:09<00:00, 11.1MB/s]\n", + "100% 103M/103M [00:00<00:00, 347MB/s] \n", "Unzipping /content/datasets/imagenette160.zip...\n", - "Dataset download success ✅ (13.2s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", "\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", @@ -1300,14 +1300,16 @@ "Image sizes 224 train, 224 test\n", "Using 1 dataloader workers\n", "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n", "\n", " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", - " 1/3 0.348G 1.31 1.09 0.794 0.979: 100% 592/592 [01:02<00:00, 9.47it/s]\n", - " 2/3 0.415G 1.09 0.852 0.883 0.99: 100% 592/592 [00:59<00:00, 10.00it/s]\n", - " 3/3 0.415G 0.954 0.776 0.907 0.994: 100% 592/592 
[00:59<00:00, 9.89it/s]\n", + " 1/5 1.47G 1.05 0.974 0.828 0.975: 100% 148/148 [00:38<00:00, 3.82it/s]\n", + " 2/5 1.73G 0.895 0.766 0.911 0.994: 100% 148/148 [00:36<00:00, 4.03it/s]\n", + " 3/5 1.73G 0.82 0.704 0.934 0.996: 100% 148/148 [00:35<00:00, 4.20it/s]\n", + " 4/5 1.73G 0.766 0.664 0.951 0.998: 100% 148/148 [00:36<00:00, 4.05it/s]\n", + " 5/5 1.73G 0.724 0.634 0.959 0.997: 100% 148/148 [00:37<00:00, 3.94it/s]\n", "\n", - "Training complete (0.051 hours)\n", + "Training complete (0.052 hours)\n", "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", @@ -1320,7 +1322,7 @@ ], "source": [ "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", - "!python classify/train.py --img 224 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache" ] }, { @@ -1452,8 +1454,7 @@ "accelerator": "GPU", "colab": { "name": "YOLOv5 Classification Tutorial", - "provenance": [], - "toc_visible": true + "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", @@ -1475,4 +1476,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From bfa1f23045c7c4136a9b8ced9d6be8249ed72692 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 24 Nov 2022 16:34:14 +0100 Subject: [PATCH 162/277] FROM nvcr.io/nvidia/pytorch:22.11-py3 (#10279) * Update Docker usage examples * Update Dockerfile Signed-off-by: Glenn Jocher * Update DEBIAN_FRONTEND Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 8 +++++--- utils/docker/Dockerfile-cpu | 4 +++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index a5035c6abc33..1ecf4c64f75f 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.10-py3 +FROM nvcr.io/nvidia/pytorch:22.11-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 8ec71622d9b6..eed1410793a1 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -9,8 +9,9 @@ FROM arm64v8/ubuntu:20.04 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages +ENV DEBIAN_FRONTEND noninteractive RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev # RUN alias python=python3 @@ -30,12 +31,13 @@ WORKDIR /usr/src/app # Copy contents # COPY . 
/usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push -# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t +# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t # Pull and Run -# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t +# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 017e2826458b..558f81f00584 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -9,8 +9,9 @@ FROM ubuntu:20.04 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages +ENV DEBIAN_FRONTEND noninteractive RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 @@ -29,6 +30,7 @@ WORKDIR /usr/src/app # Copy contents # COPY . /usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- From 31c1f111868fc0dd7140ddce13e743f79bfaa9d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 00:28:50 +0100 Subject: [PATCH 163/277] `bbox_iou()` optimizations (#10296) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/metrics.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 65ea463c0dab..0be462551b89 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,12 +234,12 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps) + w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps) # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ + (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps @@ -247,13 +247,13 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 # IoU iou = inter / union if CIoU or DIoU or GIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width + ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex 
height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU From 85f8379a68193cd9a9298e31035f01d304ac21f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 06:06:22 +0100 Subject: [PATCH 164/277] README Segmentation Usage fixes (#10298) Fixes per https://github.com/ultralytics/yolov5/issues/10288 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 298e14570860..028a1c2f064c 100644 --- a/README.md +++ b/README.md @@ -89,14 +89,14 @@ YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dat ```bash # Single-GPU -python segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 --device 0,1,2,3 +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 ``` ### Val -Validate YOLOv5m-seg accuracy on ImageNet-1k dataset: +Validate YOLOv5s-seg mask mAP on COCO dataset: ```bash bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate From 350e8eb69e01bb162ec0b22d1d13a1d1c2752853 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 17:33:43 +0100 Subject: [PATCH 165/277] Fix SegmentationModel Usage (#10303) Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index e43d9b730fc6..3028e6581e63 100644 --- a/export.py +++ b/export.py @@ -596,6 +596,7 @@ def run( f = [str(x) for x in f if x] # filter out '' and None if any(f): cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type + det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ From f9ca3657f822da65a784aae7d750d86b69244ecb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Nov 2022 09:20:59 -0800 Subject: [PATCH 166/277] Ultralytics Live 1 - ClearML https://youtu.be/KS4weDInJYs (#10324) * Ultralytics Live Session banner - ClearML @taliabender @thepycoder @pderrenger Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher 
Signed-off-by: Glenn Jocher --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 028a1c2f064c..96f40e0f040a 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,15 @@
+##
Ultralytics Live - November 29th
+ +
+ +We're excited to announce our very first [Ultralytics Live](https://www.youtube.com/@Ultralytics/streams) session ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥 + + +
+ ##
Segmentation ⭐ NEW
From 10c025d794ca395a2ca0b2a00aff65f3a92ecd8d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Nov 2022 14:50:29 -0800 Subject: [PATCH 167/277] Add README License section (#10327) * Add README License section @pderrenger @AyushExel Signed-off-by: Glenn Jocher * live fix Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 96f40e0f040a..53d37d2bcb35 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

- To request a commercial license please complete the form at Ultralytics Licensing. + To request an Enterprise License please complete the form at Ultralytics Licensing.

@@ -50,11 +50,11 @@
-##
Ultralytics Live - November 29th
+##
Ultralytics Live Session
-We're excited to announce our very first [Ultralytics Live](https://www.youtube.com/@Ultralytics/streams) session ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥 +We're excited to announce our very first [Ultralytics Live Session](https://www.youtube.com/@Ultralytics/streams) ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th at 16:00 CET** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥
@@ -432,9 +432,18 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare + +##
License
+ +YOLOv5 is available under two different licenses: + +- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. +- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). + + ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). To request a commercial license please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact).
From e808f2267d0164edb7bc45588c4fcda68c3dd8cb Mon Sep 17 00:00:00 2001 From: Hu Ye Date: Wed, 30 Nov 2022 11:32:34 +0800 Subject: [PATCH 168/277] Eliminate unused `ConfusionMatrix.matrix()` method (#10309) * fix bug in confusion_matrix Signed-off-by: Hu Ye * Update metrics.py * Update metrics.py * Update metrics.py Signed-off-by: Hu Ye Co-authored-by: Glenn Jocher --- utils/metrics.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 0be462551b89..c01f823a77a1 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -177,9 +177,6 @@ def process_batch(self, detections, labels): if not any(m1 == i): self.matrix[dc, self.nc] += 1 # predicted background - def matrix(self): - return self.matrix - def tp_fp(self): tp = self.matrix.diagonal() # true positives fp = self.matrix.sum(1) - tp # false positives From 7f5724ba4b3e421d4c9162742810c52248d06ecd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Nov 2022 19:38:29 -0800 Subject: [PATCH 169/277] Correct Segmentation Comparison Plot (#10344) @AyushExel @Laughing-q updated plot here in README Addresses https://github.com/ultralytics/yolov5/pull/10245#issuecomment-1328482213 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 53d37d2bcb35..dd24a938a060 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ We're excited to announce our very first [Ultralytics Live Session](https://www.
- +
Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. From b412696ff339fc573320f143290d4fb7146832b3 Mon Sep 17 00:00:00 2001 From: Laughing <61612323+Laughing-q@users.noreply.github.com> Date: Thu, 1 Dec 2022 10:39:24 -0600 Subject: [PATCH 170/277] Fix & speed up segment plot (#10350) * fix plot&&speed up * fix segment save-txt * fix channel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 21 ++++++++++++------ utils/plots.py | 48 ++++++++++++++-------------------------- utils/segment/general.py | 23 +++++++++++++++++++ 3 files changed, 53 insertions(+), 39 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 42389938cee7..4d8458fd879e 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -46,7 +46,7 @@ increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.segment.general import masks2segments, process_mask +from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode @@ -151,13 +151,20 @@ def run( imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): - masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + if retina_masks: + # scale bbox first the crop masks + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC + else: + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) - segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] + segments = [ + scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) + for x in segments] # Print results for c in det[:, 5].unique(): @@ -165,9 +172,9 @@ def run( s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting - annotator.masks(masks, - colors=[colors(x, True) for x in det[:, 5]], - im_gpu=None if retina_masks else im[i]) + plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ + if retina_masks else im[i] + annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): diff --git a/utils/plots.py b/utils/plots.py index 36df271c60e1..d2f232de0e97 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -114,7 +114,7 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 thickness=tf, lineType=cv2.LINE_AA) - def masks(self, masks, colors, im_gpu=None, alpha=0.5): + def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): """Plot masks at once. Args: masks (tensor): predicted masks on cuda, shape: [n, h, w] @@ -125,37 +125,21 @@ def masks(self, masks, colors, im_gpu=None, alpha=0.5): if self.pil: # convert to numpy first self.im = np.asarray(self.im).copy() - if im_gpu is None: - # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) - if len(masks) == 0: - return - if isinstance(masks, torch.Tensor): - masks = torch.as_tensor(masks, dtype=torch.uint8) - masks = masks.permute(1, 2, 0).contiguous() - masks = masks.cpu().numpy() - # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) - masks = scale_image(masks.shape[:2], masks, self.im.shape) - masks = np.asarray(masks, dtype=np.float32) - colors = np.asarray(colors, dtype=np.float32) # shape(n,3) - s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together - masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) - self.im[:] = masks * alpha + self.im * (1 - s * alpha) - else: - if len(masks) == 0: - self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 - colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 - colors = colors[:, None, None] # shape(n,1,1,3) - masks = masks.unsqueeze(3) # shape(n,h,w,1) - masks_color = masks * (colors * alpha) # shape(n,h,w,3) - - inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) - mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) - - im_gpu = im_gpu.flip(dims=[0]) # flip channel - im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) - im_gpu = im_gpu * inv_alph_masks[-1] + mcs - im_mask = (im_gpu * 255).byte().cpu().numpy() - self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape) if self.pil: # convert im back to PIL and update draw self.fromarray(self.im) diff --git a/utils/segment/general.py b/utils/segment/general.py index b526333dc5a1..6ebfd27bd9d3 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -67,6 +67,29 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return masks.gt_(0.5) +def process_mask_native(protos, masks_in, bboxes, dst_shape): + """ + Crop after upsample. 
+ proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new + pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(mh - pad[1]), int(mw - pad[0]) + masks = masks[:, top:bottom, left:right] + + masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): """ img1_shape: model input shape, [h, w] From 028b7cdb5a2e650b4d9e79eaa90a00c1efdcbcba Mon Sep 17 00:00:00 2001 From: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Date: Thu, 1 Dec 2022 22:44:14 +0200 Subject: [PATCH 171/277] fix_reading_nan_in_evolve (#10358) when there is `nan` in evolve.csv pandas read it as str remove the space before fix that Signed-off-by: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Signed-off-by: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index c5b738983719..efe8590f85a1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1036,7 +1036,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve # Save yaml with open(evolve_yaml, 'w') as f: - data = pd.read_csv(evolve_csv) + data = pd.read_csv(evolve_csv, skipinitialspace=True) data = data.rename(columns=lambda x: x.strip()) # strip keys i = np.argmax(fitness(data.values[:, :4])) # generations = len(data) From 185d475d93ebd4c03b53b4eb6057a62a52018b24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Dec 2022 13:01:46 -0800 Subject: [PATCH 172/277] Add DNN warning comment (#10368) Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 3028e6581e63..928992903b0b 100644 --- a/export.py +++ b/export.py @@ -153,7 +153,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX f, verbose=False, opset_version=opset, - do_constant_folding=True, + do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False input_names=['images'], output_names=output_names, dynamic_axes=dynamic or None) From 1ce464f6890ed1afe887ab8eed78804ae5933aa8 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Thu, 1 Dec 2022 22:32:55 +0100 Subject: [PATCH 173/277] Add docker info for ClearML remote execution (#10142) * Add docker info for ClearML remote execution * add additional clearml options to handle different python versions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/clearml/clearml_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index fe5f597a87a6..08aa9fd3327f 100644 --- 
a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -97,6 +97,11 @@ def __init__(self, opt, hyp): # will have to be added manually! self.task.connect(hyp, name='Hyperparameters') + # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent + self.task.set_base_docker("ultralytics/yolov5:latest", + docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', + docker_setup_bash_script='pip install clearml') + # Get ClearML Dataset Version if requested if opt.data.startswith('clearml://'): # data_dict should have the following keys: From 7845cea91343e430566689deff6e50f6c2b473fa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Dec 2022 13:56:33 -0800 Subject: [PATCH 174/277] Fix ClearML unconfigured error (#10369) @thepycoder adds Try Except for installed but unconfigured clearml environments. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/loggers/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index bc8dd7621579..22da87034f24 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -118,7 +118,14 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # ClearML if clearml and 'clearml' in self.include: - self.clearml = ClearmlLogger(self.opt, self.hyp) + try: + self.clearml = ClearmlLogger(self.opt, self.hyp) + except Exception: + self.clearml = None + prefix = colorstr('ClearML: ') + LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' + f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + else: self.clearml = None From d7955fe438cbc4ca9fd735b79fa99545ffa81575 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 2 Dec 2022 00:00:43 +0100 Subject: [PATCH 175/277] Fix clearml args logging when training is launch with run() (#10359) * Connect opt to clearml args * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update clearml_utils.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/clearml/clearml_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 08aa9fd3327f..7ad40ea5f987 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -96,6 +96,7 @@ def __init__(self, opt, hyp): # Only the hyperparameters coming from the yaml config file # will have to be added manually! self.task.connect(hyp, name='Hyperparameters') + self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent self.task.set_base_docker("ultralytics/yolov5:latest", From d1ffc3a3a72b438175d3b4cd6e84ef1bc8df2703 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:18:39 -0800 Subject: [PATCH 176/277] Create CITATION.cff (#10387) * Create CITATION.cff @AyushExel @pderrenger new citation file!! 
:) Signed-off-by: Glenn Jocher * Update CITATION.cff Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- CITATION.cff | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 CITATION.cff diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 000000000000..f8d5fdc3785d --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,14 @@ +cff-version: 1.2.0 +preferred-citation: + type: software + message: If you use YOLOv5, please cite it as below. + authors: + - family-names: Jocher + given-names: Glenn + orcid: "https://orcid.org/0000-0001-5950-6979" + title: "YOLOv5 by Ultralytics" + version: 7.0.0 + doi: 10.5281/zenodo.3908559 + date-released: 2020-5-29 + license: GPL-3.0 + url: "https://github.com/ultralytics/yolov5" From e96113e48591f246620a3696b7de84423c3c1e42 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:27:45 -0800 Subject: [PATCH 177/277] Update CITATION.cff to version: v7.0 (#10389) Update version: v7.0 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index f8d5fdc3785d..8e2cf1148b92 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -7,7 +7,7 @@ preferred-citation: given-names: Glenn orcid: "https://orcid.org/0000-0001-5950-6979" title: "YOLOv5 by Ultralytics" - version: 7.0.0 + version: 7.0 doi: 10.5281/zenodo.3908559 date-released: 2020-5-29 license: GPL-3.0 From a1b6e79ccf0b66f201720d82d79da14bc44bad6d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:28:33 -0800 Subject: [PATCH 178/277] Revert TQDM bar format changes (#10343) Per https://github.com/ultralytics/yolov5/issues/10342 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index efe8590f85a1..99a96576c3fd 100644 --- a/utils/general.py +++ b/utils/general.py @@ -49,7 +49,7 @@ DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode -TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format +TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') From 9722e6ffe5926fa20387c678d4ca0aef410a0c05 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 3 Dec 2022 14:41:08 -0800 Subject: [PATCH 179/277] `process_mask_native()` cleanup (#10366) * process_mask_native() cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix arg name * cleanup anno_json * Remove scale_image * Remove scale_image * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update to native Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 17 +++++++++-------- segment/val.py | 10 +++++----- utils/segment/general.py | 20 ++++++++++---------- val.py | 4 ++-- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 4d8458fd879e..4ba9e46ddab0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -44,7 +44,7 @@ from utils.dataloaders 
import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, - strip_optimizer, xyxy2xywh) + strip_optimizer) from utils.plots import Annotator, colors, save_one_box from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode @@ -161,10 +161,9 @@ def run( # Segments if save_txt: - segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) - for x in segments] + for x in reversed(masks2segments(masks))] # Print results for c in det[:, 5].unique(): @@ -172,15 +171,17 @@ def run( s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting - plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. \ - if retina_masks else im[i] - annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) + annotator.masks( + masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / + 255 if retina_masks else im[i]) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file - segj = segments[j].reshape(-1) # (n,2) to (n*2) - line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format + seg = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') diff --git a/segment/val.py b/segment/val.py index 48bf28d4bf4f..368a058f9ced 100644 --- a/segment/val.py +++ b/segment/val.py @@ -48,7 +48,7 @@ from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader -from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image +from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode @@ -160,7 +160,7 @@ def run( ): if save_json: check_requirements(['pycocotools']) - process = process_mask_upsample # more accurate + process = process_mask_native # more accurate else: process = process_mask # faster @@ -312,7 +312,7 @@ def run( pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: - plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + plot_masks.append(pred_masks[:15]) # filter top 15 to plot # Save/log if save_txt: @@ -367,8 +367,8 @@ def run( # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f"{w}_predictions.json") # predictions 
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) diff --git a/utils/segment/general.py b/utils/segment/general.py index 6ebfd27bd9d3..9da894538665 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -25,10 +25,10 @@ def crop_mask(masks, boxes): def process_mask_upsample(protos, masks_in, bboxes, shape): """ Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms + protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape: input_image_size, (h, w) return: h, w, n """ @@ -67,25 +67,25 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return masks.gt_(0.5) -def process_mask_native(protos, masks_in, bboxes, dst_shape): +def process_mask_native(protos, masks_in, bboxes, shape): """ Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms + protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape: input_image_size, (h, w) return: h, w, n """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new - pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding + gain = min(mh / shape[0], mw / shape[1]) # gain = old / new + pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(mh - pad[1]), int(mw - pad[0]) masks = masks[:, top:bottom, left:right] - masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) diff --git a/val.py b/val.py index 7c610e83a856..e84249ed383f 100644 --- a/val.py +++ b/val.py @@ -302,8 +302,8 @@ def run( # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f"{w}_predictions.json") # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) From 5dc1ce4e865960f5b5dfe4e4f5148a4731433bca Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Sat, 3 Dec 2022 16:58:58 -0600 Subject: [PATCH 180/277] Support `.txt` files as a line-by-line media list rather than streams (#10059) * Update streams.txt default Signed-off-by: Colin Wong * Change streams list extension to .streams * Read txt as media per line * Missed one * Missed another one * Update dataloaders.py * Update detect.py * Update dataloaders.py * Update detect.py * Update predict.py * Update predict.py * Update README.md Signed-off-by: Colin Wong Co-authored-by: Glenn Jocher --- README.md | 18 ++++++++++-------- classify/predict.py | 4 +++- detect.py | 4 +++- segment/predict.py | 4 +++- utils/dataloaders.py | 4 +++- 5 files changed, 22 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index dd24a938a060..3c163b3e1742 100644 --- a/README.md +++ b/README.md @@ -182,14 +182,16 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc. the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ```
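The README usage block above adds `list.txt` and `list.streams` sources: a `.txt` file is now read line by line as a list of images, videos or directories (handled by `LoadImages`), while only a `.streams` file is still treated as a list of live streams (handled by `LoadStreams`), as the `detect.py` and `utils/dataloaders.py` hunks below show. A minimal sketch of that routing; the `resolve_source` helper, `list.txt` and its contents are hypothetical, not part of the repository:

```python
from pathlib import Path

def resolve_source(source: str):
    """Route a --source argument the way this patch does (illustrative sketch only)."""
    if source.endswith('.streams'):  # every line is a live stream source, e.g. RTSP/RTMP/HTTP URLs
        return 'streams', Path(source).read_text().rsplit()
    if source.endswith('.txt'):  # every line is an image, video or directory path
        return 'media', Path(source).read_text().rsplit()
    return 'single', [source]  # plain image/video/dir/glob/URL

# Hypothetical example: build a small media list and route it
Path('list.txt').write_text('data/images/bus.jpg\ndata/images/zidane.jpg\n')
print(resolve_source('list.txt'))  # ('media', ['data/images/bus.jpg', 'data/images/zidane.jpg'])
```

The same list then works with the documented CLI, e.g. `python detect.py --weights yolov5s.pt --source list.txt`.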
diff --git a/classify/predict.py b/classify/predict.py index 9a6b00062932..5a5edabda42c 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -74,7 +76,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/detect.py b/detect.py index 58b02802e6d9..2d13401f78bd 100644 --- a/detect.py +++ b/detect.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -82,7 +84,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/segment/predict.py b/segment/predict.py index 4ba9e46ddab0..e9093baa1cc7 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -85,7 +87,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/utils/dataloaders.py b/utils/dataloaders.py index cc5f8843ef18..6d2b27ea5e60 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -238,6 +238,8 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) @@ -338,7 +340,7 @@ def __len__(self): class LoadStreams: # YOLOv5 streamloader, i.e. 
`python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size From f8539a680041a9f4fbcc4fcdd8f540724da453af Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Mon, 5 Dec 2022 21:12:19 +0100 Subject: [PATCH 181/277] Allow users to specify how to override a ClearML Task (#10363) * Added basic flag to enable reusing last task clearml * Added option to provide task ID to override * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use exist_ok argument instead Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/clearml_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 7ad40ea5f987..3457727a96a4 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -89,6 +89,7 @@ def __init__(self, opt, hyp): task_name=opt.name if opt.name != 'exp' else 'Training', tags=['YOLOv5'], output_uri=True, + reuse_last_task_id=opt.exist_ok, auto_connect_frameworks={'pytorch': False} # We disconnect pytorch auto-detection, because we added manual model save points in the code ) From 0a1fdcd8ebaebf48d95d795c3693a0148f3ec0f9 Mon Sep 17 00:00:00 2001 From: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> Date: Tue, 6 Dec 2022 23:48:17 +0100 Subject: [PATCH 182/277] Add catch for misspelled `--task` (#10420) * Add catch for misspelled task Signed-off-by: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update val.py Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher Signed-off-by: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- segment/val.py | 2 ++ val.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/segment/val.py b/segment/val.py index 368a058f9ced..5cf8ae8b41c1 100644 --- a/segment/val.py +++ b/segment/val.py @@ -463,6 +463,8 @@ def main(opt): np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') if __name__ == "__main__": diff --git a/val.py b/val.py index e84249ed383f..8d27d9d3dab1 100644 --- a/val.py +++ b/val.py @@ -399,6 +399,8 @@ def main(opt): np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') if __name__ == "__main__": From 06243845b3b7f367350ee93323e47740d40e560d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Dec 2022 15:12:20 -0800 Subject: [PATCH 183/277] [pre-commit.ci] pre-commit suggestions (#10409) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit suggestions updates: - [github.com/pre-commit/pre-commit-hooks: v4.3.0 → v4.4.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.3.0...v4.4.0) - [github.com/asottile/pyupgrade: v3.2.0 → v3.3.0](https://github.com/asottile/pyupgrade/compare/v3.2.0...v3.3.0) - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) * Fix flake8 ignore syntax Signed-off-by: Glenn Jocher * spacing Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 6 +++--- setup.cfg | 24 ++++++++++-------------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0106b4aab523..72c3cc67e59f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: # - id: end-of-file-fixer - id: trailing-whitespace @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v3.2.0 + rev: v3.3.0 hooks: - id: pyupgrade name: Upgrade code @@ -58,7 +58,7 @@ repos: - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 name: PEP8 diff --git a/setup.cfg b/setup.cfg index f12995da3e8e..d7c4cb3e1a4d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,6 @@ license_file = LICENSE description_file = README.md - [tool:pytest] norecursedirs = .git @@ -17,7 +16,6 @@ addopts = --durations=25 --color=yes - [flake8] max-line-length = 120 exclude = .tox,*.egg,build,temp @@ -27,17 +25,16 @@ verbose = 2 # https://pep8.readthedocs.io/en/latest/intro.html#error-codes format = pylint # see: https://www.flake8rules.com/ -ignore = - E731 # Do not assign a lambda expression, use a def - F405 # name may be undefined, or defined from star imports: module - E402 # module level import not at top of file - F401 # module imported but unused - W504 # line break after binary operator - E127 # continuation line over-indented for visual indent - E231 # missing whitespace after ‘,’, ‘;’, or ‘:’ - E501 # line too long - F403 # ‘from module import *’ used; unable to detect undefined names - +ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403 + # E731: Do not assign a lambda expression, use a def + # F405: name may be undefined, or defined from star imports: module + # E402: module level import not at top of file + # F401: module imported but unused + # W504: line break after binary operator + # E127: continuation line over-indented for visual indent + # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ + # E501: line too long + # F403: ‘from module import *’ used; unable to detect undefined names [isort] # https://pycqa.github.io/isort/docs/configuration/options.html @@ -45,7 +42,6 @@ line_length = 120 # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html multi_line_output = 0 - [yapf] based_on_style = pep8 spaces_before_comment = 2 From 454dae1301abb3fbf4fd1f54d5dc706cc69f8e7e Mon Sep 17 00:00:00 2001 From: Talia Bender <85292283+taliabender@users.noreply.github.com> Date: Wed, 7 Dec 2022 00:45:24 +0100 Subject: [PATCH 184/277] Ultralytics Live Session 2 - Roboflow https://youtu.be/LKpuzZllNpA (#10426) * Update README.md Info for Ep 2 of Ultralytics Live Sessions 
Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher * Update image link Make sure we update the href field in the image so when users click the image they go directly to the YouTube live page. Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3c163b3e1742..91ffcb1f95a9 100644 --- a/README.md +++ b/README.md @@ -54,9 +54,10 @@
-We're excited to announce our very first [Ultralytics Live Session](https://www.youtube.com/@Ultralytics/streams) ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th at 16:00 CET** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥 +[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can make speed up workflows with seamless dataset integration! 🔥 - + +
##
Segmentation ⭐ NEW
From de812396fe94996cfc0e8c75cfdcc446b61e3439 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Dec 2022 17:25:45 -0800 Subject: [PATCH 185/277] Add README App section (#10446) * Add README App section @AyushExel @pderrenger this should increase our app visibility per https://github.com/ultralytics/yolov5/issues/10431 Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 91ffcb1f95a9..f00cb76c6ce9 100644 --- a/README.md +++ b/README.md @@ -427,6 +427,13 @@ Get started in seconds with our verified environments. Click each icon below for
+##
App
+ +Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! + + +Ultralytics mobile app + ##
Contribute
From 1607aec4312719db820a026792223acad915015f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Dec 2022 17:27:36 -0800 Subject: [PATCH 186/277] Automatic README translation to Simplified Chinese (#10445) * Create translate-readme.yml @AyushExel @pderrenger @Laughing-q adding README translation action since we are unable to manually maintain our Chinese-translated README Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Double hyperlinks Signed-off-by: Glenn Jocher * Delete README_cn.md Signed-off-by: Glenn Jocher * Create README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 344 ------------------ .github/workflows/translate-readme.yml | 27 ++ .pre-commit-config.yaml | 2 +- README.md | 17 +- README.zh-CN.md | 479 +++++++++++++++++++++++++ 5 files changed, 513 insertions(+), 356 deletions(-) delete mode 100644 .github/README_cn.md create mode 100644 .github/workflows/translate-readme.yml create mode 100644 README.zh-CN.md diff --git a/.github/README_cn.md b/.github/README_cn.md deleted file mode 100644 index 0a2f61ee35b2..000000000000 --- a/.github/README_cn.md +++ /dev/null @@ -1,344 +0,0 @@ -
-

- - -

- - [English](../README.md) | Simplified Chinese -
-
- YOLOv5 CI - YOLOv5 Citation - Docker Pulls -
- Run on Gradient - Open In Colab - Open In Kaggle -
- -
-

- YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset. It represents Ultralytics' open research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

- -
- - - - - - - - - - - - - - - - - - - - -
-
- - -##
Documentation
- -See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. -

-##
Quick Start Examples
- -
-Install - -Clone the repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). ```bash -git clone https://github.com/ultralytics/yolov5 # clone -cd yolov5 -pip install -r requirements.txt # install ``` - -
- -
-推理 - -YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从最新YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases)下载。 - -```python -import torch - -# 模型 -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom - -# 图像 -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list - -# 推理 -results = model(img) - -# 结果 -results.print() # or .show(), .save(), .crop(), .pandas(), etc. -``` - -
- -
-用 detect.py 进行推理 - -`detect.py` 在各种数据源上运行推理, 其会从最新的 YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中自动下载 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 并将检测结果保存到 `runs/detect` 目录。 - -```bash -python detect.py --source 0 # 网络摄像头 - img.jpg # 图像 - vid.mp4 # 视频 - path/ # 文件夹 - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP 流 -``` - -
- -
-训练 - -以下指令再现了 YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) -数据集结果. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 自动从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中下载。YOLOv5n/s/m/l/x的训练时间在V100 GPU上是 1/2/4/6/8天(多GPU倍速). 尽可能使用最大的 `--batch-size`, 或通过 `--batch-size -1` 来实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092). 批量大小显示为 V100-16GB。 - -```bash -python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - - - -
- -
-教程 - -- [训练自定义数据集](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 -- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ - 推荐 -- [多GPU训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 新 -- [TFLite, ONNX, CoreML, TensorRT 输出](https://github.com/ultralytics/yolov5/issues/251) 🚀 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型剪枝/稀疏性](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) 🌟 新 -- [使用Weights & Biases 记录实验](https://github.com/ultralytics/yolov5/issues/1289) -- [Roboflow:数据集,标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 -- [使用ClearML 记录实验](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 新 - -
- - -##
Integrations
- -
- - -
-
- -
- - - - - - - - - - - -
- -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| -|:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| - - -##
Ultralytics HUB
- -[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - - - - - -##
为什么选择 YOLOv5
- -

-
- YOLOv5-P5 640 图像 (点击扩展) - -

-
-
- 图片注释 (点击扩展) - -- **COCO AP val** 表示 mAP@0.5:0.95 在5000张图像的[COCO val2017](http://cocodataset.org)数据集上,在256到1536的不同推理大小上测量的指标。 -- **GPU Speed** 衡量的是在 [COCO val2017](http://cocodataset.org) 数据集上使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例在批量大小为32时每张图像的平均推理时间。 -- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) ,批量大小设置为 8。 -- 复现 mAP 方法: `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` - -
- -### 预训练检查点 - -| 模型 | 规模
(像素) | mAP验证
0.5:0.95 | mAP验证
0.5 | 速度
CPU b1
(ms) | 速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数
(M) | 浮点运算
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | - -
- 表格注释 (点击扩展) - -- 所有检查点都以默认设置训练到300个时期. Nano和Small模型用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, 其他模型使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **mAPval** 值是 [COCO val2017](http://cocodataset.org) 数据集上的单模型单尺度的值。 -
复现方法: `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- 使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) 实例对COCO val图像的平均速度。不包括NMS时间(~1 ms/img) -
复现方法: `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和比例增强. -
复现方法: `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` - -
- - -##
分类 ⭐ 新
- -YOLOv5发布的[v6.2版本](https://github.com/ultralytics/yolov5/releases) 支持训练,验证,预测和输出分类模型!这使得训练分类器模型非常简单。点击下面开始尝试! - -
- 分类检查点 (点击展开) - -
- -我们在ImageNet上使用了4xA100的实例训练YOLOv5-cls分类模型90个epochs,并以相同的默认设置同时训练了ResNet和EfficientNet模型来进行比较。我们将所有的模型导出到ONNX FP32进行CPU速度测试,又导出到TensorRT FP16进行GPU速度测试。最后,为了方便重现,我们在[Google Colab Pro](https://colab.research.google.com/signup)上进行了所有的速度测试。 - -| 模型 | 规模
(像素) | 准确度
第一 | 准确度
前五 | 训练
90 epochs
4xA100 (小时) | 速度
ONNX CPU
(ms) | 速度
TensorRT V100
(ms) | 参数
(M) | 浮点运算
@224 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | - -
- 表格注释 (点击扩展) - -- 所有检查点都被SGD优化器训练到90 epochs, `lr0=0.001` 和 `weight_decay=5e-5`, 图像大小为224,全为默认设置。
运行数据记录于 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2。 -- **准确度** 值为[ImageNet-1k](https://www.image-net.org/index.php)数据集上的单模型单尺度。
通过`python classify/val.py --data ../datasets/imagenet --img 224`进行复制。 -- 使用Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM实例得出的100张推理图像的平均**速度**。
通过 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`进行复制。 -- 用`export.py`**导出**到FP32的ONNX和FP16的TensorRT。
通过 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`进行复制。 -
-
- -
- 分类使用实例 (点击展开) - -### 训练 -YOLOv5分类训练支持自动下载MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof和ImageNet数据集,并使用`--data` 参数. 打个比方,在MNIST上使用`--data mnist`开始训练。 - -```bash -# 单GPU -python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 - -# 多-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 -``` - -### 验证 -在ImageNet-1k数据集上验证YOLOv5m-cl的准确性: -```bash -bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate -``` - -### 预测 -用提前训练好的YOLOv5s-cls.pt去预测bus.jpg: -```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg -``` -```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub -``` - -### 导出 -导出一组训练好的YOLOv5s-cls, ResNet和EfficientNet模型到ONNX和TensorRT: -```bash -python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 -``` -
- - -##
贡献
- -我们重视您的意见! 我们希望给大家提供尽可能的简单和透明的方式对 YOLOv5 做出贡献。开始之前请先点击并查看我们的 [贡献指南](CONTRIBUTING.md),填写[YOLOv5调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 来向我们发送您的经验反馈。真诚感谢我们所有的贡献者! - - - - -##
联系
- -关于YOLOv5的漏洞和功能问题,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues)。商业咨询或技术支持服务请访问[https://ultralytics.com/contact](https://ultralytics.com/contact)。 - -
-
- - - - - - - - - - - - - - - - - - - - -
- -[assets]: https://github.com/ultralytics/yolov5/releases -[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml new file mode 100644 index 000000000000..76f59b83e65f --- /dev/null +++ b/.github/workflows/translate-readme.yml @@ -0,0 +1,27 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md + +name: Translate README + +on: + push: + branches: + - main + - master + paths: + - README.md + +jobs: + Translate: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version: 16 + # ISO Langusge Codes: https://cloud.google.com/translate/docs/languages + - name: Adding README - Chinese Simplified + uses: dephraiim/translate-readme@main + with: + LANG: zh-CN diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 72c3cc67e59f..28dbc89223cf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -50,7 +50,7 @@ repos: additional_dependencies: - mdformat-gfm - mdformat-black - exclude: "README.md|README_cn.md" + exclude: "README.md|README.zh-CN.md" - repo: https://github.com/asottile/yesqa rev: v1.4.0 diff --git a/README.md b/README.md index f00cb76c6ce9..9ee97321082e 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@

- English | [简体中文](.github/README_cn.md) + [English](README.md) | [简体中文](README.zh-CN.md)
YOLOv5 CI @@ -15,15 +15,11 @@ Open In Colab Open In Kaggle
-
-

- YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

- To request an Enterprise License please complete the form at Ultralytics Licensing. -

-

+ +YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. + +To request an Enterprise License please complete the form at Ultralytics Licensing.
@@ -313,7 +309,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | | [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -479,5 +475,4 @@ For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github
-[assets]: https://github.com/ultralytics/yolov5/releases [tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/README.zh-CN.md b/README.zh-CN.md new file mode 100644 index 000000000000..09cfc9472d9a --- /dev/null +++ b/README.zh-CN.md @@ -0,0 +1,479 @@ +
+

+ + +

+ +[英语](README.md)|[简体中文](README.zh-CN.md)
+ +
+ YOLOv5 CI + YOLOv5 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+ +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表超力对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 + +要申请企业许可证,请填写表格Ultralytics 许可. + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ +##
Ultralytics 现场会议
+ +
+ +[Ultralytics Live Session Ep。 2个](https://youtu.be/LKpuzZllNpA)✨将直播**欧洲中部时间 12 月 13 日星期二 19:00**和[约瑟夫·纳尔逊](https://github.com/josephofiowa)的[机器人流](https://roboflow.com/?ref=ultralytics)谁将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。收听 Glenn 和 Joseph 询问如何通过无缝数据集集成来加快工作流程! 🔥 + + + +
+ +##
细分 ⭐ 新
+ +
+ + +
+ +我们新的 YOLOv5[发布 v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)实例分割模型是世界上最快和最准确的,击败所有当前[SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).我们使它们非常易于训练、验证和部署。查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v7.0)并访问我们的[YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)快速入门教程。 + +
+ Segmentation Checkpoints + +
+ +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 个时期的 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)便于重现的笔记本。 + +| 模型 | 尺寸
(像素) | 地图盒子
50-95 | 地图面具
50-95 | 火车时间
300个纪元
A100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
同仁堂A100
(小姐) | 参数
(男) | 失败者
@640(二) | +| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | +| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m段](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 我:43(X) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- 使用 SGD 优化器将所有检查点训练到 300 个时期`lr0=0.01`和`weight_decay=5e-5`在图像大小 640 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) +- **准确性**值适用于 COCO 数据集上的单模型单尺度。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **速度**使用 a 对超过 100 个推理图像进行平均[协作临](https://colab.research.google.com/signup)A100 高 RAM 实例。值仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab + +### 火车 + +YOLOv5分割训练支持自动下载COCO128-seg分割数据集`--data coco128-seg.yaml`COCO-segments 数据集的参数和手动下载`bash data/scripts/get_coco.sh --train --val --segments`接着`python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### 瓦尔 + +在 COCO 数据集上验证 YOLOv5s-seg mask mAP: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### 预测 + +使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### 出口 + +将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ +##
文档
+ +见[YOLOv5 文档](https://docs.ultralytics.com)有关培训、测试和部署的完整文档。请参阅下面的快速入门示例。 + +
+Install + +克隆回购并安装[要求.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)在一个[**Python>=3.7.0**](https://www.python.org/)环境,包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +
+ +
+Inference + +YOLOv5[PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)推理。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从最新下载 +YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). + +```python +import torch + +# Model +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom + +# Images +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
+ +
+Inference with detect.py + +`detect.py`在各种来源上运行推理,下载[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从 +最新的YOLOv5[发布](https://github.com/ultralytics/yolov5/releases)并将结果保存到`runs/detect`. + +```bash +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +``` + +
+ +
+Training + +下面的命令重现 YOLOv5[可可](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)结果。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)自动从最新下载 +YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x 的训练时间为 +V100 GPU 上 1/2/4/6/8 天([多GPU](https://github.com/ultralytics/yolov5/issues/475)倍快)。使用 +最大的`--batch-size`可能,或通过`--batch-size -1`为了 +YOLOv5[自动批处理](https://github.com/ultralytics/yolov5/pull/5092).显示的批量大小适用于 V100-16GB。 + +```bash +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + + +
+ +
+Tutorials + +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ + 推荐的 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 + +
+ +##
集成
+ +
+ + +
+
+ +
+ + + + + + + + + + + +
+ +| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | +| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | +| 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | + +##
Ultralytics 集线器
+ +[Ultralytics 集线器](https://bit.ly/ultralytics_hub)是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。开始使用**自由的**现在! + + + + +##
为什么选择 YOLOv5
+ +YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实世界的结果。 + +

+
+ YOLOv5-P5 640 Figure + +

+
+
+ Figure Notes + +- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 +- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 +- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 +- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
+ +### 预训练检查点 + +| 模型 | 尺寸
(像素) | 地图
50-95 | 地图
50 | 速度
处理器b1
(小姐) | 速度
V100 b1
(小姐) | 速度
V100 b32
(小姐) | 参数
(男) | 失败者
@640(二) | +| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[电讯局][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | + +
+ Table Notes + +- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- \*\*地图\*\*值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` + +
+ +##
分类⭐新
+ +YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分类模型训练、验证和部署的支持!查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v6.2)并访问我们的[YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)快速入门教程。 + +
+ Classification Checkpoints + +
+ +我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(二) | +| ------------------------------------------------------------------------------------------ | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [高效网络_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | + +
+ Table Notes (click to expand) + +- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) +- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` +- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +
+
+ +
+ Classification Usage Examples  Open In Colab + +### 火车 + +YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集`--data`争论。开始使用 MNIST 进行训练`--data mnist`. + +```bash +# Single-GPU +python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +``` + +### 瓦尔 + +在 ImageNet-1k 数据集上验证 YOLOv5m-cls 的准确性: + +```bash +bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) +python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate +``` + +### 预测 + +使用预训练的 YOLOv5s-cls.pt 来预测 bus.jpg: + +```bash +python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub +``` + +### 出口 + +将一组经过训练的 YOLOv5s-cls、ResNet 和 EfficientNet 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 +``` + +
+ +##
环境
+ +在几秒钟内开始使用我们经过验证的环境。单击下面的每个图标了解详细信息。 + +
+ + + + + + + + + + + + + + + + + +
+ +##
贡献
+ +我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! + + + + + +##
执照
+ +YOLOv5 在两种不同的许可下可用: + +- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 +- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). + +##
接触
+ +对于 YOLOv5 错误和功能请求,请访问[GitHub 问题](https://github.com/ultralytics/yolov5/issues).如需专业支持,请[联系我们](https://ultralytics.com/contact). + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ +[tta]: https://github.com/ultralytics/yolov5/issues/303 From 342fe05e6c88221750ce7e90b7d2e8baabd397dc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 9 Dec 2022 01:47:14 +0000 Subject: [PATCH 187/277] docs: Added README."zh-CN".md translation via https://github.com/dephraiim/translate-readme --- README.zh-CN.md | 135 +++++++++++++++++++++++++----------------------- 1 file changed, 69 insertions(+), 66 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 09cfc9472d9a..0fc77565c5ef 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,9 +4,9 @@

-[英语](README.md)|[简体中文](README.zh-CN.md)
+[英语](README.md)\|[简体中文](README.zh-CN.md)
-
+
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +21,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可. -
+
@@ -79,10 +79,10 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Tutorials -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ - 推荐的 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ + 推荐的 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新
@@ -265,7 +263,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | +| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | | :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | | 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | @@ -289,10 +287,10 @@ YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实
Figure Notes -- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 -- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 -- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 -- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 +- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 +- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 +- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@@ -315,10 +313,10 @@ YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实
Table Notes -- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- \*\*地图\*\*值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- **地图**值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -333,33 +331,33 @@ YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分 我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 -| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(二) | -| ------------------------------------------------------------------------------------------ | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [高效网络_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(乙) | +| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [高效网络\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand) -- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` -- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
- +- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) +- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` +- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
Classification Usage Examples  Open In Colab @@ -394,9 +392,7 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5s-cls.pt" -) # load from PyTorch Hub +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` ### 出口 @@ -433,6 +429,13 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
+##
应用程序
+ +在您的 iOS 或 Android 设备上运行 YOLOv5 模型[Ultralytics 应用程序](https://ultralytics.com/app_install)! + + +Ultralytics mobile app + ##
贡献
我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -445,8 +448,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的许可下可用: -- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 -- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). +- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 +- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). ##
接触
From 443ef7f33e0943ccc5e5c8ff922c6fe7a0cb7053 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 12 Dec 2022 12:29:19 +0900 Subject: [PATCH 188/277] Modify a comment for OpenCV File I/O Functions (#10467) Modify comment for OpenCV File I/O Functions Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 99a96576c3fd..e5a843c4a758 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1115,7 +1115,7 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): return path -# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ +# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------------ imshow_ = cv2.imshow # copy to avoid recursion errors From 357cde9ee7da13ba3095995488c5a23631467f1a Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 13 Dec 2022 05:05:20 +0900 Subject: [PATCH 189/277] add force_reload=True when loading model using torch hub (#10460) Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 657dc266da92..6ab0a33366a5 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -963,7 +963,7 @@ "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # yolov5n - yolov5x6 or custom\n", "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." @@ -972,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 1752768fb3b3ff4f842eaaecf7eba4808ac124a9 Mon Sep 17 00:00:00 2001 From: Nioolek <40284075+Nioolek@users.noreply.github.com> Date: Wed, 14 Dec 2022 06:48:15 +0800 Subject: [PATCH 190/277] Fix Chinese README (#10465) * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * add blank * Update translate-readme.yml Disable auto-translation by changing on-push branch to 'translate_readme'. This prevents overwriting of manual fixes. Signed-off-by: Glenn Jocher * Update translate-readme.yml Signed-off-by: Glenn Jocher * fix live doc * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- .github/workflows/translate-readme.yml | 3 +- README.md | 4 +- README.zh-CN.md | 260 ++++++++++++------------- 3 files changed, 133 insertions(+), 134 deletions(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 76f59b83e65f..538ff375097e 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -6,8 +6,7 @@ name: Translate README on: push: branches: - - main - - master + - translate_readme # replace with 'master' to enable action paths: - README.md diff --git a/README.md b/README.md index 9ee97321082e..21bdc83f349e 100644 --- a/README.md +++ b/README.md @@ -50,9 +50,9 @@ To request an Enterprise License please complete the form at
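For context on PATCH 188 above: it only renames the comment over YOLOv5's OpenCV helpers from "Chinese-friendly" to "Multilanguage-friendly"; the helpers themselves are not shown in the hunk. Below is a minimal sketch of what such Unicode-path-safe wrappers typically look like, assuming the usual imdecode/imencode approach — illustrative only, not necessarily the exact code in utils/general.py.

```python
from pathlib import Path

import cv2
import numpy as np

imshow_ = cv2.imshow  # copy to avoid recursion errors


def imread(filename, flags=cv2.IMREAD_COLOR):
    # np.fromfile + cv2.imdecode accepts non-ASCII (e.g. Chinese) paths that plain cv2.imread can fail on
    return cv2.imdecode(np.fromfile(filename, np.uint8), flags)


def imwrite(filename, img):
    try:
        # encode in memory by file extension, then write the raw bytes so the path stays Unicode-safe
        cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
        return True
    except Exception:
        return False


def imshow(title, im):
    # escape the window title so non-ASCII titles do not crash some OpenCV GUI backends
    imshow_(title.encode('unicode_escape').decode(), im)
```

Routing reads and writes through in-memory buffers avoids OpenCV's locale-dependent filesystem handling, which is why the renamed comment calls these functions multilanguage-friendly rather than Chinese-friendly.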
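PATCH 189 above adds `force_reload=True` to the tutorial's hub call without explaining the flag. As a usage sketch (standard `torch.hub.load` behaviour, not repo-specific):

```python
import torch

# force_reload=True discards the cached ultralytics/yolov5 checkout under the torch hub
# cache directory and downloads it again, avoiding stale-cache import errors after the
# upstream repo has changed
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True)

results = model('https://ultralytics.com/images/zidane.jpg')  # run inference on a sample image
results.print()
```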
-[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can speed up workflows with seamless dataset integration! 🔥 +[Ultralytics Live Session Ep. 2](https://youtu.be/QGRtEG7UjtE) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can speed up workflows with seamless dataset integration! 🔥 - +
diff --git a/README.zh-CN.md b/README.zh-CN.md index 0fc77565c5ef..15232be3aa4f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,7 +4,7 @@

-[英语](README.md)\|[简体中文](README.zh-CN.md)
+[英文](README.md)\|[简体中文](README.zh-CN.md)
YOLOv5 CI @@ -17,9 +17,9 @@

-YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表超力对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -要申请企业许可证,请填写表格Ultralytics 许可. +如果要申请企业许可证,请填写表格Ultralytics 许可. +##
Ultralytics 直播会议
-[Ultralytics Live Session Ep。 2个](https://youtu.be/LKpuzZllNpA)✨将直播**欧洲中部时间 12 月 13 日星期二 19:00**和[约瑟夫·纳尔逊](https://github.com/josephofiowa)的[机器人流](https://roboflow.com/?ref=ultralytics)谁将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。收听 Glenn 和 Joseph 询问如何通过无缝数据集集成来加快工作流程! 🔥 +[Ultralytics Live Session Ep. 2](https://youtu.be/QGRtEG7UjtE) ✨ 将与 [Roboflow](https://roboflow.com/?ref=ultralytics) 的 [Joseph Nelson](https://github.com/josephofiowa) 在 **欧洲中部时间 12 月 13 日星期二的 19:00** ,他将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。欢迎收听 Glenn 和 Joseph ,以了解如何通过无缝数据集集成来加快工作流程! 🔥 - +
-##
细分 ⭐ 新
+##
实例分割模型 ⭐ 新
-我们新的 YOLOv5[发布 v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)实例分割模型是世界上最快和最准确的,击败所有当前[SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).我们使它们非常易于训练、验证和部署。查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v7.0)并访问我们的[YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)快速入门教程。 +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。
- Segmentation Checkpoints + 实例分割模型列表
-我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 个时期的 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)便于重现的笔记本。 +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 -| 模型 | 尺寸
(像素) | 地图盒子
50-95 | 地图面具
50-95 | 火车时间
300个纪元
A100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
同仁堂A100
(小姐) | 参数
(男) | 失败者
@640(二) | -| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | -| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m段](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 我:43(X) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | ------------------- | -------------------- | --------------------- | --------------------------------------------- | --------------------------------- | --------------------------------- | ----------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | -- 使用 SGD 优化器将所有检查点训练到 300 个时期`lr0=0.01`和`weight_decay=5e-5`在图像大小 640 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) -- **准确性**值适用于 COCO 数据集上的单模型单尺度。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **速度**使用 a 对超过 100 个推理图像进行平均[协作临](https://colab.research.google.com/signup)A100 高 RAM 实例。值仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` +- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
- Segmentation Usage Examples  Open In Colab + 分割模型使用示例  Open In Colab -### 火车 +### 训练 -YOLOv5分割训练支持自动下载COCO128-seg分割数据集`--data coco128-seg.yaml`COCO-segments 数据集的参数和手动下载`bash data/scripts/get_coco.sh --train --val --segments`接着`python train.py --data coco.yaml`. +YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 ```bash -# Single-GPU +# 单 GPU python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 -# Multi-GPU DDP +# 多 GPU, DDP 模式 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 ``` -### 瓦尔 +### 验证 在 COCO 数据集上验证 YOLOv5s-seg mask mAP: ```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 ``` ### 预测 @@ -119,13 +119,13 @@ python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) ``` | ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | | ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -### 出口 +### 模型导出 将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: @@ -137,12 +137,12 @@ python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --devi ##
文档
-见[YOLOv5 文档](https://docs.ultralytics.com)有关培训、测试和部署的完整文档。请参阅下面的快速入门示例。 +有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。
-Install +安装 -克隆回购并安装[要求.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)在一个[**Python>=3.7.0**](https://www.python.org/)环境,包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). +克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/) 。 ```bash git clone https://github.com/ultralytics/yolov5 # clone @@ -153,10 +153,10 @@ pip install -r requirements.txt # install
-Inference +推理 -YOLOv5[PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)推理。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). +使用 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python import torch @@ -177,10 +177,10 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
-Inference with detect.py +使用 detect.py 推理 -`detect.py`在各种来源上运行推理,下载[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从 -最新的YOLOv5[发布](https://github.com/ultralytics/yolov5/releases)并将结果保存到`runs/detect`. +`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 +最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。 ```bash python detect.py --weights yolov5s.pt --source 0 # webcam @@ -198,13 +198,14 @@ python detect.py --weights yolov5s.pt --source 0 #
-Training +训练 -下面的命令重现 YOLOv5[可可](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)结果。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x 的训练时间为 -V100 GPU 上 1/2/4/6/8 天([多GPU](https://github.com/ultralytics/yolov5/issues/475)倍快)。使用 -最大的`--batch-size`可能,或通过`--batch-size -1`为了 -YOLOv5[自动批处理](https://github.com/ultralytics/yolov5/pull/5092).显示的批量大小适用于 V100-16GB。 +下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 +最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) +将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 +YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://github.com/ultralytics/yolov5/issues/475) 训练速度更快)。 +尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 +YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -219,16 +220,15 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-Tutorials +教程 - [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ - 推荐的 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 - [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 - [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 - [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) - [模型集成](https://github.com/ultralytics/yolov5/issues/318) - [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) - [超参数进化](https://github.com/ultralytics/yolov5/issues/607) @@ -236,12 +236,12 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 - [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 - [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新
-##
集成
+##
模块集成

@@ -263,118 +263,118 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - -| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | -| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | -| 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Deci ⭐ 新 | +| :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化 YOLOv5 以获得更好的推理性能[Deci](https://bit.ly/yolov5-deci-platform) | -##
Ultralytics 集线器
+##
Ultralytics HUB
-[Ultralytics 集线器](https://bit.ly/ultralytics_hub)是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。开始使用**自由的**现在! +[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! ##
为什么选择 YOLOv5
-YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实世界的结果。 +YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结果。

- YOLOv5-P5 640 Figure + YOLOv5-P5 640 图

- Figure Notes + 图表笔记 -- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 -- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 -- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 -- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
-### 预训练检查点 - -| 模型 | 尺寸
(像素) | 地图
50-95 | 地图
50 | 速度
处理器b1
(小姐) | 速度
V100 b1
(小姐) | 速度
V100 b32
(小姐) | 参数
(男) | 失败者
@640(二) | -| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[电讯局][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +### 预训练模型 + +| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| --------------------------------------------------------------------------------------------------- | ------------------- | -------------------- | ------------------- | ------------------------------- | -------------------------------- | ------------------------------ | ----------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
- Table Notes + 笔记 -- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **地图**值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 +- **mAPval**在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-##
分类⭐新
+##
分类网络 ⭐ 新
-YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分类模型训练、验证和部署的支持!查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v6.2)并访问我们的[YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)快速入门教程。 +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对分类模型训练、验证和部署的支持!详情请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v6.2) 或访问我们的 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) 以快速入门。
- Classification Checkpoints + 分类网络模型
-我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 - -| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(乙) | -| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [高效网络\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +我们使用 4xA100 实例在 ImageNet 上训练了 90 个 epochs 得到 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于重现,我们在 Google 上进行了所有速度测试 [Colab Pro](https://colab.research.google.com/signup) 。 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | +| -------------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ------------------------------------------ | --------------------------------- | -------------------------------------- | --------------- | -----------------------| +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
- Table Notes (click to expand) + Table Notes (点击以展开) -- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` -- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` +- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
- Classification Usage Examples  Open In Colab + 分类训练示例  Open In Colab -### 火车 +### 训练 -YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集`--data`争论。开始使用 MNIST 进行训练`--data mnist`. +YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集,命令中使用 `--data` 即可。 MNIST 示例 `--data mnist` 。 ```bash -# Single-GPU +# 单 GPU python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 -# Multi-GPU DDP +# 多 GPU, DDP 模式 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 ``` -### 瓦尔 +### 验证 在 ImageNet-1k 数据集上验证 YOLOv5m-cls 的准确性: @@ -395,7 +395,7 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` -### 出口 +### 模型导出 将一组经过训练的 YOLOv5s-cls、ResNet 和 EfficientNet 模型导出到 ONNX 和 TensorRT: @@ -407,7 +407,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
环境
-在几秒钟内开始使用我们经过验证的环境。单击下面的每个图标了解详细信息。 +使用下面我们经过验证的环境,在几秒钟内开始使用 YOLOv5 。单击下面的图标了解详细信息。 -##
应用程序
+##
APP
-在您的 iOS 或 Android 设备上运行 YOLOv5 模型[Ultralytics 应用程序](https://ultralytics.com/app_install)! +通过下载 [Ultralytics APP](https://ultralytics.com/app_install) ,以在您的 iOS 或 Android 设备上运行 YOLOv5 模型! Ultralytics mobile app ##
贡献
-我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者!
-##
执照
+##
License
-YOLOv5 在两种不同的许可下可用: +YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 -- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 -##
接触
+##
联系我们
-对于 YOLOv5 错误和功能请求,请访问[GitHub 问题](https://github.com/ultralytics/yolov5/issues).如需专业支持,请[联系我们](https://ultralytics.com/contact). +若发现 YOLOv5 的 bug 或有功能需求,请访问 [GitHub 问题](https://github.com/ultralytics/yolov5/issues) 。如需专业支持,请 [联系我们](https://ultralytics.com/contact) 。
From 1ae91940abe9ca3e064784bb18c12271ab3157b4 Mon Sep 17 00:00:00 2001 From: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Date: Thu, 15 Dec 2022 07:56:42 -0500 Subject: [PATCH 191/277] Update Comet hyperlinks (#10500) * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 2 +- classify/tutorial.ipynb | 4 ++-- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 2 +- utils/loggers/comet/README.md | 12 ++++++------ 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 21bdc83f349e..56015b239fc9 100644 --- a/README.md +++ b/README.md @@ -264,7 +264,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - |Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Ultralytics HUB
diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index c6f5d0d88a2d..94bafba00204 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1341,7 +1341,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1476,4 +1476,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 09ca963d4b98..e1179ffc1cc6 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -454,7 +454,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -590,4 +590,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 6ab0a33366a5..cebcee3dfd24 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -860,7 +860,7 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! 
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 8f206cd9830e..8a361e2b211d 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -54,7 +54,7 @@ That's it! Comet will automatically log your hyperparameters, command line argum yolo-ui # Try out an Example! -Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) Or better yet, try it out yourself in this Colab Notebook @@ -119,7 +119,7 @@ You can control the frequency of logged predictions and the associated images by **Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. -Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) ```shell @@ -161,7 +161,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \ ## Uploading a Dataset to Comet Artifacts -If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag. 
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. @@ -251,6 +251,6 @@ comet optimizer -j utils/loggers/comet/hpo.py \ ### Visualizing Results -Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) hyperparameter-yolo From b564c1f3653a9b11038a80e348a34afbf59943be Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:05:00 +0900 Subject: [PATCH 192/277] Check `conf_thres` and `iou_thres` prior to use (#10515) * Checks conf_thres and iou_thres at beign Why checks conf_thres after operation with it? Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index e5a843c4a758..6145801ca47f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -898,6 +898,9 @@ def non_max_suppression( list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output @@ -909,10 +912,6 @@ def non_max_suppression( nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates - # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - # Settings # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height From 8d65f9d8ce274f78949ab88b7359580cc8cabacc Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:10:26 +0900 Subject: [PATCH 193/277] Support extensive shape for functions related to bounding box localization (#10516) * support extensive shape for functions related to bounding box localization Signed-off-by: Yonghye Kwon * merge exp branch updates Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- utils/general.py | 54 
++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6145801ca47f..744abb439ed1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -750,30 +750,30 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y return y def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y return y @@ -782,18 +782,18 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): if clip: clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height return y def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y return y @@ -833,9 +833,9 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): gain = ratio_pad[0][0] pad = ratio_pad[1] - boxes[:, [0, 2]] -= pad[0] # x padding - boxes[:, [1, 3]] -= pad[1] # y padding - boxes[:, :4] /= gain + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= 
gain clip_boxes(boxes, img0_shape) return boxes @@ -862,13 +862,13 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=F def clip_boxes(boxes, shape): # Clip boxes (xyxy) to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x1 - boxes[:, 1].clamp_(0, shape[0]) # y1 - boxes[:, 2].clamp_(0, shape[1]) # x2 - boxes[:, 3].clamp_(0, shape[0]) # y2 + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 else: # np.array (faster grouped) - boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 def clip_segments(segments, shape): From b2f94e8c356083bb85d76a60ea2b54d5ad9fbe36 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Dec 2022 12:26:57 +0100 Subject: [PATCH 194/277] Update to ONNX opset 17 (#10522) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 928992903b0b..baf86f1d9297 100644 --- a/export.py +++ b/export.py @@ -624,7 +624,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') From 43623729cc634d690cece1f1d015e4d59e0b9d98 Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Sat, 17 Dec 2022 19:55:08 +0800 Subject: [PATCH 195/277] Update train.py (#10485) Setting `master_port` to 1 may cause `Permission denied` due to failure to bind the port. So it is better to set it to a port greater than 1024. 
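For reference, ports below 1024 are privileged on Linux, so an unprivileged user cannot bind them and `--master_port 1` fails exactly as described above. A minimal sketch of picking a safe port, assuming only the Python standard library (`find_free_port` is an illustrative helper, not part of YOLOv5):

```python
import socket


def find_free_port() -> int:
    # Bind port 0 so the OS assigns a free ephemeral port (unprivileged, i.e. well above 1024)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('127.0.0.1', 0))
        return s.getsockname()[1]


print(f'--master_port {find_free_port()}')  # e.g. pass this value to torch.distributed.run
```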
Signed-off-by: Wang Xin Signed-off-by: Wang Xin Co-authored-by: Ayush Chaurasia --- classify/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/classify/train.py b/classify/train.py index a50845a4f781..4767be77bd61 100644 --- a/classify/train.py +++ b/classify/train.py @@ -6,7 +6,7 @@ $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt From 2c35c1b318ecd4856275039220c052a976d2cfe2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sun, 18 Dec 2022 21:03:01 +0900 Subject: [PATCH 196/277] Limit detections without explicit if condition (#10502) * limit detections without explicit if condition Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup indexing code for limit detections Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 744abb439ed1..70b6f6446f23 100644 --- a/utils/general.py +++ b/utils/general.py @@ -978,8 +978,7 @@ def non_max_suppression( c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] + i = i[:max_det] # limit detections if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix From b2a0f1cdc579bd81b3c4543752abaa4a90a53c8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Dec 2022 20:06:01 +0100 Subject: [PATCH 197/277] Update `onnx>=1.12.0` (#10526) --- export.py | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index baf86f1d9297..7910178b2338 100644 --- a/export.py +++ b/export.py @@ -132,7 +132,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - check_requirements('onnx') + check_requirements('onnx>=1.12.0') import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') diff --git a/requirements.txt b/requirements.txt index 85eb839df8a0..4a8649c696a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export -# onnx>=1.9.0 # ONNX export +# onnx>=1.12.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export From 10e93d295fed1459666409751b4a897521c31b90 
Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 19 Dec 2022 18:27:34 +0900 Subject: [PATCH 198/277] Set a seed of generator with an option for more randomness when training several models with different seeds (#10486) * set seed with parameter Signed-off-by: Yonghye Kwon * make seed to be a large number * set seed with a parameter * set a seed of dataloader with opt for more randomness Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- train.py | 3 ++- utils/dataloaders.py | 5 +++-- utils/segment/dataloaders.py | 5 +++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index 8b5446e58f2d..5d75f22b6335 100644 --- a/train.py +++ b/train.py @@ -198,7 +198,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), - shuffle=True) + shuffle=True, + seed=opt.seed) labels = np.concatenate(dataset.labels, 0) mlc = int(labels[:, 0].max()) # max label class assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 6d2b27ea5e60..302cc3300d35 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -115,7 +115,8 @@ def create_dataloader(path, image_weights=False, quad=False, prefix='', - shuffle=False): + shuffle=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -140,7 +141,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 9de6f0fbf903..d66b36115e3f 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -37,7 +37,8 @@ def create_dataloader(path, prefix='', shuffle=False, mask_downsample_ratio=1, - overlap_mask=False): + overlap_mask=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -64,7 +65,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader( dataset, batch_size=batch_size, From 5545ff3545d886417b4eff12203d1af4d758cc10 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 20 Dec 2022 01:19:14 +0900 Subject: [PATCH 199/277] Sort by confidence and remove excess boxes without explicit if (#10517) * sort by confidence and remove excess boxes without explicit if Signed-off-by: Yonghye Kwon * cleanup indexing boxes for remove excess boxes it is related to https://github.com/ultralytics/yolov5/pull/10502. 
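The cleanup below relies on a PyTorch indexing property: slicing an index tensor past its length is a no-op, so a single `argsort` plus slice both sorts boxes by confidence and caps them at `max_nms`, replacing the earlier if/elif branches. A standalone sketch with dummy values (the small cap here is illustrative; the actual limit in `utils/general.py` is much larger):

```python
import torch

max_nms = 5  # illustrative cap only
x = torch.rand(8, 6)  # 8 candidate boxes as [x1, y1, x2, y2, conf, cls]

# One expression sorts by confidence (column 4) and keeps at most max_nms boxes;
# if fewer than max_nms boxes exist, the slice simply returns all of them.
x = x[x[:, 4].argsort(descending=True)[:max_nms]]

print(x.shape)  # torch.Size([5, 6])
print(x[:, 4])  # confidences in descending order
```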
Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- utils/general.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 70b6f6446f23..0bbcb6e7334c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -969,10 +969,7 @@ def non_max_suppression( n = x.shape[0] # number of boxes if not n: # no boxes continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - else: - x = x[x[:, 4].argsort(descending=True)] # sort by confidence + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes From f72f0fec980b35d7f9575d15b326f529b5a9ac0d Mon Sep 17 00:00:00 2001 From: Amir Pourmand Date: Tue, 20 Dec 2022 18:37:43 +0330 Subject: [PATCH 200/277] Add Albumentation Default hyperparameter file (#10529) * add albumentation hyps * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename hyp.noAugmentation.yaml to hyp.no-augmentation.yaml * Update hyp.no-augmentation.yaml Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- data/hyps/hyp.no-augmentation.yaml | 35 ++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 data/hyps/hyp.no-augmentation.yaml diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml new file mode 100644 index 000000000000..8fbd5b262afa --- /dev/null +++ b/data/hyps/hyp.no-augmentation.yaml @@ -0,0 +1,35 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters when using Albumentations frameworks +# python train.py --hyp hyp.no-augmentation.yaml +# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +# this parameters are all zero since we want to use albumentation framework +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0 # image HSV-Hue augmentation (fraction) +hsv_s: 00 # image HSV-Saturation augmentation (fraction) +hsv_v: 0 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0 # image translation (+/- fraction) +scale: 0 # image scale (+/- gain) +shear: 0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.0 # image flip left-right (probability) +mosaic: 0.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) From 887d95296642b2fdee1cafa80c0c59618ca3c2e7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:17:19 +0100 Subject: [PATCH 201/277] Created using Colaboratory --- segment/tutorial.ipynb | 21 
+++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index e1179ffc1cc6..dc6599415480 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -176,7 +176,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -264,7 +264,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -454,7 +454,8 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -462,11 +463,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -590,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From c765b8c274c78676ae351f159953652152725fcc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:18:09 +0100 Subject: [PATCH 202/277] Created using Colaboratory --- classify/tutorial.ipynb | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 94bafba00204..06af62a1b4c1 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -183,7 +183,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1269,7 +1269,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1341,7 +1341,8 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1349,11 +1350,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -1476,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From 96a71b17a276fa0a0b6fbdf68d579ce0603bfa2f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:19:45 +0100 Subject: [PATCH 203/277] Created using Colaboratory --- tutorial.ipynb | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index cebcee3dfd24..e83617e9dce7 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -412,7 +412,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -465,7 +465,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -535,7 +535,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -566,7 +566,7 @@ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -682,7 +682,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -860,7 +860,8 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! 
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -868,11 +869,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ], "metadata": { "id": "nWOsI5wJR1o3" @@ -972,4 +973,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 2370a5513ebf67bd10b8d15fd6353e008380bc43 Mon Sep 17 00:00:00 2001 From: "Mr.Li" <1055271769@qq.com> Date: Thu, 22 Dec 2022 21:55:09 +0800 Subject: [PATCH 204/277] Bugfix: update dataloaders.py to fix "resize to 0" (#10558) * fix bug "resize to 0" * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use math.ceil() for resize to enforce min floor of 1 pixel Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 302cc3300d35..cbb3114e94d8 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -737,7 +737,7 @@ def load_image(self, i): r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA - im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized From 5f8054c47c4938c6df6c3f1344de774f15a18404 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Dec 2022 18:15:33 +0100 Subject: [PATCH 205/277] FROM nvcr.io/nvidia/pytorch:22.12-py3 (#10588) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 1ecf4c64f75f..26b3439c1941 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.11-py3 +FROM 
nvcr.io/nvidia/pytorch:22.12-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 3c1afd9ab69f289f46f6ad291e7be3cae15f6c35 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Dec 2022 14:54:43 +0100 Subject: [PATCH 206/277] ENV OMP_NUM_THREADS=1 (#10593) @Laughing-q @AyushExel setting to 1 due to recent issues Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 26b3439c1941..e0d4411118f0 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -29,7 +29,7 @@ WORKDIR /usr/src/app RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Set environment variables -ENV OMP_NUM_THREADS=8 +ENV OMP_NUM_THREADS=1 # Usage Examples ------------------------------------------------------------------------------------------------------- From e72dc1fabaaa47273a825f35ba3a8884bcc2e16b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 14:32:21 +0100 Subject: [PATCH 207/277] Dockerfile uninstall torch nightly in favor of stable (#10604) @AyushExel @Laughing-q fix for Docker error ``` AttributeError: Can't get attribute '_rebuild_parameter_v2' on ``` Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index e0d4411118f0..abc3da0ee502 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -12,10 +12,10 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx -# Install pip packages +# Install pip packages (uninstall torch nightly in favor of stable) COPY requirements.txt . RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext # torch torchvision +RUN pip uninstall -y Pillow torchtext torch torchvision RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From b1e997642cec09f55ce71af8af874b9e7463aeba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Dec 2022 14:36:32 +0100 Subject: [PATCH 208/277] Bump actions/stale from 6 to 7 (#10590) Bumps [actions/stale](https://github.com/actions/stale) from 6 to 7. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v6...v7) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9067c343608b..b21e9c00e6c5 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v6 + - uses: actions/stale@v7 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From 8ca182613499c323a411f559b7b5ea072122c897 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 15:41:03 +0100 Subject: [PATCH 209/277] Update `pycocotools>=2.0.6` (#10605) * Update `pycocotools>=2.0.6` Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update Dockerfile Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- segment/val.py | 2 +- utils/docker/Dockerfile | 4 ++-- val.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4a8649c696a8..c6bd0f26cabb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -44,6 +44,6 @@ seaborn>=0.11.0 # Extras ---------------------------------------------------------------------- # mss # screenshots # albumentations>=1.0.3 -# pycocotools>=2.0 # COCO mAP +# pycocotools>=2.0.6 # COCO mAP # roboflow # ultralytics # HUB https://hub.ultralytics.com diff --git a/segment/val.py b/segment/val.py index 5cf8ae8b41c1..248d2bee9be1 100644 --- a/segment/val.py +++ b/segment/val.py @@ -159,7 +159,7 @@ def run( callbacks=Callbacks(), ): if save_json: - check_requirements(['pycocotools']) + check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index abc3da0ee502..6f9de5208e7f 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,8 +16,8 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ - 'opencv-python<4.6.0.66' \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ + Pillow>=9.1.0 pycocotools>=2.0.6 ultralytics \ --extra-index-url https://download.pytorch.org/whl/cu113 # Create working directory diff --git a/val.py b/val.py index 8d27d9d3dab1..599aa1afdd4a 100644 --- a/val.py +++ b/val.py @@ -309,7 +309,7 @@ def run( json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements('pycocotools') + check_requirements('pycocotools>=2.0.6') from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval From 65071da7181e2ede9d3514f20c88e6bd646af07c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 16:47:26 +0100 Subject: [PATCH 210/277] Update Dockerfile `pip install -U pycocotools` (#10606) * Update Dockerfile `pip install -U pycocotools` Previous command not working. 
Signed-off-by: Glenn Jocher * Update Dockerfile Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 6f9de5208e7f..98e9c2927b87 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,8 +16,9 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision +RUN pip install --no-cache -U pycocotools # install --upgrade RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ - Pillow>=9.1.0 pycocotools>=2.0.6 ultralytics \ + Pillow>=9.1.0 ultralytics \ --extra-index-url https://download.pytorch.org/whl/cu113 # Create working directory From a389bff3cb0209c4f74c512fc340a414056fc45d Mon Sep 17 00:00:00 2001 From: Hisam Fahri Date: Tue, 3 Jan 2023 03:09:02 +0700 Subject: [PATCH 211/277] docs: remove past Ultralytics Live Session event from readme (#10635) Signed-off-by: Hisam Fahri Signed-off-by: Hisam Fahri --- README.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/README.md b/README.md index 56015b239fc9..c32f3d6fe4ae 100644 --- a/README.md +++ b/README.md @@ -45,17 +45,6 @@ To request an Enterprise License please complete the form at Ultralytics Live Session
- - - ##
Segmentation ⭐ NEW
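Note: patches #10605 and #10606 above pin `pycocotools>=2.0.6`, the dependency YOLOv5 uses for COCO mAP scoring. For readers unfamiliar with that library, the sketch below shows the standard pycocotools evaluation loop behind val.py's `--save-json` option; the annotation and prediction paths are illustrative placeholders, not values taken from the patches.

```python
# Minimal pycocotools evaluation sketch (file paths are placeholders).
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

anno = COCO("annotations/instances_val2017.json")             # ground-truth COCO annotations
pred = anno.loadRes("runs/val/exp/yolov5s_predictions.json")  # detections saved with --save-json
coco_eval = COCOeval(anno, pred, "bbox")                      # "segm" would score masks instead
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()                                         # prints the AP/AR table, incl. mAP@0.5:0.95
```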
From 632bf485b4ab2adbaef71f4eced5e6b59ecef7e2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 3 Jan 2023 05:10:13 +0900 Subject: [PATCH 212/277] Remove rocket emoji causes cp949 codec errors (#10646) Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c6bd0f26cabb..c0e4a91d7dd1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -# YOLOv5 🚀 requirements +# YOLOv5 requirements # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ From c0ca1d21f24ced15fcc3ec6e80f5e55d78fde9d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 3 Jan 2023 19:21:31 +0100 Subject: [PATCH 213/277] `ultralytics/assets` update `master` to `main` (#10663) --- README.md | 74 ++++++++++++++++++++--------------------- README.zh-CN.md | 74 ++++++++++++++++++++--------------------- classify/tutorial.ipynb | 4 +-- segment/tutorial.ipynb | 4 +-- tutorial.ipynb | 4 +-- 5 files changed, 80 insertions(+), 80 deletions(-) diff --git a/README.md b/README.md index c32f3d6fe4ae..8044252cb74b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

[English](README.md) | [简体中文](README.zh-CN.md) @@ -23,25 +23,25 @@ To request an Enterprise License please complete the form at - - + + - - + + - - + + - - + + - - + + - - + + - +
@@ -233,20 +233,20 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
- +

- + - + - +
@@ -261,7 +261,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
Why YOLOv5
@@ -395,19 +395,19 @@ Get started in seconds with our verified environments. Click each icon below for
- + - + - + - + - +
@@ -443,25 +443,25 @@ For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github
- - + + - - + + - - + + - - + + - - + + - - + + - +
[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/README.zh-CN.md b/README.zh-CN.md index 15232be3aa4f..ab76afbc5252 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,7 +1,7 @@

- +

[英文](README.md)\|[简体中文](README.zh-CN.md)
@@ -23,25 +23,25 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - - + + - - + + - - + + - - + + - - + + - - + + - +
@@ -245,20 +245,20 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
- +

- + - + - +
@@ -272,7 +272,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! - + ##
为什么选择 YOLOv5
@@ -412,19 +412,19 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
- + - + - + - + - +
@@ -458,25 +458,25 @@ YOLOv5 在两种不同的 License 下可用:
- - + + - - + + - - + + - - + + - - + + - - + + - +
[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 06af62a1b4c1..03c1dd0bc0de 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -1222,7 +1222,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index dc6599415480..cb1af34d9f17 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -216,7 +216,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index e83617e9dce7..6308898b8b71 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -369,7 +369,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -622,7 +622,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", From 9fcbf93a1f0afacecb8b41b86fb1304db1942928 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 6 Jan 2023 18:45:31 +0100 Subject: [PATCH 214/277] Created using Colaboratory --- tutorial.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 6308898b8b71..c320d699a940 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -651,17 +651,17 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "logger = 'ClearML' #@param ['ClearML', 'Comet', 'TensorBoard']\n", "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", + "if logger == 'ClearML':\n", + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()\n", "elif logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()" + "elif logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" ], "metadata": { "id": "i3oKtE4g-aNn" From 79c05e5689817645bb12b7f77a3d8318582c0f05 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 7 Jan 2023 00:19:14 +0530 Subject: [PATCH 215/277] Add Neural Magic DeepSparse tutorial to README (#10698) * Update README.md Signed-off-by: Ayush Chaurasia * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Ayush Chaurasia Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 8044252cb74b..a2245db46c14 100644 --- a/README.md +++ b/README.md @@ -223,7 +223,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW -- [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://bit.ly/yolov5-neuralmagic) 🌟 NEW - [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW
@@ -247,13 +247,13 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - - + + -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| +|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Neural Magic ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic)| ##
Ultralytics HUB
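Note: the "Created using Colaboratory" commit above switches the notebook's default experiment logger from TensorBoard to ClearML. Below is a self-contained sketch of that three-way logger bootstrap; it assumes the relevant pip packages are already installed, and the TensorBoard branch is paraphrased because the original cell relies on notebook magics.

```python
# Sketch of the notebook logger-selection cell (assumes clearml / comet_ml are installed).
logger = "ClearML"  # one of: "ClearML", "Comet", "TensorBoard"

if logger == "ClearML":
    import clearml
    clearml.browser_login()   # interactive ClearML authentication, as in the notebook cell
elif logger == "Comet":
    import comet_ml
    comet_ml.init()           # prompts for a Comet API key
elif logger == "TensorBoard":
    # The notebook uses IPython magics here: %load_ext tensorboard / %tensorboard --logdir runs/train
    print("Start TensorBoard against runs/train to monitor training.")
```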
From fdc35b119ad21c7f205596dbb238f780c87040ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 6 Jan 2023 20:04:42 +0100 Subject: [PATCH 216/277] Update Ultralytics App banner URL (#10704) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a2245db46c14..80a4da2ade8e 100644 --- a/README.md +++ b/README.md @@ -417,7 +417,7 @@ Get started in seconds with our verified environments. Click each icon below for Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! -Ultralytics mobile app +Ultralytics mobile app ##
Contribute
From 1ea901bd5257e8688a122a27afcb21d74b7c5fbc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 9 Jan 2023 14:42:57 +0100 Subject: [PATCH 217/277] Migrate policies to ultralytics/.github (#10721) --- .github/CODE_OF_CONDUCT.md | 128 ------------------------------------- .github/SECURITY.md | 7 -- 2 files changed, 135 deletions(-) delete mode 100644 .github/CODE_OF_CONDUCT.md delete mode 100644 .github/SECURITY.md diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 27e59e9aab38..000000000000 --- a/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# YOLOv5 🚀 Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -- Demonstrating empathy and kindness toward other people -- Being respectful of differing opinions, viewpoints, and experiences -- Giving and gracefully accepting constructive feedback -- Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -- Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -- The use of sexualized language or imagery, and sexual attention or - advances of any kind -- Trolling, insulting or derogatory comments, and personal or political attacks -- Public or private harassment -- Publishing others' private information, such as a physical or email - address, without their explicit permission -- Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -hello@ultralytics.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. 
- -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. - -[homepage]: https://www.contributor-covenant.org diff --git a/.github/SECURITY.md b/.github/SECURITY.md deleted file mode 100644 index aa3e8409da6b..000000000000 --- a/.github/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. - -### Reporting a Vulnerability - -To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! 
From caba2aed4a6c2ad85712acb7cb1dd22ed886dc95 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 9 Jan 2023 20:35:02 +0100 Subject: [PATCH 218/277] Update translate-readme.yml (#10725) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/translate-readme.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 538ff375097e..2bb351ec7e81 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -19,7 +19,7 @@ jobs: uses: actions/setup-node@v3 with: node-version: 16 - # ISO Langusge Codes: https://cloud.google.com/translate/docs/languages + # ISO Language Codes: https://cloud.google.com/translate/docs/languages - name: Adding README - Chinese Simplified uses: dephraiim/translate-readme@main with: From 37d1e5e5df33f4a9bef75661e5a075927b058540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 10 Jan 2023 16:40:17 +0800 Subject: [PATCH 219/277] Update some Chinese content of Neural Magic (#10727) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update README.zh-CN.md Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> --- README.zh-CN.md | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index ab76afbc5252..8c6efadfd242 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -45,16 +45,6 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表
Ultralytics 直播会议 - - - ##
实例分割模型 ⭐ 新
@@ -260,12 +250,12 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - +
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Deci ⭐ 新 | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | | :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化 YOLOv5 以获得更好的推理性能[Deci](https://bit.ly/yolov5-deci-platform) | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
From cdd804d39ff84b413bde36a84006f51769b6043b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 10 Jan 2023 22:05:41 +0800 Subject: [PATCH 220/277] Fix logo-neuralmagic.png image link (#10731) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update README.zh-CN.md Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> --- README.zh-CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 8c6efadfd242..c406f35820a7 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -249,7 +249,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + From 0e24b7e2f584beea3f573ddb82c3b93558daeb1f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 12 Jan 2023 17:43:12 +0100 Subject: [PATCH 221/277] PIL `.get_size()` deprecation fix (#10754) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index d2f232de0e97..41a387200ba4 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -88,7 +88,7 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w, h = self.font.getsize(label) # text width, height + _, _, w, h = self.font.getbbox(label) # text width, height outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, From bd10f0f6c72d3a0135b72f31b51057eb74c116eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 12 Jan 2023 18:01:36 +0100 Subject: [PATCH 222/277] Revert PIL deprecation fix Signed-off-by: Glenn Jocher --- utils/plots.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 41a387200ba4..f84aed9fb5c7 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -88,7 +88,8 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - _, _, w, h = self.font.getbbox(label) # text width, height + w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 + # _, _, w, h = self.font.getbbox(label) # text width, height (New) outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, From 9650f16f41248b24a72276e2287185350939285d Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Fri, 13 Jan 2023 02:35:05 +0800 Subject: [PATCH 223/277] Ignore *_paddle_model/ dir (#10745) Signed-off-by: Wang Xin Signed-off-by: Wang Xin --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 69a00843ea42..6bcedfac610d 100755 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ VOC/ *_saved_model/ *_web_model/ *_openvino_model/ +*_paddle_model/ darknet53.conv.74 yolov3-tiny.conv.15 From 2b356c0ab24bc945d69ab66b67e8af755697b611 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Jan 2023 12:40:15 +0100 Subject: [PATCH 224/277] Update Dockerfile (#10768) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 98e9c2927b87..c8b88357cb6d 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -54,7 +54,7 @@ ENV OMP_NUM_THREADS=1 # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew # Clean up -# docker system prune -a --volumes +# sudo docker system prune -a --volumes # Update Ubuntu drivers # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ From 3a059125dd7b01c76b7a02b59814ed5bc32d9ac4 Mon Sep 17 00:00:00 2001 From: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Date: Sat, 14 Jan 2023 21:43:27 +0800 Subject: [PATCH 225/277] Fx confusion-matrix xlabel typo (#10692) fix confusion-matrix xlabel typo Signed-off-by: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Signed-off-by: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/metrics.py b/utils/metrics.py index c01f823a77a1..7fb077774384 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -208,7 +208,7 @@ def plot(self, normalize=True, save_dir='', names=()): vmin=0.0, xticklabels=ticklabels, yticklabels=ticklabels).set_facecolor((1, 1, 1)) - ax.set_ylabel('True') + ax.set_xlabel('True') ax.set_ylabel('Predicted') ax.set_title('Confusion Matrix') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) From 3b6e27ad0ad990cc69c519e969a6094aacfb9e3e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 14 Jan 2023 14:46:56 +0100 Subject: [PATCH 226/277] [pre-commit.ci] pre-commit suggestions (#10655) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v3.3.0 → v3.3.1](https://github.com/asottile/pyupgrade/compare/v3.3.0...v3.3.1) - [github.com/PyCQA/isort: 5.10.1 → 5.11.4](https://github.com/PyCQA/isort/compare/5.10.1...5.11.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 28dbc89223cf..f7ae077ee272 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,14 +24,14 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pyupgrade name: Upgrade code args: [ --py37-plus ] - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.11.4 hooks: - id: isort name: Sort imports From 589edc7b012d45a5c8ad6231d7716f88cb6e43ca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 15 Jan 2023 18:43:06 +0100 Subject: [PATCH 227/277] Ultralytics Live Session 3 - YOLOv8 https://youtu.be/IPcpYO5ITa8 (#10769) * Ultralytics Live Session 3 - YOLOv8 https://youtu.be/IPcpYO5ITa8 Ultralytics Live Session Ep.3 is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, image segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. 
In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Signed-off-by: Glenn Jocher Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Co-authored-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 80a4da2ade8e..399ebe5666e2 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,20 @@ To request an Enterprise License please complete the form at Ultralytics Live Session + +
+ +[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. + +In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. + +Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 + + + +
+ ##
Segmentation ⭐ NEW
From c442a2e99321ebd72b242bc961824f82d46e4fd3 Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Tue, 17 Jan 2023 14:40:03 +0100 Subject: [PATCH 228/277] Update Ultralytics Live Session 3 - https://youtu.be/IPcpYO5ITa8 (#10782) * Update Date of Ultralytics Live Session 3 Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 399ebe5666e2..f19130f6f094 100644 --- a/README.md +++ b/README.md @@ -49,14 +49,14 @@ To request an Enterprise License please complete the form at -[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. +[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 24th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. -In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. +In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage you to come prepared with any questions you may have. -Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 +To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - +
##
Segmentation ⭐ NEW
From 064365d8683fd002e9ad789c1e91fa3d021b44f0 Mon Sep 17 00:00:00 2001 From: Johan Bergman <35481994+duran67@users.noreply.github.com> Date: Fri, 20 Jan 2023 23:49:43 +0100 Subject: [PATCH 229/277] Update parse_opt() in export.py to work as in train.py (#10789) Update parse_opt() to work as in train.py Change parse_opt() be able to use parse_known_args(), same as in train.py, so export.main() can be called from other script without error. e.g.: from yolov5 import export opt = export.parse_opt(True) opt.weights = opt.include = ("torchscript", "onnx") opt.data = opt.imgsz = [, ] export.main(opt) Signed-off-by: Johan Bergman <35481994+duran67@users.noreply.github.com> Signed-off-by: Johan Bergman <35481994+duran67@users.noreply.github.com> --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 7910178b2338..9ca3441bc66a 100644 --- a/export.py +++ b/export.py @@ -610,7 +610,7 @@ def run( return f # return list of exported files/dirs -def parse_opt(): +def parse_opt(known=False): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') @@ -638,7 +638,7 @@ def parse_opt(): nargs='+', default=['torchscript'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') - opt = parser.parse_args() + opt = parser.parse_known_args()[0] if known else parser.parse_args() print_args(vars(opt)) return opt From 6a62c94190583cca257bb091c6ced9d9c3b2dd3d Mon Sep 17 00:00:00 2001 From: Laughing <61612323+Laughing-q@users.noreply.github.com> Date: Sat, 4 Feb 2023 20:21:30 +0800 Subject: [PATCH 230/277] fix zero labels (#10820) update --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 1eae5db8f816..7ab75f17fb18 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -201,7 +201,7 @@ def random_perspective(im, # Transform label coordinates n = len(targets) if n: - use_segments = any(x.any() for x in segments) + use_segments = any(x.any() for x in segments) and len(segments) == n new = np.zeros((n, 4)) if use_segments: # warp segments segments = resample_segments(segments) # upsample From d02ee60512c50d9573bb7a136d8baade8a0bd332 Mon Sep 17 00:00:00 2001 From: Talia Bender <85292283+taliabender@users.noreply.github.com> Date: Sat, 4 Feb 2023 13:30:10 +0100 Subject: [PATCH 231/277] Update README.md (#10893) * Update README.md Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f19130f6f094..2938710214b4 100644 --- a/README.md +++ b/README.md @@ -49,14 +49,17 @@ To request an Enterprise License please complete the form at -[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 24th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. 
See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. +⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ -In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage you to come prepared with any questions you may have. +Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. + +Glenn will be joined by Michael Goin of Neural Magic on February 8th at 12 EST/18 CET to discuss how to achieve GPU-class performance for YOLOv5 on CPUs. Be sure to come prepared with any questions you have about the model deployment process! To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - - + + + ##
Segmentation ⭐ NEW
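Note: PR #10789 changes `export.parse_opt()` to accept `known=True` so that `export.main()` can be driven from another script. The commit message sketches this usage with the argument values elided; the version below is a hedged sketch that fills them with illustrative placeholders only (the weights, data and image-size values are assumptions, not part of the patch).

```python
# Programmatic export via parse_opt(known=True); all values are placeholders.
from yolov5 import export  # assumes the yolov5 repository is importable as a package

opt = export.parse_opt(True)           # parse_known_args(): unrelated CLI args are ignored
opt.weights = "yolov5s.pt"             # placeholder checkpoint path
opt.include = ("torchscript", "onnx")  # export formats, as in the commit message
opt.data = "data/coco128.yaml"         # placeholder dataset yaml
opt.imgsz = [640, 640]                 # placeholder inference size (height, width)
export.main(opt)
```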
From b8a2c47fa94011260e0980a217dd7ec0d537414e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Feb 2023 15:11:32 +0400 Subject: [PATCH 232/277] Update Dockerfile `FROM pytorch/pytorch:latest` (#10902) * Update Dockerfile `FROM pytorch/pytorch:latest` Signed-off-by: Glenn Jocher * isort * precommit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spelling * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * Cleanup * Cleanup * Cleanup --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 34 ++- README.md | 107 ++++--- README.zh-CN.md | 168 +++++------ classify/tutorial.ipynb | 2 +- classify/val.py | 6 +- utils/docker/Dockerfile | 33 ++- utils/loggers/__init__.py | 24 +- utils/loggers/clearml/README.md | 39 +-- utils/loggers/comet/README.md | 10 +- utils/loggers/wandb/README.md | 162 ----------- utils/loggers/wandb/log_dataset.py | 27 -- utils/loggers/wandb/sweep.py | 41 --- utils/loggers/wandb/sweep.yaml | 143 ---------- utils/loggers/wandb/wandb_utils.py | 434 ++--------------------------- 14 files changed, 250 insertions(+), 980 deletions(-) delete mode 100644 utils/loggers/wandb/README.md delete mode 100644 utils/loggers/wandb/log_dataset.py delete mode 100644 utils/loggers/wandb/sweep.py delete mode 100644 utils/loggers/wandb/sweep.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f7ae077ee272..83425ad6cf78 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,6 +4,7 @@ default_language_version: python: python3.8 +exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci ci: autofix_prs: true @@ -28,13 +29,13 @@ repos: hooks: - id: pyupgrade name: Upgrade code - args: [ --py37-plus ] + args: [--py37-plus] - - repo: https://github.com/PyCQA/isort - rev: 5.11.4 - hooks: - - id: isort - name: Sort imports + # - repo: https://github.com/PyCQA/isort + # rev: 5.11.4 + # hooks: + # - id: isort + # name: Sort imports - repo: https://github.com/pre-commit/mirrors-yapf rev: v0.32.0 @@ -50,15 +51,22 @@ repos: additional_dependencies: - mdformat-gfm - mdformat-black - exclude: "README.md|README.zh-CN.md" - - - repo: https://github.com/asottile/yesqa - rev: v1.4.0 - hooks: - - id: yesqa + # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 5.0.4 hooks: - id: flake8 name: PEP8 + + #- repo: https://github.com/codespell-project/codespell + # rev: v2.2.2 + # hooks: + # - id: codespell + # args: + # - --ignore-words-list=crate,nd + + #- repo: https://github.com/asottile/yesqa + # rev: v1.4.0 + # hooks: + # - id: yesqa diff --git a/README.md b/README.md index 2938710214b4..e836abf6d551 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,10 @@

- [English](README.md) | [简体中文](README.zh-CN.md) -
-
+[English](README.md) | [简体中文](README.zh-CN.md) +
+ +
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +22,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics Licensing. -
+
@@ -49,7 +50,7 @@ To request an Enterprise License please complete the form at -⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ +⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. @@ -78,13 +79,13 @@ Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7. We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. -| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|-----------------------------------------------|--------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | +| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | - All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official - **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` @@ -97,6 +98,7 @@ We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640
Segmentation Usage Examples  Open In Colab ### Train + YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. ```bash @@ -108,33 +110,41 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train ``` ### Val + Validate YOLOv5s-seg mask mAP on COCO dataset: + ```bash bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate ``` ### Predict + Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + ```bash python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg ``` + ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) ``` -![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) ---- |--- +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ### Export + Export YOLOv5s-seg model to ONNX and TensorRT: + ```bash python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 ```
- ##
Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. @@ -164,10 +174,10 @@ YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -245,7 +255,6 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - ##
Integrations

@@ -268,10 +277,9 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Neural Magic ⭐ NEW| -|:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic)| - +| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | +| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
@@ -280,7 +288,6 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - ##
Why YOLOv5
YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. @@ -303,19 +310,19 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We ### Pretrained Checkpoints -| Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|----------------------|-------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | +| ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -327,7 +334,6 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
- ##
Classification ⭐ NEW
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. @@ -340,18 +346,18 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings sup We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. | Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| +| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | | [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | | [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | | [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | | [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | | [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | +| | | | | | | | | | | [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | | [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | | [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | | [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | +| | | | | | | | | | | [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | | [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | | [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | @@ -364,6 +370,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + @@ -371,6 +378,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x Classification Usage Examples  Open In Colab ### Train + YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. ```bash @@ -382,28 +390,37 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai ``` ### Val + Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: + ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` ### Predict + Use pretrained YOLOv5s-cls.pt to predict bus.jpg: + ```bash python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` + ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` ### Export + Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: + ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` - + ##
Environments
@@ -436,14 +453,13 @@ Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics Ultralytics mobile app - ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! -
+ ##
License
@@ -452,7 +468,6 @@ YOLOv5 is available under two different licenses: - **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. - **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). - ##
Contact
For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). diff --git a/README.zh-CN.md b/README.zh-CN.md index c406f35820a7..b69d3921df99 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,9 +4,9 @@

-[英文](README.md)\|[简体中文](README.zh-CN.md)
+[英文](README.md)|[简体中文](README.zh-CN.md)
-
+
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +21,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可. -
+
@@ -61,18 +61,18 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 教程 -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 @@ -253,8 +255,8 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | | 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
@@ -277,36 +279,36 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
图表笔记 -- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 -- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 -- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 -- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
### 预训练模型 -| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | -| --------------------------------------------------------------------------------------------------- | ------------------- | -------------------- | ------------------- | ------------------------------- | -------------------------------- | ------------------------------ | ----------------- | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
笔记 -- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 -- **mAPval**在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 +- \*\*mAPval\*\*在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -321,33 +323,33 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对 我们使用 4xA100 实例在 ImageNet 上训练了 90 个 epochs 得到 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于重现,我们在 Google 上进行了所有速度测试 [Colab Pro](https://colab.research.google.com/signup) 。 -| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | -| -------------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ------------------------------------------ | --------------------------------- | -------------------------------------- | --------------- | -----------------------| -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | +| -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (点击以展开) -- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 -- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` -- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
- +- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` +- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
分类训练示例  Open In Colab @@ -382,7 +384,9 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` ### 模型导出 @@ -438,8 +442,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 -- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 ##
联系我们
diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 03c1dd0bc0de..cc18aa934039 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -78,7 +78,7 @@ "source": [ "# 1. Predict\n", "\n", - "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", "\n", "```shell\n", "python classify/predict.py --source 0 # webcam\n", diff --git a/classify/val.py b/classify/val.py index 8657036fb2a2..03ba817d5ea2 100644 --- a/classify/val.py +++ b/classify/val.py @@ -128,9 +128,9 @@ def run( LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): - aci = acc[targets == i] - top1i, top5i = aci.mean(0).tolist() - LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + acc_i = acc[targets == i] + top1i, top5i = acc_i.mean(0).tolist() + LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index c8b88357cb6d..e18b2ac69678 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,23 +3,33 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.12-py3 -RUN rm -rf /opt/pytorch # remove 1.2GB dir +# FROM docker.io/pytorch/pytorch:latest +FROM pytorch/pytorch:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +# RUN alias python=python3 -# Install pip packages (uninstall torch nightly in favor of stable) +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + +# Install pip packages COPY requirements.txt . 
-RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -U pycocotools # install --upgrade -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ - Pillow>=9.1.0 ultralytics \ - --extra-index-url https://download.pytorch.org/whl/cu113 +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3 + # tensorflow tensorflowjs \ # Create working directory RUN mkdir -p /usr/src/app @@ -32,6 +42,9 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Set environment variables ENV OMP_NUM_THREADS=1 +# Cleanup +ENV DEBIAN_FRONTEND teletype + # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 22da87034f24..1e7f38e0d677 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -84,10 +84,6 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - # self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" @@ -105,14 +101,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # W&B if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
- # self.logger.warning(s) + self.wandb = WandbLogger(self.opt) else: self.wandb = None @@ -175,7 +165,7 @@ def on_pretrain_routine_end(self, labels, names): self.comet_logger.on_pretrain_routine_end(paths) def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) + log_dict = dict(zip(self.keys[:3], vals)) # Callback runs on train batch end # ni: number integrated batches (since train start) if self.plots: @@ -221,10 +211,10 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) # Callback runs on val end if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') + if self.wandb: + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') if self.comet_logger: self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) @@ -253,7 +243,7 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): for i, name in enumerate(self.best_keys): self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) + self.wandb.end_epoch() if self.clearml: self.clearml.current_epoch_logged_images = set() # reset epoch image limit diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 3cf4c268583f..ca41c040193c 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -23,7 +23,6 @@ And so much more. It's up to you how many of these tools you want to use, you ca ![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) -

@@ -35,15 +34,15 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t 1. Install the `clearml` python package: - ```bash - pip install clearml - ``` + ```bash + pip install clearml + ``` 1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: - ```bash - clearml-init - ``` + ```bash + clearml-init + ``` That's it! You're done 😎 @@ -60,18 +59,20 @@ pip install clearml>=1.2.0 This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script, by default the project will be called `YOLOv5` and the task `Training`. -PLEASE NOTE: ClearML uses `/` as a delimter for subprojects, so be careful when using `/` in your project name! +PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name! ```bash python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache ``` or with custom project and task name: + ```bash python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache ``` This will capture: + - Source code + uncommitted changes - Installed packages - (Hyper)parameters @@ -94,7 +95,7 @@ There even more we can do with all of this information, like hyperparameter opti ## 🔗 Dataset Version Management -Versioning your data separately from your code is generally a good idea and makes it easy to aqcuire the latest version too. This repository supports supplying a dataset version ID and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! +Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! ![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) @@ -112,6 +113,7 @@ The YOLOv5 repository supports a number of different datasets by using yaml file |_ LICENSE |_ README.txt ``` + But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml files contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. 
@@ -132,13 +134,15 @@ Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `nam ### Upload Your Dataset -To get this dataset into ClearML as a versionned dataset, go to the dataset root folder and run the following command: +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: + ```bash cd coco128 clearml-data sync --project YOLOv5 --name coco128 --folder . ``` The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other: + ```bash # Optionally add --parent if you want to base # this version on another dataset version, so no duplicate files are uploaded! @@ -177,7 +181,7 @@ python utils/loggers/clearml/hpo.py ## 🤯 Remote Execution (advanced) -Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs. +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. This is where the ClearML Agent comes into play. Check out what the agent can do here: - [YouTube video](https://youtu.be/MX3BrXnaULs) @@ -186,6 +190,7 @@ This is where the ClearML Agent comes into play. Check out what the agent can do In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: + ```bash clearml-agent daemon --queue [--docker] ``` @@ -194,11 +199,11 @@ clearml-agent daemon --queue [--docker] With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! -🪄 Clone the experiment by right clicking it +🪄 Clone the experiment by right-clicking it 🎯 Edit the hyperparameters to what you wish them to be -⏳ Enqueue the task to any of the queues by right clicking it +⏳ Enqueue the task to any of the queues by right-clicking it ![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) @@ -206,7 +211,8 @@ With our agent running, we can give it some work. Remember from the HPO section Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! -To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instatiated: +To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instantiated: + ```python # ... 
# Loggers @@ -214,16 +220,17 @@ data_dict = None if RANK in {-1, 0}: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.clearml: - loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE + loggers.clearml.task.execute_remotely(queue="my_queue") # <------ ADD THIS LINE # Data_dict is either None is user did not choose for ClearML dataset or is filled in by ClearML data_dict = loggers.clearml.data_dict # ... ``` + When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! ### Autoscaling workers -ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! +ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying! Check out the autoscalers getting started video below. diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 8a361e2b211d..47e6a45654b8 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -23,7 +23,7 @@ pip install comet_ml There are two ways to configure Comet with YOLOv5. -You can either set your credentials through enviroment variables +You can either set your credentials through environment variables **Environment Variables** @@ -49,11 +49,12 @@ project_name= # This will default to 'yolov5' python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt ``` -That's it! Comet will automatically log your hyperparameters, command line arguments, training and valiation metrics. You can visualize and analyze your runs in the Comet UI +That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI yolo-ui # Try out an Example! + Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) Or better yet, try it out yourself in this Colab Notebook @@ -65,6 +66,7 @@ Or better yet, try it out yourself in this Colab Notebook By default, Comet will log the following items ## Metrics + - Box Loss, Object Loss, Classification Loss for the training and validation data - mAP_0.5, mAP_0.5:0.95 metrics for the validation data. 
- Precision and Recall for the validation data @@ -121,7 +123,6 @@ You can control the frequency of logged predictions and the associated images by Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) - ```shell python train.py \ --img 640 \ @@ -192,6 +193,7 @@ If you would like to use a dataset from Comet Artifacts, set the `path` variable # contents of artifact.yaml file path: "comet:///:" ``` + Then pass this file to your training script in the following way ```shell @@ -221,7 +223,7 @@ python train.py \ ## Hyperparameter Search with the Comet Optimizer -YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualie hyperparameter sweeps in the Comet UI. +YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualize hyperparameter sweeps in the Comet UI. ### Configuring an Optimizer Sweep diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md deleted file mode 100644 index d78324b4c8e9..000000000000 --- a/utils/loggers/wandb/README.md +++ /dev/null @@ -1,162 +0,0 @@ -📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. - -- [About Weights & Biases](#about-weights-&-biases) -- [First-Time Setup](#first-time-setup) -- [Viewing runs](#viewing-runs) -- [Disabling wandb](#disabling-wandb) -- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) -- [Reports: Share your work with the world!](#reports) - -## About Weights & Biases - -Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. - -Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: - -- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time -- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically -- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization -- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators -- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently -- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models - -## First-Time Setup - -
- Toggle Details -When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. - -W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - -```shell -$ python train.py --project ... --name ... -``` - -YOLOv5 notebook example: Open In Colab Open In Kaggle -Screen Shot 2021-09-29 at 10 23 13 PM - -
- -## Viewing Runs - -
- Toggle Details -Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in realtime . All important information is logged: - -- Training & Validation losses -- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 -- Learning Rate over time -- A bounding box debugging panel, showing the training progress over time -- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** -- System: Disk I/0, CPU utilization, RAM memory usage -- Your trained model as W&B Artifact -- Environment: OS and Python types, Git repository and state, **training command** - -

Weights & Biases dashboard

-
- -## Disabling wandb - -- training after running `wandb disabled` inside that directory creates no wandb run - ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - -- To enable wandb again, run `wandb online` - ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) - -## Advanced Usage - -You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. - -
-

1: Train and Log Evaluation simultaneously

- This is an extension of the previous section, but it'll also start training after uploading the dataset. This also logs the evaluation Table - The evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets, - so no images will be uploaded from your system more than once. -
- Usage - Code $ python train.py --upload_data val - -![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) - -
- -

2: Visualize and Version Datasets

- Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact. -
- Usage - Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. - -![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) - -
- -

3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that - can be used to train a model directly from the dataset artifact. This also logs evaluation -
- Usage - Code $ python train.py --data {data}_wandb.yaml - -![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) - -
- -

4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. - You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged. -
- Usage - Code $ python train.py --save_period 1 - -![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) - -
- -
- -

5: Resume runs from checkpoint artifacts.

-Any run can be resumed using artifacts if the --resume argument starts with the wandb-artifact:// prefix followed by the run path, i.e., wandb-artifact://username/project/runid. This doesn't require the model checkpoint to be present on the local system. -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -

6: Resume runs from dataset artifact & checkpoint artifacts.

- Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device - The syntax is the same as in the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set both --upload_dataset or - train from the _wandb.yaml file and set --save_period -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -
- -

Reports

-W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - -Weights & Biases Reports - -## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - -## Status - -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb69307..000000000000 --- a/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py deleted file mode 100644 index d49ea6f2778b..000000000000 --- a/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. - hyp_dict = vars(wandb.config).get("_items").copy() - - # Workaround: get necessary opt args - opt = parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml deleted file mode 100644 index 688b1ea0285f..000000000000 --- a/utils/loggers/wandb/sweep.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Hyperparameters for training -# To set range- -# Provide min and max values as: -# parameter: -# -# min: scalar -# max: scalar -# OR -# -# Set a specific list of search space- -# parameter: -# values: [scalar1, scalar2, scalar3...] 
-# -# You can use grid, bayesian and hyperopt search strategy -# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration - -program: utils/loggers/wandb/sweep.py -method: random -metric: - name: metrics/mAP_0.5 - goal: maximize - -parameters: - # hyperparameters: set either min, max range or values list - data: - value: "data/coco128.yaml" - batch_size: - values: [64] - epochs: - values: [10] - - lr0: - distribution: uniform - min: 1e-5 - max: 1e-1 - lrf: - distribution: uniform - min: 0.01 - max: 1.0 - momentum: - distribution: uniform - min: 0.6 - max: 0.98 - weight_decay: - distribution: uniform - min: 0.0 - max: 0.001 - warmup_epochs: - distribution: uniform - min: 0.0 - max: 5.0 - warmup_momentum: - distribution: uniform - min: 0.0 - max: 0.95 - warmup_bias_lr: - distribution: uniform - min: 0.0 - max: 0.2 - box: - distribution: uniform - min: 0.02 - max: 0.2 - cls: - distribution: uniform - min: 0.2 - max: 4.0 - cls_pw: - distribution: uniform - min: 0.5 - max: 2.0 - obj: - distribution: uniform - min: 0.2 - max: 4.0 - obj_pw: - distribution: uniform - min: 0.5 - max: 2.0 - iou_t: - distribution: uniform - min: 0.1 - max: 0.7 - anchor_t: - distribution: uniform - min: 2.0 - max: 8.0 - fl_gamma: - distribution: uniform - min: 0.0 - max: 4.0 - hsv_h: - distribution: uniform - min: 0.0 - max: 0.1 - hsv_s: - distribution: uniform - min: 0.0 - max: 0.9 - hsv_v: - distribution: uniform - min: 0.0 - max: 0.9 - degrees: - distribution: uniform - min: 0.0 - max: 45.0 - translate: - distribution: uniform - min: 0.0 - max: 0.9 - scale: - distribution: uniform - min: 0.0 - max: 0.9 - shear: - distribution: uniform - min: 0.0 - max: 10.0 - perspective: - distribution: uniform - min: 0.0 - max: 0.001 - flipud: - distribution: uniform - min: 0.0 - max: 1.0 - fliplr: - distribution: uniform - min: 0.0 - max: 1.0 - mosaic: - distribution: uniform - min: 0.0 - max: 1.0 - mixup: - distribution: uniform - min: 0.0 - max: 1.0 - copy_paste: - distribution: uniform - min: 0.0 - max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 238f4edbf2a0..6bc2ec510d0a 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,110 +1,32 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# WARNING ⚠️ wandb is deprecated and will be removed in future release. +# See supported integrations at https://github.com/ultralytics/yolov5#integrations import logging import os import sys from contextlib import contextmanager from pathlib import Path -from typing import Dict -import yaml -from tqdm import tqdm +from utils.general import LOGGER, colorstr FILE = Path(__file__).resolve() ROOT = FILE.parents[3] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH - -from utils.dataloaders import LoadImagesAndLabels, img2label_paths -from utils.general import LOGGER, check_dataset, check_file +RANK = int(os.getenv('RANK', -1)) +DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ + f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." 
try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir + LOGGER.warning(DEPRECATION_WARNING) except (ImportError, AssertionError): wandb = None -RANK = int(os.getenv('RANK', -1)) -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def check_wandb_dataset(data_file): - is_trainset_wandb_artifact = False - is_valset_wandb_artifact = False - if isinstance(data_file, dict): - # In that case another dataset manager has already processed it and we don't have to - return data_file - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: - data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = isinstance(data_dict['train'], - str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) - is_valset_wandb_artifact = isinstance(data_dict['val'], - str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) - if is_trainset_wandb_artifact or is_valset_wandb_artifact: - return data_dict - else: - return check_dataset(data_file) - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if RANK not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - class WandbLogger(): """Log training runs, datasets, models, and predictions to Weights & Biases. 
@@ -132,38 +54,16 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type (str) -- To set the job_type for this run """ - # Temporary-fix - if opt.upload_dataset: - opt.upload_dataset = False - # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") - # Pre-training routine -- self.job_type = job_type - self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run + self.wandb, self.wandb_run = wandb, wandb.run if wandb else None self.val_artifact, self.train_artifact = None, None self.train_artifact_path, self.val_artifact_path = None, None self.result_artifact = None self.val_table, self.result_table = None, None - self.bbox_media_panel_images = [] - self.val_table_path_map = None self.max_imgs_to_log = 16 - self.wandb_artifact_data_dict = None self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, - # but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) - opt.resume = model_artifact_name - elif self.wandb: + if self.wandb: self.wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, @@ -172,51 +72,15 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type=job_type, id=run_id, allow_val_change=True) if not wandb.run else wandb.run + if self.wandb_run: if self.job_type == 'Training': - if opt.upload_dataset: - if not opt.resume: - self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - if isinstance(opt.data, dict): # This means another dataset manager has already processed the dataset info (e.g. ClearML) # and they will have stored the already processed dict in opt.data self.data_dict = opt.data - elif opt.resume: - # resume from artifact - if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - self.data_dict = dict(self.wandb_run.config.data_dict) - else: # local resume - self.data_dict = check_wandb_dataset(opt.data) - else: - self.data_dict = check_wandb_dataset(opt.data) - self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) self.setup_training(opt) - if self.job_type == 'Dataset Creation': - self.wandb_run.config.update({"upload_dataset": True}) - self.data_dict = self.check_and_upload_dataset(opt) - - def check_and_upload_dataset(self, opt): - """ - Check if the dataset format is compatible and upload it as W&B artifact - - arguments: - opt (namespace)-- Commandline arguments for current run - - returns: - Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
- """ - assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - with open(config_path, errors='ignore') as f: - wandb_data_dict = yaml.safe_load(f) - return wandb_data_dict - def setup_training(self, opt): """ Setup the necessary processes for training YOLO models: @@ -231,81 +95,18 @@ def setup_training(self, opt): self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): - modeldir, _ = self.download_model_artifact(opt) - if modeldir: - self.weights = Path(modeldir) / "last.pt" + model_dir, _ = self.download_model_artifact(opt) + if model_dir: + self.weights = Path(model_dir) / "last.pt" config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.hyp, config.imgsz - data_dict = self.data_dict - if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( - data_dict.get('train'), opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( - data_dict.get('val'), opt.artifact_alias) - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - - if self.val_artifact is not None: - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.val_table = self.val_artifact.get("val") - if self.val_table_path_map is None: - self.map_val_table_path() if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 if opt.evolve or opt.noplots: self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval - train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None - # Update the the data_dict to point to local artifacts dir - if train_from_artifact: - self.data_dict = data_dict - - def download_dataset_artifact(self, path, alias): - """ - download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX - - arguments: - path -- path of the dataset to be used for training - alias (str)-- alias of the artifact to be download/used for training - - returns: - (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset - is found otherwise returns (None, None) - """ - if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" - datadir = dataset_artifact.download() - return datadir, dataset_artifact - return None, None - - def download_model_artifact(self, opt): - """ - 
download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX - - arguments: - opt (namespace) -- Commandline arguments for this run - """ - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - # epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') - is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' - return modeldir, model_artifact - return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ @@ -332,190 +133,8 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") - def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - """ - Log the dataset as W&B artifact and return the new data file with W&B links - - arguments: - data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. - single_class (boolean) -- train multi-class data as single-class - project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new - file with _wandb postfix. Eg -> data_wandb.yaml - - returns: - the new .yaml file with artifact links. it can be used to start training directly from artifacts - """ - upload_dataset = self.wandb_run.config.upload_dataset - log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' - self.data_dict = check_dataset(data_file) # parse and check - data = dict(self.data_dict) - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - - # log train set - if not log_val_only: - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), - names, - name='train') if data.get('train') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - - self.val_artifact = self.create_dataset_table( - LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - - path = Path(data_file) - # create a _wandb.yaml file with artifacts links if both train and test set are logged - if not log_val_only: - path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path - path = ROOT / 'data' / path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) - LOGGER.info(f"Created dataset config file {path}") - - if self.job_type == 'Training': # builds correct artifact pipeline graph - if not log_val_only: - self.wandb_run.log_artifact( - self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! 
- self.wandb_run.use_artifact(self.val_artifact) - self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') - self.map_val_table_path() - else: - self.wandb_run.log_artifact(self.train_artifact) - self.wandb_run.log_artifact(self.val_artifact) - return path - - def map_val_table_path(self): - """ - Map the validation dataset Table like name of file -> it's id in the W&B Table. - Useful for - referencing artifacts for evaluation. - """ - self.val_table_path_map = {} - LOGGER.info("Mapping dataset") - for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_path_map[data[3]] = data[0] - - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): - """ - Create and return W&B artifact containing W&B Table of the dataset. - - arguments: - dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table - class_to_id -- hash map that maps class ids to labels - name -- name of the artifact - - returns: - dataset artifact to be logged or used - """ - # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging - artifact = wandb.Artifact(name=name, type="dataset") - img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.im_files) if not img_files else img_files - for img_file in img_files: - if Path(img_file).is_dir(): - artifact.add_dir(img_file, name='data/images') - labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) - artifact.add_dir(labels_path, name='data/labels') - else: - artifact.add_file(img_file, name='data/images/' + Path(img_file).name) - label_file = Path(img2label_paths([img_file])[0]) - artifact.add_file(str(label_file), name='data/labels/' + - label_file.name) if label_file.exists() else None - table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): - box_data, img_classes = [], {} - for cls, *xywh in labels[:, 1:].tolist(): - cls = int(cls) - box_data.append({ - "position": { - "middle": [xywh[0], xywh[1]], - "width": xywh[2], - "height": xywh[3]}, - "class_id": cls, - "box_caption": "%s" % (class_to_id[cls])}) - img_classes[cls] = class_to_id[cls] - boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), - Path(paths).name) - artifact.add(table, name) - return artifact - - def log_training_progress(self, predn, path, names): - """ - Build evaluation Table. Uses reference from validation dataset table. 
- - arguments: - predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - names (dict(int, str)): hash map that maps class ids to labels - """ - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - avg_conf_per_class = [0] * len(self.data_dict['names']) - pred_class_count = {} - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - cls = int(cls) - box_data.append({ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": cls, - "box_caption": f"{names[cls]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"}) - avg_conf_per_class[cls] += conf - - if cls in pred_class_count: - pred_class_count[cls] += 1 - else: - pred_class_count[cls] = 1 - - for pred_class in pred_class_count.keys(): - avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] - - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_path_map[Path(path).name] - self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - *avg_conf_per_class) - def val_one_image(self, pred, predn, path, names, im): - """ - Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel - - arguments: - pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - """ - if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact - self.log_training_progress(predn, path, names) - - if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: - if self.current_epoch % self.bbox_interval == 0: - box_data = [{ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": f"{names[int(cls)]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + pass def log(self, log_dict): """ @@ -528,7 +147,7 @@ def log(self, log_dict): for key, value in log_dict.items(): self.log_dict[key] = value - def end_epoch(self, best_result=False): + def end_epoch(self): """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
@@ -537,8 +156,6 @@ def end_epoch(self, best_result=False): """ if self.wandb_run: with all_logging_disabled(): - if self.bbox_media_panel_images: - self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images try: wandb.log(self.log_dict) except BaseException as e: @@ -547,21 +164,7 @@ def end_epoch(self, best_result=False): ) self.wandb_run.finish() self.wandb_run = None - self.log_dict = {} - self.bbox_media_panel_images = [] - if self.result_artifact: - self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, - aliases=[ - 'latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) - - wandb.log({"evaluation": self.result_table}) - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): """ @@ -572,6 +175,7 @@ def finish_run(self): with all_logging_disabled(): wandb.log(self.log_dict) wandb.run.finish() + LOGGER.warning(DEPRECATION_WARNING) @contextmanager From b1a3126e5d9ffaddd2ae11362a0087c5541f08f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 22:12:26 +0400 Subject: [PATCH 233/277] Bump docker/build-push-action from 3 to 4 (#10911) * Bump docker/build-push-action from 3 to 4 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3 to 4. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Docker (#10913) * Dockerfile standardizations and improvements * README fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 6 +++--- README.md | 7 ------- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 8 +++----- utils/docker/Dockerfile-cpu | 6 +++--- 5 files changed, 10 insertions(+), 19 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1d0bd30b22cb..4f7fff00677c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -29,7 +29,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push arm64 image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -39,7 +39,7 @@ jobs: tags: ultralytics/yolov5:latest-arm64 - name: Build and push CPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -48,7 +48,7 @@ jobs: tags: ultralytics/yolov5:latest-cpu - name: Build and push GPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . 
diff --git a/README.md b/README.md index e836abf6d551..33468d0635ad 100644 --- a/README.md +++ b/README.md @@ -446,13 +446,6 @@ Get started in seconds with our verified environments. Click each icon below for
-##
App
- -Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! - - -Ultralytics mobile app - ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index e18b2ac69678..b9448101b94c 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -28,7 +28,7 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3 + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' # tensorflow tensorflowjs \ # Create working directory diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index eed1410793a1..aea764d3b86b 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -18,11 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \ - tensorflow-aarch64 - # tensorflowjs \ - # onnx onnx-simplifier onnxruntime \ - # coremltools openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + # tensorflow-aarch64 tensorflowjs \ # Create working directory RUN mkdir -p /usr/src/app diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 558f81f00584..356c06df727d 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -18,9 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ - # openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ + # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu # Create working directory From 8b5a7d417929ac51ce27a1fb1264b01dab72d612 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Feb 2023 22:41:03 +0400 Subject: [PATCH 234/277] Update Dockerfile (#10916) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index b9448101b94c..c68b8dcdfd62 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx # RUN alias python=python3 # Create working directory -RUN mkdir -p /usr/src/app +RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents From 90f23519c854b96cf108a6179d214c54b3b5bda3 Mon Sep 17 00:00:00 2001 From: Izam Mohammed <106471909+izam-mohammed@users.noreply.github.com> Date: Tue, 7 Feb 2023 00:11:23 +0530 Subject: [PATCH 235/277] Improved the language in CONTRIBUTING.md (#10906) Signed-off-by: Izam Mohammed <106471909+izam-mohammed@users.noreply.github.com> Co-authored-by: Glenn Jocher --- CONTRIBUTING.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7498f8995d40..71857faddb89 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,13 +23,13 @@ Select `requirements.txt` to update by clicking on it in GitHub. ### 2. Click 'Edit this file' -Button is in top-right corner. +The button is in the top-right corner.

PR_step2

### 3. Make Changes -Change `matplotlib` version from `3.2.2` to `3.3`. +Change the `matplotlib` version from `3.2.2` to `3.3`.

PR_step3

@@ -62,7 +62,7 @@ To allow your work to be integrated as seamlessly as possible, we advise you to: If you spot a problem with YOLOv5 please submit a Bug Report! For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few -short guidelines below to help users provide what we need in order to get started. +short guidelines below to help users provide what we need to get started. When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating @@ -76,14 +76,14 @@ the problem should be: In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: -- ✅ **Current** – Verify that your code is up-to-date with current +- ✅ **Current** – Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. - ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 -**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing +**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem. From 9ba18266b2e0ae085d975a987eb68d98a87155ce Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 7 Feb 2023 01:58:47 +0400 Subject: [PATCH 236/277] Update Dockerfile (#10917) * Update Dockerfile Signed-off-by: Glenn Jocher * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 8 -------- utils/docker/Dockerfile-arm64 | 2 +- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index c68b8dcdfd62..0349c50526e0 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -31,14 +31,6 @@ RUN pip install --no-cache -r requirements.txt albumentations comet gsutil noteb coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' # tensorflow tensorflowjs \ -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - # Set environment variables ENV OMP_NUM_THREADS=1 diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index aea764d3b86b..2b08f2baaf76 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -19,7 +19,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + coremltools onnx onnxruntime 'openvino-dev>=2022.3' # tensorflow-aarch64 tensorflowjs \ # Create working directory From c3c8e71d7a58c8d07db5e015b5311a5fffda7f00 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 7 Feb 2023 02:15:54 +0400 Subject: [PATCH 237/277] Update Dockerfile-arm64 (#10918) Docker fixes --- utils/docker/Dockerfile-arm64 | 4 ++-- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 2b08f2baaf76..0279dfb8c997 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -18,8 +18,8 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnxruntime 'openvino-dev>=2022.3' +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnxruntime # tensorflow-aarch64 tensorflowjs \ # Create working directory diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 356c06df727d..19b2962d4cab 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -18,7 +18,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu From ea8508a638affa3cb150542ed733fc3aa70be3c2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 11:27:08 +0400 Subject: [PATCH 238/277] [pre-commit.ci] pre-commit suggestions (#10919) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - https://github.com/pre-commit/mirrors-yapf → https://github.com/google/yapf - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 83425ad6cf78..b188048e63a6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ repos: # - id: isort # name: Sort imports - - repo: https://github.com/pre-commit/mirrors-yapf + - repo: https://github.com/google/yapf rev: v0.32.0 hooks: - id: yapf @@ -54,7 +54,7 @@ repos: # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 name: PEP8 From cec1b9bc923cdd235baa3b9b5c80e3700bc9b1dc Mon Sep 17 00:00:00 2001 From: Mahmoud Hegab Date: Tue, 7 Feb 2023 23:32:29 -0800 Subject: [PATCH 239/277] add the dropout_p parameter (#10805) * add the dropout_p parameter Signed-off-by: Mahmoud Hegab * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Mahmoud Hegab Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 8b5ec1c786d8..71340688d2e0 100644 --- a/models/common.py +++ b/models/common.py @@ -846,12 +846,19 @@ def forward(self, x): class Classify(nn.Module): # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + def __init__(self, + c1, + c2, + k=1, + s=1, + p=None, + g=1, + dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability super().__init__() c_ = 1280 # efficientnet_b0 size self.conv = Conv(c1, c_, k, s, autopad(k, p), g) self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) - self.drop = nn.Dropout(p=0.0, inplace=True) + self.drop = nn.Dropout(p=dropout_p, inplace=True) self.linear = nn.Linear(c_, c2) # to x(b,c2) def forward(self, x): From a3c0fd05216a1fdb9f1ba0aff2e5421819b871ed Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:45:58 +0000 Subject: [PATCH 240/277] [Snyk] Fix for 2 vulnerabilities (#10931) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 - https://snyk.io/vuln/SNYK-PYTHON-WHEEL-3180413 * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index c0e4a91d7dd1..ce205f43c5dd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,6 +39,7 @@ seaborn>=0.11.0 # openvino-dev # OpenVINO export # Deploy ---------------------------------------------------------------------- +wheel>=0.38.0 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From 976fa99e5c1d7f5b49f8e7ae458ff3bf93459135 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:52:22 +0000 Subject: [PATCH 241/277] [Snyk] Security upgrade gunicorn from 19.9.0 to 19.10.0 (#10933) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-GUNICORN-541164 --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 42d7ffc0eed8..b6b496feaa7b 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones pip==21.1 Flask==1.0.2 -gunicorn==19.9.0 +gunicorn==19.10.0 From a270b4f1252b65bf60f3996cf9ec9ac01ce3a466 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:54:04 +0000 Subject: [PATCH 242/277] [Snyk] Security upgrade setuptools from 39.0.1 to 65.5.1 (#10934) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 * Update requirements.txt 
Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index ce205f43c5dd..eee15ddf93c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,6 +39,7 @@ seaborn>=0.11.0 # openvino-dev # OpenVINO export # Deploy ---------------------------------------------------------------------- +setuptools>=65.5.1 # Snyk vulnerability fix wheel>=0.38.0 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 From e326252ee4af03b4514f20262b719bf0a9468161 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 16:57:18 +0400 Subject: [PATCH 243/277] Security improvements (#10942) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 2 +- utils/general.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index cbb3114e94d8..02c2a79f5747 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -52,7 +52,7 @@ def get_hash(paths): # Returns a single hash value of a list of paths (files or dirs) size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes - h = hashlib.md5(str(size).encode()) # hash sizes + h = hashlib.sha256(str(size).encode()) # hash sizes h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash diff --git a/utils/general.py b/utils/general.py index 0bbcb6e7334c..63cc29bfb35d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -14,6 +14,7 @@ import random import re import signal +import subprocess import sys import time import urllib @@ -551,7 +552,7 @@ def check_dataset(data, autodownload=True): r = None # success elif s.startswith('bash '): # bash script LOGGER.info(f'Running {s} ...') - r = os.system(s) + r = subprocess.run(s, shell=True) else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' @@ -648,9 +649,9 @@ def download_one(url, dir): if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - os.system(f'tar xf {f} --directory {f.parent}') # unzip + subprocess.run(f'tar xf {f} --directory {f.parent}', shell=True) # unzip elif f.suffix == '.gz': - os.system(f'tar xfz {f} --directory {f.parent}') # unzip + subprocess.run(f'tar xfz {f} --directory {f.parent}', shell=True) # unzip if delete: f.unlink() # remove zip @@ -1022,7 +1023,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + subprocess.run(f'gsutil cp {url} {save_dir}', shell=True) # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header @@ -1046,7 +1047,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve for x in vals) + '\n\n') if bucket: - os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + subprocess.run(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}', shell=True) # upload def apply_classifier(x, model, img, im0): From 61407c93cc0cbabcfbd6de51a3c8293b99219e2e Mon Sep 17 
00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 17:18:27 +0400 Subject: [PATCH 244/277] Security improvements for subprocess.run() (#10943) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 63cc29bfb35d..4d5e94bc98f9 100644 --- a/utils/general.py +++ b/utils/general.py @@ -649,9 +649,9 @@ def download_one(url, dir): if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - subprocess.run(f'tar xf {f} --directory {f.parent}', shell=True) # unzip + subprocess.run(['tar', 'xf', f, '--directory', f.parent], check=True) # unzip elif f.suffix == '.gz': - subprocess.run(f'tar xfz {f} --directory {f.parent}', shell=True) # unzip + subprocess.run(['tar', 'xfz', f, '--directory', f.parent], check=True) # unzip if delete: f.unlink() # remove zip @@ -1023,7 +1023,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - subprocess.run(f'gsutil cp {url} {save_dir}', shell=True) # download evolve.csv if larger than local + subprocess.run(['gsutil', 'cp', f'{url}', f'{save_dir}']) # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header @@ -1047,7 +1047,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve for x in vals) + '\n\n') if bucket: - subprocess.run(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}', shell=True) # upload + subprocess.run(['gsutil', 'cp', f'{evolve_csv}', f'{evolve_yaml}', f'gs://{bucket}']) # upload def apply_classifier(x, model, img, im0): From 238da321cb365533a99d36a1e768d1d4259b6766 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 17:58:24 +0400 Subject: [PATCH 245/277] Security3 (#10944) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 4 +++- segment/val.py | 3 ++- train.py | 4 +++- utils/downloads.py | 3 ++- utils/general.py | 3 +-- val.py | 3 ++- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/segment/train.py b/segment/train.py index 3f32d2100a75..883c8b0a2b62 100644 --- a/segment/train.py +++ b/segment/train.py @@ -19,6 +19,7 @@ import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -597,7 +598,8 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + subprocess.run( + f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/segment/val.py b/segment/val.py index 
248d2bee9be1..8168b5407c1d 100644 --- a/segment/val.py +++ b/segment/val.py @@ -23,6 +23,7 @@ import argparse import json import os +import subprocess import sys from multiprocessing.pool import ThreadPool from pathlib import Path @@ -461,7 +462,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run('zip -r study.zip study_*.txt'.split()) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') diff --git a/train.py b/train.py index 5d75f22b6335..db65f2c74c6c 100644 --- a/train.py +++ b/train.py @@ -19,6 +19,7 @@ import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -571,7 +572,8 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + subprocess.run( + f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/utils/downloads.py b/utils/downloads.py index 72ea87340eb9..a3ff9274066e 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -50,7 +50,8 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if file.exists(): file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + subprocess.run( + f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -".split()) # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): diff --git a/utils/general.py b/utils/general.py index 4d5e94bc98f9..4e5c7147fd40 100644 --- a/utils/general.py +++ b/utils/general.py @@ -631,8 +631,7 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r = os.system( - f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + r = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) success = r == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download diff --git a/val.py b/val.py index 599aa1afdd4a..62fa2c980988 100644 --- a/val.py +++ b/val.py @@ -22,6 +22,7 @@ import argparse import json import os +import subprocess import sys from pathlib import Path @@ -397,7 +398,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run('zip -r study.zip study_*.txt'.split()) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') From 35d6d9f408e5f1e02e5edc8f4bd6976bcf3bff8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 20:32:58 +0400 Subject: [PATCH 246/277] Update Dockerfile-arm64 (#10945) * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher * Update Dockerfile-cpu Signed-off-by: Glenn Jocher --------- Signed-off-by: 
Glenn Jocher --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 0279dfb8c997..b2e381f089d2 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:20.04 +FROM arm64v8/ubuntu:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 19b2962d4cab..dcc71924564b 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:20.04 +FROM ubuntu:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From a895e98172a595252d1f8b5064de344b7ecafbec Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 20:21:38 +0000 Subject: [PATCH 247/277] [Snyk] Security upgrade ubuntu from latest to rolling (#10946) * fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314744 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314768 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314792 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314816 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314840 * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index b2e381f089d2..7023c6a4bb1f 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:latest +FROM arm64v8/ubuntu:rolling # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index dcc71924564b..06bad9a3790d 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:latest +FROM ubuntu:rolling # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From fd38767ea84453107ec3a19893fb2dd4e5034216 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 10 Feb 2023 20:00:40 +0530 Subject: [PATCH 248/277] Update README and greetings with YOLOv8 info (#10735) * update * update * update * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Ayush Chaurasia * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update README * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README * Update README * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * Update README with YOLOv8 --------- Signed-off-by: Glenn Jocher Signed-off-by: Ayush Chaurasia Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 12 +- README.md | 234 ++++++++++++++++---------------- README.zh-CN.md | 46 +++---- 3 files changed, 148 insertions(+), 144 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 5e1589c340ed..8d780a23e2b5 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -25,11 +25,9 @@ jobs: issue-message: | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). - If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. + If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. 
- If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. - - For business inquiries or professional support requests please visit https://ultralytics.com or email support@ultralytics.com. + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results). ## Requirements @@ -55,3 +53,9 @@ jobs: If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + ## YOLOv8 + + Ultralytics YOLOv8 🚀 is our new cutting-edge, state-of-the-art (SOTA) model released at [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics). YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. See the [YOLOv8 Docs] for details and get started with: + ```bash + pip install ultralytics + ``` diff --git a/README.md b/README.md index 33468d0635ad..e00ec478a85b 100644 --- a/README.md +++ b/README.md @@ -45,105 +45,25 @@ To request an Enterprise License please complete the form at
- -##
Ultralytics Live Session
- -
- -⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ - -Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. - -Glenn will be joined by Michael Goin of Neural Magic on February 8th at 12 EST/18 CET to discuss how to achieve GPU-class performance for YOLOv5 on CPUs. Be sure to come prepared with any questions you have about the model deployment process! - -To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - - - - -
- -##
Segmentation ⭐ NEW
- -
- - -
- -Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. - -
- Segmentation Checkpoints -
-We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. +##
YOLOv8 🚀 NEW
-| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | -| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official -- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
- -
- Segmentation Usage Examples  Open In Colab +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -### Train - -YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. - -```bash -# Single-GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` - -### Val - -Validate YOLOv5s-seg mask mAP on COCO dataset: - -```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate -``` - -### Predict - -Use pretrained YOLOv5m-seg.pt to predict bus.jpg: - -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5m-seg.pt" -) # load from PyTorch Hub (WARNING: inference not yet supported) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | - -### Export - -Export YOLOv5s-seg model to ONNX and TensorRT: - -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +```commandline +pip install ultralytics ``` -
+
+ + +
##
Documentation
@@ -312,17 +232,17 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | | ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -334,7 +254,87 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
-##
Classification ⭐ NEW
+##
Segmentation
+ +Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+ Segmentation Checkpoints + +
+ + +
+ +We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. + +| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab + +### Train + +YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### Val + +Validate YOLOv5s-seg mask mAP on COCO dataset: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### Predict + +Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### Export + +Export YOLOv5s-seg model to ONNX and TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ +##
Classification
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. @@ -347,21 +347,21 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x | Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | | -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | | | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | | | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| 
[EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand) @@ -463,7 +463,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/).
diff --git a/README.zh-CN.md b/README.zh-CN.md index b69d3921df99..bd38e8f457be 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -290,17 +290,17 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 | 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | | ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
笔记 @@ -325,21 +325,21 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对 | 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | | -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | | | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | | | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| 
[EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (点击以展开) From 80e54473905c08b1c4c771056a0f5c1a261736d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Feb 2023 18:59:22 +0400 Subject: [PATCH 249/277] Dockerfile `openssl` security update (#10949) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 0349c50526e0..54927c03eb80 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,6 +16,10 @@ RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt install --no-install-recommends -y openssl + # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From d389840f66bb95c150d8c0e4d97759b07d21e821 Mon Sep 17 00:00:00 2001 From: David Strahm Date: Fri, 10 Feb 2023 16:11:08 +0100 Subject: [PATCH 250/277] Allow int8 quantization for export_tfjs (#10948) * Allow int8 quantization for export_tfjs --int8 param currently has no effect on export_tfjs. With this change, ` python export.py --weights ../path/to/best.pt --include tfjs --int8` will add the --quantize_uint8 param to the tensorflowjs_converter script, greatly reducing model size for web usage. Signed-off-by: David Strahm * Update Dockerfile Signed-off-by: Glenn Jocher --------- Signed-off-by: David Strahm Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- export.py | 8 +++++--- utils/docker/Dockerfile | 9 ++++----- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/export.py b/export.py index 9ca3441bc66a..1bf0532dde34 100644 --- a/export.py +++ b/export.py @@ -426,7 +426,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): @try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): +def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export check_requirements('tensorflowjs') import tensorflowjs as tfjs @@ -436,7 +436,9 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + int8_export = ' --quantize_uint8 ' if int8 else '' + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model {int8_export}' \ f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' subprocess.run(cmd.split()) @@ -588,7 +590,7 @@ def run( f[8], _ = export_edgetpu(file) add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) if tfjs: - f[9], _ = export_tfjs(file) + f[9], _ = export_tfjs(file, int8) if paddle: # PaddlePaddle f[10], _ = export_paddle(model, im, file, metadata) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 54927c03eb80..cfe7b0635c28 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -13,13 +13,12 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg \ + # Security updates + # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 + 
openssl # RUN alias python=python3 -# Security updates -# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 -RUN apt install --no-install-recommends -y openssl - # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From 5c3eba664e228d0416285e94954a8a42751bf98b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Feb 2023 21:19:08 +0400 Subject: [PATCH 251/277] Update Dockerfile `apt upgrade openssl` (#10951) Update Dockerfile upgrade `openssl` Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index cfe7b0635c28..b5d2af9fb08e 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -13,12 +13,13 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg \ - # Security updates - # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 - openssl +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt upgrade --no-install-recommends -y openssl + # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From 416a132ceab4d0e2cd4857e8c1e02950c10d80d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 03:07:13 +0400 Subject: [PATCH 252/277] Update README.md (#10952) * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * Update partner logo hosting * Update partner logo hosting --------- Signed-off-by: Glenn Jocher --- README.md | 11 ++++++----- README.zh-CN.md | 11 ++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index e00ec478a85b..25d12b289d09 100644 --- a/README.md +++ b/README.md @@ -185,16 +185,16 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | @@ -452,7 +452,8 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare - + + ##
License
diff --git a/README.zh-CN.md b/README.zh-CN.md index bd38e8f457be..c581842c6556 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -243,16 +243,16 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | @@ -436,7 +436,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu - + + ##
License
From 25c17370dd0bc6f6b42cc29592750cf3334797dd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 12:45:18 +0400 Subject: [PATCH 253/277] Update greetings.yml (#10955) * Update greetings.yml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 8d780a23e2b5..42a2463585a8 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -53,9 +53,13 @@ jobs: If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. - ## YOLOv8 + ## Introducing YOLOv8 🚀 - Ultralytics YOLOv8 🚀 is our new cutting-edge, state-of-the-art (SOTA) model released at [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics). YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. See the [YOLOv8 Docs] for details and get started with: + We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! + + Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. + + Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: ```bash pip install ultralytics ``` From fa4bdbe14d33b3aa74e2eac5bdb940cc4b337198 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 14:24:49 +0400 Subject: [PATCH 254/277] Update README.zh-CN.md (#10956) * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 2 +- README.zh-CN.md | 181 +++++++++++++++++++++++++----------------------- 2 files changed, 97 insertions(+), 86 deletions(-) diff --git a/README.md b/README.md index 25d12b289d09..3a0e2fe1a188 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

[English](README.md) | [简体中文](README.zh-CN.md) diff --git a/README.zh-CN.md b/README.zh-CN.md index c581842c6556..c25dc0c3326a 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,7 +1,7 @@

- +

[英文](README.md)|[简体中文](README.zh-CN.md)
@@ -45,87 +45,23 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表实例分割模型 ⭐ 新
- - - -我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 - -
- 实例分割模型列表 - -
- -我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 - -| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | -| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | - -- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official -- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
- -
- 分割模型使用示例  Open In Colab - -### 训练 - -YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 - -```bash -# 单 GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 +##
YOLOv8 🚀 NEW
-# 多 GPU, DDP 模式 -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -### 验证 - -在 COCO 数据集上验证 YOLOv5s-seg mask mAP: +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -```bash -bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 +```commandline +pip install ultralytics ``` -### 预测 - -使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: - -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5m-seg.pt" -) # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | - -### 模型导出 - -将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: - -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 -``` - -
+
+ + +
##
文档
@@ -312,6 +248,88 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
+##
实例分割模型 ⭐ 新
+ +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 + +
+ 实例分割模型列表 + +
+ +
+ + +
+ +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 + +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ 分割模型使用示例  Open In Colab + +### 训练 + +YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 + +```bash +# 单 GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# 多 GPU, DDP 模式 +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### 验证 + +在 COCO 数据集上验证 YOLOv5s-seg mask mAP: + +```bash +bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 +``` + +### 预测 + +使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### 模型导出 + +将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
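以下为一个假设性的最小示例（并非本仓库或该提交中的代码），演示导出完成后如何用 onnxruntime 加载上面得到的 ONNX 模型并检查输出形状；文件名与输入尺寸均为示意，请按实际导出结果调整：

```python
# 示意代码：加载导出的 yolov5s-seg.onnx 并执行一次前向推理（假设已安装 onnxruntime）
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("yolov5s-seg.onnx", providers=["CPUExecutionProvider"])
inp = session.get_inputs()[0]                      # 输入节点, 通常为 1x3x640x640 的 float32
im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # 占位输入, 实际应为归一化后的 BCHW 图像
outputs = session.run(None, {inp.name: im})
print([o.shape for o in outputs])                  # 检测输出与 mask 原型 (proto) 的形状
```

上述输出仍需经过 NMS 与 mask 后处理（流程可参考 `segment/predict.py`）才能得到最终分割结果。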
+ ##
分类网络 ⭐ 新
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对分类模型训练、验证和部署的支持!详情请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v6.2) 或访问我们的 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) 以快速入门。 @@ -423,13 +441,6 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
-##
APP
- -通过下载 [Ultralytics APP](https://ultralytics.com/app_install) ,以在您的 iOS 或 Android 设备上运行 YOLOv5 模型! - - -Ultralytics mobile app - ##
贡献
我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -448,7 +459,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-若发现 YOLOv5 的 bug 或有功能需求,请访问 [GitHub 问题](https://github.com/ultralytics/yolov5/issues) 。如需专业支持,请 [联系我们](https://ultralytics.com/contact) 。 +请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytis.com) 以报告 YOLOv5 错误和请求功能。
From 1a2eb532cec4b5f0a4b295554b3c73ae9f7fff3a Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 13 Feb 2023 15:38:27 +0200 Subject: [PATCH 255/277] Fix return value check for subprocess.run (#10972) Subprocess.run does not return an integer. Regressed in #10944 --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 4e5c7147fd40..01f0a3bddc7d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -631,8 +631,8 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) - success = r == 0 + proc = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) + success = proc.returncode == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() From a2de5c5bf61f1165ffeb4af51dc5b24e8d04bff6 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 13 Feb 2023 16:00:31 +0200 Subject: [PATCH 256/277] Subprocess improvements (#10973) * Use list-form arguments for subprocess.run calls where possible Augments #10944 * Deduplicate curl code * Avoid eval() to parse integer --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- classify/train.py | 2 +- export.py | 35 ++++++++++++++++++++++++++--------- segment/train.py | 8 ++++++-- segment/val.py | 2 +- train.py | 8 ++++++-- utils/downloads.py | 29 +++++++++++++++++++++++++---- utils/general.py | 6 ++---- val.py | 2 +- 8 files changed, 68 insertions(+), 24 deletions(-) diff --git a/classify/train.py b/classify/train.py index 4767be77bd61..8ae2fdd52828 100644 --- a/classify/train.py +++ b/classify/train.py @@ -78,7 +78,7 @@ def train(opt, device): LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') t = time.time() if str(data) == 'imagenet': - subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True) + subprocess.run(["bash", str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) else: url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' download(url, dir=data_dir.parent) diff --git a/export.py b/export.py index 1bf0532dde34..2c9fb77d17be 100644 --- a/export.py +++ b/export.py @@ -194,8 +194,15 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') f = str(file).replace('.pt', f'_openvino_model{os.sep}') - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export + args = [ + "mo", + "--input_model", + str(file.with_suffix('.onnx')), + "--output_dir", + f, + "--data_type", + ("FP16" if half else "FP32"),] + subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -420,8 +427,15 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) + subprocess.run([ + 'edgetpu_compiler', + '-s', + '-d', + '-k', + '10', + '--out_dir', + str(file.parent), + f_tfl,], check=True) return f, None @@ -436,11 +450,14 @@ def 
export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path - int8_export = ' --quantize_uint8 ' if int8 else '' - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model {int8_export}' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) + args = [ + 'tensorflowjs_converter', + '--input_format=tf_frozen_model', + '--quantize_uint8' if int8 else '', + '--output_node_names=Identity,Identity_1,Identity_2,Identity_3', + str(f_pb), + str(f),] + subprocess.run([arg for arg in args if arg], check=True) json = Path(f_json).read_text() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order diff --git a/segment/train.py b/segment/train.py index 883c8b0a2b62..4914f9613a3d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -598,8 +598,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - subprocess.run( - f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/segment/val.py b/segment/val.py index 8168b5407c1d..665b540a5490 100644 --- a/segment/val.py +++ b/segment/val.py @@ -462,7 +462,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - subprocess.run('zip -r study.zip study_*.txt'.split()) + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') diff --git a/train.py b/train.py index db65f2c74c6c..ccda0a7fe2e3 100644 --- a/train.py +++ b/train.py @@ -572,8 +572,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - subprocess.run( - f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/utils/downloads.py b/utils/downloads.py index a3ff9274066e..2610f3c66aac 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -26,8 +26,10 @@ def is_url(url, check=True): def gsutil_getsize(url=''): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes + output = subprocess.check_output(['gsutil', 'du', url], shell=True, encoding='utf-8') + if output: + return int(output.split()[0]) + return 0 def url_getsize(url='https://ultralytics.com/images/bus.jpg'): @@ -36,6 +38,25 @@ def url_getsize(url='https://ultralytics.com/images/bus.jpg'): return int(response.headers.get('content-length', -1)) +def curl_download(url, filename, *, silent: 
bool = False) -> bool: + """ + Download a file from a url to a filename using curl. + """ + silent_option = 'sS' if silent else '' # silent + proc = subprocess.run([ + 'curl', + '-#', + f'-{silent_option}L', + url, + '--output', + filename, + '--retry', + '9', + '-C', + '-',]) + return proc.returncode == 0 + + def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER @@ -50,8 +71,8 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if file.exists(): file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - subprocess.run( - f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -".split()) # curl download, retry and resume on fail + # curl download, retry and resume on fail + curl_download(url2 or url, file) finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): diff --git a/utils/general.py b/utils/general.py index 01f0a3bddc7d..a6af4f3216dd 100644 --- a/utils/general.py +++ b/utils/general.py @@ -38,7 +38,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize +from utils.downloads import gsutil_getsize, curl_download from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -630,9 +630,7 @@ def download_one(url, dir): LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: - s = 'sS' if threads > 1 else '' # silent - proc = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) - success = proc.returncode == 0 + success = curl_download(url, f, silent=(threads > 1)) else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() diff --git a/val.py b/val.py index 62fa2c980988..7829afb68b79 100644 --- a/val.py +++ b/val.py @@ -398,7 +398,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - subprocess.run('zip -r study.zip study_*.txt'.split()) + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') From 4d28fec3b8b663fa8225634ca8eeb4446505527e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 13 Feb 2023 20:27:22 +0400 Subject: [PATCH 257/277] Update README.md (#10975) @pderrenger YOLOv5 HUB copy updates Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3a0e2fe1a188..2a28ea11490a 100644 --- a/README.md +++ b/README.md @@ -203,7 +203,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Ultralytics HUB
-[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
From e7b60999ad88a40bfb84c539bed1e6ec11249af2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Feb 2023 20:28:20 +0400 Subject: [PATCH 258/277] Fix Comet link (#10990) @DN6 fixes YOLOv5 Comet link we chatted about Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a28ea11490a..16dfd9fca085 100644 --- a/README.md +++ b/README.md @@ -190,7 +190,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + From e4d836080f68dd14ae9becaa7b50c510ac1db54f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Feb 2023 20:31:41 +0400 Subject: [PATCH 259/277] Update README.zh-CN.md (#10991) Signed-off-by: Glenn Jocher --- README.zh-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index c25dc0c3326a..17c046c8d98d 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -184,7 +184,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + @@ -193,7 +193,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | | :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
From 4dd1caaf9af97ca56d7938a4baf3be8d0ea0a3ce Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Feb 2023 21:07:55 +0400 Subject: [PATCH 260/277] Update README.md (#10992) * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.zh-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 17c046c8d98d..800a670cfb4f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -191,8 +191,8 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | | 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
From 6d283ec167b60a0160eb275323a9b13b563ff804 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 16 Feb 2023 17:08:13 +0000 Subject: [PATCH 261/277] [Snyk] Security upgrade werkzeug from 1.0.1 to 2.2.3 (#10995) * fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319935 - https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319936 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/google_app_engine/additional_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index b6b496feaa7b..d5b76758c876 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -2,3 +2,4 @@ pip==21.1 Flask==1.0.2 gunicorn==19.10.0 +werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability From 226a5e43cbceff5de43a71c4fb3f3f7478a9bb03 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Feb 2023 23:48:42 +0400 Subject: [PATCH 262/277] Update ci-testing.yml benchmarks to Python 3.10 (#10997) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f31bb6e6ce3c..f9c62d623042 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest ] - python-version: [ '3.9' ] # requires python<=3.9 + python-version: [ '3.10' ] # requires python<=3.10 model: [ yolov5n ] steps: - uses: actions/checkout@v3 From 34e1bc8ee3cabc809bb3302b0cc6de4f6dcce10e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Feb 2023 13:53:45 +0400 Subject: [PATCH 263/277] Update downloads.py (#11005) * Update downloads.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/downloads.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index 2610f3c66aac..e739919540b4 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -120,11 +120,9 @@ def github_assets(repository, version='latest'): file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) if name in assets: - url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror - safe_download( - file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}') return str(file) From 7a972e86c4e5009830d5e6faacadfe6e1ed2efff Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Feb 2023 01:06:24 +0100 Subject: [PATCH 264/277] Update .pre-commit-config.yaml 
(#11009) * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py Signed-off-by: Glenn Jocher * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Pre-commit updates * Pre-commit updates --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 35 ++--- benchmarks.py | 2 +- classify/predict.py | 4 +- classify/train.py | 26 ++-- classify/tutorial.ipynb | 2 +- classify/val.py | 8 +- detect.py | 2 +- export.py | 26 ++-- models/common.py | 16 +- models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/tf.py | 12 +- segment/predict.py | 2 +- segment/train.py | 14 +- segment/tutorial.ipynb | 2 +- segment/val.py | 16 +- train.py | 6 +- tutorial.ipynb | 2 +- utils/__init__.py | 2 +- utils/dataloaders.py | 34 ++--- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 8 +- utils/flask_rest_api/restapi.py | 22 +-- utils/general.py | 48 +++--- utils/loggers/__init__.py | 16 +- utils/loggers/clearml/clearml_utils.py | 6 +- utils/loggers/comet/__init__.py | 192 ++++++++++++------------ utils/loggers/comet/comet_utils.py | 42 +++--- utils/loggers/comet/hpo.py | 32 ++-- utils/loggers/wandb/wandb_utils.py | 10 +- utils/metrics.py | 10 +- utils/plots.py | 2 +- utils/segment/dataloaders.py | 32 ++-- utils/segment/loss.py | 12 +- utils/segment/metrics.py | 90 +++++------ utils/segment/plots.py | 20 +-- utils/torch_utils.py | 4 +- utils/triton.py | 14 +- val.py | 4 +- 39 files changed, 389 insertions(+), 392 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b188048e63a6..c5162378ab81 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,5 @@ -# Define hooks for code formations -# Will be applied on any updated commit files if a user has installed and linked commit hook - -default_language_version: - python: python3.8 +# Ultralytics YOLO 🚀, GPL-3.0 license +# Pre-commit hooks. 
For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci @@ -16,13 +13,13 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - # - id: end-of-file-fixer + - id: end-of-file-fixer - id: trailing-whitespace - id: check-case-conflict - id: check-yaml - - id: check-toml - - id: pretty-format-json - id: check-docstring-first + - id: double-quote-string-fixer + - id: detect-private-key - repo: https://github.com/asottile/pyupgrade rev: v3.3.1 @@ -31,11 +28,11 @@ repos: name: Upgrade code args: [--py37-plus] - # - repo: https://github.com/PyCQA/isort - # rev: 5.11.4 - # hooks: - # - id: isort - # name: Sort imports + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + name: Sort imports - repo: https://github.com/google/yapf rev: v0.32.0 @@ -59,12 +56,12 @@ repos: - id: flake8 name: PEP8 - #- repo: https://github.com/codespell-project/codespell - # rev: v2.2.2 - # hooks: - # - id: codespell - # args: - # - --ignore-words-list=crate,nd + - repo: https://github.com/codespell-project/codespell + rev: v2.2.2 + hooks: + - id: codespell + args: + - --ignore-words-list=crate,nd,strack,dota #- repo: https://github.com/asottile/yesqa # rev: v1.4.0 diff --git a/benchmarks.py b/benchmarks.py index 03d7d693a936..09108b8a7cc4 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -164,6 +164,6 @@ def main(opt): test(**vars(opt)) if opt.test else run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/predict.py b/classify/predict.py index 5a5edabda42c..5f0d40787b52 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -179,7 +179,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms') # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image @@ -221,6 +221,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/train.py b/classify/train.py index 8ae2fdd52828..b752a3c1fe32 100644 --- a/classify/train.py +++ b/classify/train.py @@ -78,7 +78,7 @@ def train(opt, device): LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') t = time.time() if str(data) == 'imagenet': - subprocess.run(["bash", str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) + subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) else: url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' download(url, dir=data_dir.parent) @@ -220,11 +220,11 @@ def train(opt, device): # Log metrics = { - "train/loss": tloss, - f"{val}/loss": vloss, - "metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]['lr']} # learning rate + 'train/loss': tloss, + f'{val}/loss': vloss, + 'metrics/accuracy_top1': top1, + 'metrics/accuracy_top5': top5, + 'lr/0': optimizer.param_groups[0]['lr']} # learning rate logger.log_metrics(metrics, epoch) # Save model @@ -251,11 +251,11 @@ def train(opt, device): if RANK in {-1, 0} and final_epoch: LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: 
python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" + f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' + f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' + f'\nExport: python export.py --weights {best} --include onnx' f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f"\nVisualize: https://netron.app\n") + f'\nVisualize: https://netron.app\n') # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels @@ -263,7 +263,7 @@ def train(opt, device): file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') # Log results - meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) logger.log_model(best, epochs, metadata=meta) @@ -310,7 +310,7 @@ def main(opt): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Parameters opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run @@ -328,6 +328,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index cc18aa934039..58723608bdbe 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1477,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/classify/val.py b/classify/val.py index 03ba817d5ea2..4edd5a1f5e9e 100644 --- a/classify/val.py +++ b/classify/val.py @@ -100,7 +100,7 @@ def run( pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" + desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}' bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): for images, labels in bar: @@ -123,14 +123,14 @@ def run( top1, top5 = acc.mean(0).tolist() if pbar: - pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" + pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}' if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): acc_i = acc[targets == i] top1i, top5i = acc_i.mean(0).tolist() - LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}') # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image @@ -165,6 +165,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/detect.py b/detect.py index 2d13401f78bd..3f32d7a50d6b 100644 --- 
a/detect.py +++ b/detect.py @@ -256,6 +256,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/export.py b/export.py index 2c9fb77d17be..e8287704866a 100644 --- a/export.py +++ b/export.py @@ -120,7 +120,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) @@ -195,13 +195,13 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): f = str(file).replace('.pt', f'_openvino_model{os.sep}') args = [ - "mo", - "--input_model", + 'mo', + '--input_model', str(file.with_suffix('.onnx')), - "--output_dir", + '--output_dir', f, - "--data_type", - ("FP16" if half else "FP32"),] + '--data_type', + ('FP16' if half else 'FP32'),] subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -237,7 +237,7 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): if bits < 32: if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) else: print(f'{prefix} quantization only supported on macOS, skipping...') @@ -293,7 +293,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) @@ -403,7 +403,7 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() - open(f, "wb").write(tflite_model) + open(f, 'wb').write(tflite_model) return f, None @@ -618,14 +618,14 @@ def run( det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' + s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \ + '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else '' LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python {dir / 
('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") + f'\nVisualize: https://netron.app') return f # return list of exported files/dirs @@ -667,6 +667,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/models/common.py b/models/common.py index 71340688d2e0..f416ddf25eb8 100644 --- a/models/common.py +++ b/models/common.py @@ -380,11 +380,11 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout("NCHW")) + network.get_parameters()[0].set_layout(Layout('NCHW')) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2 stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -431,7 +431,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) @@ -445,7 +445,7 @@ def gd_outputs(gd): gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -467,9 +467,9 @@ def gd_outputs(gd): output_details = interpreter.get_output_details() # outputs # load metadata with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, "r") as model: + with zipfile.ZipFile(w, 'r') as model: meta_file = model.namelist()[0] - meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') @@ -491,7 +491,7 @@ def gd_outputs(gd): check_requirements('tritonclient[all]') from utils.triton import TritonRemoteModel model = TritonRemoteModel(url=w) - nhwc = model.runtime.startswith("tensorflow") + nhwc = model.runtime.startswith('tensorflow') else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -608,7 +608,7 @@ def _model_type(p='path/to/model.pt'): url = urlparse(p) # if url may be Triton inference server types = [s in Path(p).name for s in 
sf] types[8] &= not types[9] # tflite &= not edgetpu - triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) return types + [triton] @staticmethod diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index f73d1992ac19..07ec25ba264d 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index 7cbdb36b425c..a827814e1399 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/tf.py b/models/tf.py index 3f3dc8dbe7e7..8290cf2e57f5 100644 --- a/models/tf.py +++ b/models/tf.py @@ -356,7 +356,7 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2' self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False @@ -371,7 +371,7 @@ class TFConcat(keras.layers.Layer): # TF version of torch.concat() def __init__(self, dimension=1, w=None): super().__init__() - assert dimension == 1, "convert only NCHW to NHWC concat" + assert dimension == 1, 'convert only NCHW to NHWC concat' self.d = 3 def call(self, inputs): @@ -523,17 +523,17 @@ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS selected_boxes = tf.gather(boxes, selected_inds) padded_boxes = tf.pad(selected_boxes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", + mode='CONSTANT', constant_values=0.0) selected_scores = tf.gather(scores_inp, selected_inds) padded_scores = tf.pad(selected_scores, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) selected_classes = tf.gather(class_inds, selected_inds) padded_classes = tf.pad(selected_classes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, padded_classes, valid_detections @@ -603,6 +603,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/predict.py b/segment/predict.py index e9093baa1cc7..d82df89a85b0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -279,6 +279,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/train.py b/segment/train.py index 4914f9613a3d..2e71de131a8d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -139,7 +139,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or 
hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - logger.update_params({"batch_size": batch_size}) + logger.update_params({'batch_size': batch_size}) # loggers.on_params_update({"batch_size": batch_size}) # Optimizer @@ -341,10 +341,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Mosaic plots if plots: if ni < 3: - plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg') if ni == 10: files = sorted(save_dir.glob('train*.jpg')) - logger.log_images(files, "Mosaics", epoch) + logger.log_images(files, 'Mosaics', epoch) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -454,8 +454,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, "Results", epoch + 1) - logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + logger.log_images(files, 'Results', epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1) torch.cuda.empty_cache() return results @@ -548,7 +548,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -659,6 +659,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index cb1af34d9f17..cb52045bcb25 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -591,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/val.py b/segment/val.py index 665b540a5490..a7f95fe9b6fc 100644 --- a/segment/val.py +++ b/segment/val.py @@ -70,8 +70,8 @@ def save_one_json(predn, jdict, path, class_map, pred_masks): from pycocotools.mask import encode def single_encode(x): - rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] - rle["counts"] = rle["counts"].decode("utf-8") + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem @@ -105,7 +105,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: - gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes @@ 
-231,8 +231,8 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", - "mAP50", "mAP50-95)") + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', + 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) @@ -343,7 +343,7 @@ def run( # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format - LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') @@ -369,7 +369,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -468,6 +468,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/train.py b/train.py index ccda0a7fe2e3..c4e3aac3561a 100644 --- a/train.py +++ b/train.py @@ -148,7 +148,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({"batch_size": batch_size}) + loggers.on_params_update({'batch_size': batch_size}) # Optimizer nbs = 64 # nominal batch size @@ -522,7 +522,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -635,6 +635,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/tutorial.ipynb b/tutorial.ipynb index c320d699a940..32af68b57945 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -973,4 +973,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/__init__.py b/utils/__init__.py index 7bf3efe6b8c7..d158c5515a12 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -69,7 +69,7 @@ def notebook_init(verbose=True): if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") + total, used, free = shutil.disk_usage('/') display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 02c2a79f5747..7687a2ba2665 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -89,7 
+89,7 @@ def exif_transpose(image): if method is not None: image = image.transpose(method) del exif[0x0112] - image.info["exif"] = exif.tobytes() + image.info['exif'] = exif.tobytes() return image @@ -212,11 +212,11 @@ def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): # Parse monitor shape monitor = self.sct.monitors[self.screen] - self.top = monitor["top"] if top is None else (monitor["top"] + top) - self.left = monitor["left"] if left is None else (monitor["left"] + left) - self.width = width or monitor["width"] - self.height = height or monitor["height"] - self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} def __iter__(self): return self @@ -224,7 +224,7 @@ def __iter__(self): def __next__(self): # mss screen capture: get raw pixels from the screen as np array im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' if self.transforms: im = self.transforms(im0) # transforms @@ -239,7 +239,7 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: @@ -358,7 +358,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy - s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' @@ -373,7 +373,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') self.threads[i].start() LOGGER.info('') # newline @@ -495,7 +495,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" + d = f'Scanning {cache_path}... 
{nf} images, {nm + ne} backgrounds, {nc} corrupt' tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -598,8 +598,8 @@ def check_cache_ram(self, safety_margin=0.1, prefix=''): mem = psutil.virtual_memory() cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: - LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " - f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' f"{'caching images ✅' if cache else 'not caching images ⚠️'}") return cache @@ -607,7 +607,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning {path.parent / path.stem}..." + desc = f'{prefix}Scanning {path.parent / path.stem}...' with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -622,7 +622,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' pbar.close() if msgs: @@ -1063,7 +1063,7 @@ def __init__(self, path='coco128.yaml', autodownload=False): if zipped: data['path'] = data_dir except Exception as e: - raise Exception("error/HUB/dataset_stats/yaml_load") from e + raise Exception('error/HUB/dataset_stats/yaml_load') from e check_dataset(data, autodownload) # download dataset if missing self.hub_dir = Path(data['path'] + '-hub') @@ -1188,7 +1188,7 @@ def __getitem__(self, i): else: # read image im = cv2.imread(f) # BGR if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] else: sample = self.torch_transforms(im) return sample, j diff --git a/utils/downloads.py b/utils/downloads.py index e739919540b4..643b529fba3b 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -77,7 +77,7 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): file.unlink() # remove partial downloads - LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') LOGGER.info('') diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 773ad8932967..952e5dcb90fa 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -7,13 +7,13 @@ import requests -DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -IMAGE = "zidane.jpg" +DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' +IMAGE = 'zidane.jpg' # Read image -with open(IMAGE, "rb") as f: +with open(IMAGE, 'rb') as f: image_data = f.read() -response = requests.post(DETECTION_URL, files={"image": image_data}).json() +response = requests.post(DETECTION_URL, files={'image': image_data}).json() pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py 
b/utils/flask_rest_api/restapi.py index 8482435c861e..9258b1a68860 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -13,36 +13,36 @@ app = Flask(__name__) models = {} -DETECTION_URL = "/v1/object-detection/" +DETECTION_URL = '/v1/object-detection/' -@app.route(DETECTION_URL, methods=["POST"]) +@app.route(DETECTION_URL, methods=['POST']) def predict(model): - if request.method != "POST": + if request.method != 'POST': return - if request.files.get("image"): + if request.files.get('image'): # Method 1 # with request.files["image"] as f: # im = Image.open(io.BytesIO(f.read())) # Method 2 - im_file = request.files["image"] + im_file = request.files['image'] im_bytes = im_file.read() im = Image.open(io.BytesIO(im_bytes)) if model in models: results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") + return results.pandas().xyxy[0].to_json(orient='records') -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') + parser.add_argument('--port', default=5000, type=int, help='port number') parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s') opt = parser.parse_args() for m in opt.model: - models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) + models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) - app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat + app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py index a6af4f3216dd..b6efe6bb8732 100644 --- a/utils/general.py +++ b/utils/general.py @@ -38,7 +38,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize, curl_download +from utils.downloads import curl_download, gsutil_getsize from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -90,11 +90,11 @@ def is_kaggle(): def is_docker() -> bool: """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): + if Path('/.dockerenv').exists(): return True try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) + with open('/proc/self/cgroup') as file: + return any('docker' in line for line in file) except OSError: return False @@ -113,7 +113,7 @@ def is_writeable(dir, test=False): return False -LOGGING_NAME = "yolov5" +LOGGING_NAME = 'yolov5' def set_logging(name=LOGGING_NAME, verbose=True): @@ -121,21 +121,21 @@ def set_logging(name=LOGGING_NAME, verbose=True): rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR logging.config.dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "formatters": { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { name: { - "format": "%(message)s"}}, - "handlers": { + 'format': '%(message)s'}}, + 'handlers': { name: { - "class": "logging.StreamHandler", - "formatter": name, - "level": level,}}, - "loggers": { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level,}}, + 
'loggers': { name: { - "level": level, - "handlers": [name], - "propagate": False,}}}) + 'level': level, + 'handlers': [name], + 'propagate': False,}}}) set_logging(LOGGING_NAME) # run before defining LOGGER @@ -218,7 +218,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def methods(instance): # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')] def print_args(args: Optional[dict] = None, show_file=True, show_func=False): @@ -299,7 +299,7 @@ def check_online(): def run_once(): # Check once try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + socket.create_connection(('1.1.1.1', 443), 5) # check host accessibility return True except OSError: return False @@ -386,7 +386,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta check_python() # check python version if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() - assert file.exists(), f"{prefix} {file} not found, check failed." + assert file.exists(), f'{prefix} {file} not found, check failed.' with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): @@ -450,7 +450,7 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}' def check_yaml(file, suffix=('.yaml', '.yml')): @@ -556,8 +556,8 @@ def check_dataset(data, autodownload=True): else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" - LOGGER.info(f"Dataset download {s}") + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' + LOGGER.info(f'Dataset download {s}') check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary @@ -673,7 +673,7 @@ def make_divisible(x, divisor): def clean_str(s): # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) def one_cycle(y1=0.0, y2=1.0, steps=100): diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 1e7f38e0d677..9de1f226233c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -121,8 +121,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # Comet if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] + if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'): + run_id = self.opt.resume.split('/')[-1] self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) else: @@ -158,7 +158,7 @@ def on_pretrain_routine_end(self, labels, names): plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: - self.wandb.log({"Labels": 
[wandb.Image(str(x), caption=x.name) for x in paths]}) + self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]}) # if self.clearml: # pass # ClearML saves these images automatically using hooks if self.comet_logger: @@ -212,7 +212,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]}) if self.clearml: self.clearml.log_debug_samples(files, title='Validation') @@ -279,7 +279,7 @@ def on_train_end(self, last, best, epoch, results): if self.wandb: self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: wandb.log_artifact(str(best if best.exists() else last), @@ -329,7 +329,7 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): if wandb and 'wandb' in self.include: self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, + name=None if opt.name == 'exp' else opt.name, config=opt) else: self.wandb = None @@ -370,12 +370,12 @@ def log_graph(self, model, imgsz=(640, 640)): def log_model(self, model_path, epoch=0, metadata={}): # Log model to all loggers if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) art.add_file(str(model_path)) wandb.log_artifact(art) def update_params(self, params): - # Update the paramters logged + # Update the parameters logged if self.wandb: wandb.run.config.update(params, allow_val_change=True) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 3457727a96a4..2764abe90da8 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -25,7 +25,7 @@ def construct_dataset(clearml_info_string): dataset_root_path = Path(dataset.get_local_copy()) # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) + yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) if len(yaml_filenames) > 1: raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' 'the dataset definition this way.') @@ -100,7 +100,7 @@ def __init__(self, opt, hyp): self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent - self.task.set_base_docker("ultralytics/yolov5:latest", + self.task.set_base_docker('ultralytics/yolov5:latest', docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', docker_setup_bash_script='pip install clearml') @@ -150,7 +150,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres class_name = class_names[int(class_nr)] confidence_percentage = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence_percentage}%" + label = f'{class_name}: 
{confidence_percentage}%' if conf > conf_threshold: annotator.rectangle(box.cpu().numpy(), outline=color) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index b0318f88d6a6..d4599841c9fc 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -17,7 +17,7 @@ # Project Configuration config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') except (ModuleNotFoundError, ImportError): comet_ml = None COMET_PROJECT_NAME = None @@ -31,32 +31,32 @@ from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou -COMET_PREFIX = "comet://" +COMET_PREFIX = 'comet://' -COMET_MODE = os.getenv("COMET_MODE", "online") +COMET_MODE = os.getenv('COMET_MODE', 'online') # Model Saving Settings -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') # Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" +COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' # Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" -COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" -COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) +COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' +COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' +COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) # Confusion Matrix Settings -CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) -IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) +CONF_THRES = float(os.getenv('CONF_THRES', 0.001)) +IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) # Batch Logging Settings -COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" -COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" +COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' +COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' -RANK = int(os.getenv("RANK", -1)) +RANK = int(os.getenv('RANK', -1)) to_pil = T.ToPILImage() @@ -66,7 +66,7 @@ class CometLogger: with Comet """ - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None: self.job_type = job_type self.opt = opt self.hyp = hyp @@ -87,52 +87,52 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar # Default parameters to pass to Experiment objects self.default_experiment_kwargs = { - "log_code": False, - "log_env_gpu": True, - "log_env_cpu": True, - "project_name": COMET_PROJECT_NAME,} + 'log_code': False, + 'log_env_gpu': True, + 'log_env_cpu': True, + 
'project_name': COMET_PROJECT_NAME,} self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - self.num_classes = self.data_dict["nc"] + self.class_names = self.data_dict['names'] + self.num_classes = self.data_dict['nc'] self.logged_images_count = 0 self.max_images = COMET_MAX_IMAGE_UPLOADS if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") + self.experiment.log_other('Created from', 'YOLOv5') if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:] self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", + 'Run Path', + f'{workspace}/{project_name}/{experiment_id}', ) self.log_parameters(vars(opt)) self.log_parameters(self.opt.hyp) self.log_asset_data( self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, + name='hyperparameters.json', + metadata={'type': 'hyp-config-file'}, ) self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, + f'{self.opt.save_dir}/opt.yaml', + metadata={'type': 'opt-config-file'}, ) self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - if hasattr(self.opt, "conf_thres"): + if hasattr(self.opt, 'conf_thres'): self.conf_thres = self.opt.conf_thres else: self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): + if hasattr(self.opt, 'iou_thres'): self.iou_thres = self.opt.iou_thres else: self.iou_thres = IOU_THRES - self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.log_parameters({'val_iou_threshold': self.iou_thres, 'val_conf_threshold': self.conf_thres}) self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: @@ -147,22 +147,22 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) + 'comet_mode': COMET_MODE, + 'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS, + 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, + 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, + 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, + 'comet_model_name': COMET_MODEL_NAME,}) # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + if hasattr(self.opt, 'comet_optimizer_id'): + self.experiment.log_other('optimizer_id', self.opt.comet_optimizer_id) + self.experiment.log_other('optimizer_objective', self.opt.comet_optimizer_objective) + self.experiment.log_other('optimizer_metric', self.opt.comet_optimizer_metric) + 
self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp)) def _get_experiment(self, mode, experiment_id=None): - if mode == "offline": + if mode == 'offline': if experiment_id is not None: return comet_ml.ExistingOfflineExperiment( previous_experiment=experiment_id, @@ -182,11 +182,11 @@ def _get_experiment(self, mode, experiment_id=None): return comet_ml.Experiment(**self.default_experiment_kwargs) except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. " - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) + logger.warning('COMET WARNING: ' + 'Comet credentials have not been set. ' + 'Comet will default to offline logging. ' + 'Please set your credentials to enable online logging.') + return self._get_experiment('offline', experiment_id) return @@ -210,12 +210,12 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): return model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} + 'fitness_score': fitness_score[-1], + 'epochs_trained': epoch + 1, + 'save_period': opt.save_period, + 'total_epochs': opt.epochs,} - model_files = glob.glob(f"{path}/*.pt") + model_files = glob.glob(f'{path}/*.pt') for model_path in model_files: name = Path(model_path).name @@ -232,12 +232,12 @@ def check_dataset(self, data_file): data_config = yaml.safe_load(f) if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") + path = data_config['path'].replace(COMET_PREFIX, '') data_dict = self.download_dataset_artifact(path) return data_dict - self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + self.log_asset(self.opt.data, metadata={'type': 'data-config-file'}) return check_dataset(data_file) @@ -253,8 +253,8 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + image_id = path.split('/')[-1].split('.')[0] + image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}' if image_name not in self.logged_image_names: native_scale_image = PIL.Image.open(path) self.log_image(native_scale_image, name=image_name) @@ -263,22 +263,22 @@ def log_predictions(self, image, labelsn, path, shape, predn): metadata = [] for cls, *xyxy in filtered_labels.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}-gt', + 'score': 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) for *xyxy, conf, cls in filtered_detections.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}', + 'score': conf * 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) self.metadata_dict[image_name] = metadata self.logged_images_count += 1 @@ -305,35 +305,35 @@ def preprocess_prediction(self, image, labels, shape, pred): return predn, labelsn def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = 
sorted(glob.glob(f"{asset_path}/*")) + img_paths = sorted(glob.glob(f'{asset_path}/*')) label_paths = img2label_paths(img_paths) for image_file, label_file in zip(img_paths, label_paths): image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) except ValueError as e: logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f"COMET ERROR: {e}") + logger.error(f'COMET ERROR: {e}') continue return artifact def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) + dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset') + path = str((ROOT / Path(self.data_dict['path'])).resolve()) metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: + for key in ['train', 'val', 'test']: split_path = metadata.get(key) if split_path is not None: - metadata[key] = split_path.replace(path, "") + metadata[key] = split_path.replace(path, '') - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata) for key in metadata.keys(): - if key in ["train", "val", "test"]: + if key in ['train', 'val', 'test']: if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): continue @@ -352,13 +352,13 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir + data_dict['path'] = artifact_save_dir - metadata_names = metadata.get("names") + metadata_names = metadata.get('names') if type(metadata_names) == dict: - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} elif type(metadata_names) == list: - data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} else: raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" @@ -366,13 +366,13 @@ def download_dataset_artifact(self, artifact_path): return data_dict def update_data_paths(self, data_dict): - path = data_dict.get("path", "") + path = data_dict.get('path', '') - for split in ["train", "val", "test"]: + for split in ['train', 'val', 'test']: if data_dict.get(split): split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) + data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [ + f'{path}/{x}' for x in split_path]) return data_dict @@ -413,11 +413,11 @@ def on_train_batch_end(self, log_dict, step): def on_train_end(self, files, save_dir, last, best, epoch, results): if self.comet_log_predictions: curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch) for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + self.log_asset(f, metadata={'epoch': epoch}) + self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch}) if not self.opt.evolve: model_path = str(best if best.exists() else last) @@ -481,7 +481,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.comet_log_confusion_matrix: epoch = self.experiment.curr_epoch class_names = list(self.class_names.values()) - class_names.append("background") + class_names.append('background') num_classes = len(class_names) self.experiment.log_confusion_matrix( @@ -491,7 +491,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) epoch=epoch, column_label='Actual Category', row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", + file_name=f'confusion-matrix-epoch-{epoch}.json', ) def on_fit_epoch_end(self, result, epoch): diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 3cbd45156b57..27600761ad28 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -11,28 +11,28 @@ logger = logging.getLogger(__name__) -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") +COMET_PREFIX = 'comet://' +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" + model_dir = f'{opt.project}/{experiment.name}' os.makedirs(model_dir, exist_ok=True) model_name = COMET_MODEL_NAME model_asset_list = experiment.get_model_asset_list(model_name) if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') return model_asset_list = sorted( model_asset_list, - key=lambda x: x["step"], + key=lambda x: x['step'], reverse=True, ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} resource_url = urlparse(opt.weights) checkpoint_filename = resource_url.query @@ -44,22 +44,22 @@ def 
download_model_checkpoint(opt, experiment): checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") + logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') return try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}') asset_filename = checkpoint_filename - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with open(model_download_path, "wb") as f: + model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + model_download_path = f'{model_dir}/{asset_filename}' + with open(model_download_path, 'wb') as f: f.write(model_binary) opt.weights = model_download_path except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.warning('COMET WARNING: Unable to download checkpoint from Comet') logger.exception(e) @@ -75,9 +75,9 @@ def set_opt_parameters(opt, experiment): resume_string = opt.resume for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + if asset['fileName'] == 'opt.yaml': + asset_id = asset['assetId'] + asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) opt_dict = yaml.safe_load(asset_binary) for key, value in opt_dict.items(): setattr(opt, key, value) @@ -85,11 +85,11 @@ def set_opt_parameters(opt, experiment): # Save hyperparameters to YAML file # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" + save_dir = f'{opt.project}/{experiment.name}' os.makedirs(save_dir, exist_ok=True) - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: + hyp_yaml_path = f'{save_dir}/hyp.yaml' + with open(hyp_yaml_path, 'w') as f: yaml.dump(opt.hyp, f) opt.hyp = hyp_yaml_path @@ -113,7 +113,7 @@ def check_comet_weights(opt): if opt.weights.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) download_model_checkpoint(opt, experiment) return True @@ -140,7 +140,7 @@ def check_comet_resume(opt): if opt.resume.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) set_opt_parameters(opt, experiment) download_model_checkpoint(opt, experiment) diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index 7dd5c92e8de1..fc49115c1358 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -21,7 +21,7 @@ # Project Configuration config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') def get_args(known=False): @@ -68,30 +68,30 @@ def get_args(known=False): parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') # Comet Arguments - 
parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", + parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') + parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') + parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') + parser.add_argument('--comet_optimizer_workers', type=int, default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') return parser.parse_known_args()[0] if known else parser.parse_args() def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") + opt.batch_size = parameters.get('batch_size') + opt.epochs = parameters.get('epochs') device = select_device(opt.device, batch_size=opt.batch_size) train(hyp_dict, opt, device, callbacks=Callbacks()) -if __name__ == "__main__": +if __name__ == '__main__': opt = get_args(known=True) opt.weights = str(opt.weights) @@ -99,7 +99,7 @@ def run(parameters, opt): opt.data = str(opt.data) opt.project = str(opt.project) - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + optimizer_id = os.getenv('COMET_OPTIMIZER_ID') if optimizer_id is None: with open(opt.comet_optimizer_config) as f: optimizer_config = json.load(f) @@ -110,9 +110,9 @@ def run(parameters, opt): opt.comet_optimizer_id = optimizer.id status = optimizer.status() - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] + opt.comet_optimizer_objective = status['spec']['objective'] + opt.comet_optimizer_metric = status['spec']['metric'] - logger.info("COMET INFO: Starting Hyperparameter Sweep") + logger.info('COMET INFO: Starting Hyperparameter Sweep') for parameter in optimizer.get_parameters(): - run(parameter["parameters"], opt) + run(parameter['parameters'], opt) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 6bc2ec510d0a..c8ab38197381 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -17,7 +17,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH RANK = int(os.getenv('RANK', -1)) DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ - f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." + f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' 
try: import wandb @@ -65,7 +65,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.data_dict = None if self.wandb: self.wandb_run = wandb.init(config=opt, - resume="allow", + resume='allow', project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, entity=opt.entity, name=opt.name if opt.name != 'exp' else None, @@ -97,7 +97,7 @@ def setup_training(self, opt): if isinstance(opt.resume, str): model_dir, _ = self.download_model_artifact(opt) if model_dir: - self.weights = Path(model_dir) / "last.pt" + self.weights = Path(model_dir) / 'last.pt' config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ @@ -131,7 +131,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") + LOGGER.info(f'Saving model artifact on epoch {epoch + 1}') def val_one_image(self, pred, predn, path, names, im): pass @@ -160,7 +160,7 @@ def end_epoch(self): wandb.log(self.log_dict) except BaseException as e: LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" + f'An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}' ) self.wandb_run.finish() self.wandb_run = None diff --git a/utils/metrics.py b/utils/metrics.py index 7fb077774384..95f364c23f34 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -28,7 +28,7 @@ def smooth(y, f=0.05): return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 
# Arguments @@ -194,14 +194,14 @@ def plot(self, normalize=True, save_dir='', names=()): nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (names + ['background']) if labels else "auto" + ticklabels = (names + ['background']) if labels else 'auto' with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered sn.heatmap(array, ax=ax, annot=nc < 30, annot_kws={ - "size": 8}, + 'size': 8}, cmap='Blues', fmt='.2f', square=True, @@ -331,7 +331,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): ax.set_ylabel('Precision') ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title('Precision-Recall Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) @@ -354,7 +354,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title(f'{ylabel}-Confidence Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py index f84aed9fb5c7..24c618c80b59 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -450,7 +450,7 @@ def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() if verbose: - LOGGER.info(f"Saving {f}") + LOGGER.info(f'Saving {f}') if labels is not None: LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) if pred is not None: diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d66b36115e3f..097a5d5cb058 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -95,7 +95,7 @@ def __init__( stride=32, pad=0, min_items=0, - prefix="", + prefix='', downsample_ratio=1, overlap=False, ): @@ -116,7 +116,7 @@ def __getitem__(self, index): shapes = None # MixUp augmentation - if random.random() < hyp["mixup"]: + if random.random() < hyp['mixup']: img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) else: @@ -147,11 +147,11 @@ def __getitem__(self, index): img, labels, segments = random_perspective(img, labels, segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"]) + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) nl = len(labels) # number of labels if nl: @@ -177,17 +177,17 @@ def __getitem__(self, index): nl = len(labels) # update after albumentations # HSV color-space - augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down - if random.random() < hyp["flipud"]: + if random.random() < hyp['flipud']: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] masks = torch.flip(masks, dims=[1]) # Flip left-right - if random.random() < hyp["fliplr"]: + if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] @@ -251,15 +251,15 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) 
# replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) img4, labels4, segments4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - perspective=self.hyp["perspective"], + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img4, labels4, segments4 diff --git a/utils/segment/loss.py b/utils/segment/loss.py index b45b2c27e0a0..2a8a4c680f6f 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -83,7 +83,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model # Mask regression if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) for bi in b.unique(): @@ -101,10 +101,10 @@ def __call__(self, preds, targets, masks): # predictions, targets, model if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp["box"] - lobj *= self.hyp["obj"] - lcls *= self.hyp["cls"] - lseg *= self.hyp["box"] / bs + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + lseg *= self.hyp['box'] / bs loss = lbox + lobj + lcls + lseg return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() @@ -112,7 +112,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): # Mask loss for one image pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() def build_targets(self, p, targets): diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index b09ce23fb9e3..c9f137e38ead 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -21,7 +21,7 @@ def ap_per_class_box_and_mask( pred_cls, target_cls, plot=False, - save_dir=".", + save_dir='.', names=(), ): """ @@ -37,7 +37,7 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Box")[2:] + prefix='Box')[2:] results_masks = ap_per_class(tp_m, conf, pred_cls, @@ -45,21 +45,21 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Mask")[2:] + prefix='Mask')[2:] results = { - "boxes": { - "p": results_boxes[0], - "r": results_boxes[1], - "ap": results_boxes[3], - "f1": results_boxes[2], - "ap_class": results_boxes[4]}, - "masks": { - "p": results_masks[0], - "r": results_masks[1], - "ap": results_masks[3], - "f1": results_masks[2], - "ap_class": results_masks[4]}} + 'boxes': { + 'p': results_boxes[0], + 'r': results_boxes[1], + 'ap': results_boxes[3], + 'f1': results_boxes[2], + 'ap_class': results_boxes[4]}, + 'masks': { + 'p': results_masks[0], + 'r': results_masks[1], + 'ap': results_masks[3], + 'f1': 
results_masks[2], + 'ap_class': results_masks[4]}} return results @@ -159,8 +159,8 @@ def update(self, results): Args: results: Dict{'boxes': Dict{}, 'masks': Dict{}} """ - self.metric_box.update(list(results["boxes"].values())) - self.metric_mask.update(list(results["masks"].values())) + self.metric_box.update(list(results['boxes'].values())) + self.metric_mask.update(list(results['masks'].values())) def mean_results(self): return self.metric_box.mean_results() + self.metric_mask.mean_results() @@ -178,33 +178,33 @@ def ap_class_index(self): KEYS = [ - "train/box_loss", - "train/seg_loss", # train loss - "train/obj_loss", - "train/cls_loss", - "metrics/precision(B)", - "metrics/recall(B)", - "metrics/mAP_0.5(B)", - "metrics/mAP_0.5:0.95(B)", # metrics - "metrics/precision(M)", - "metrics/recall(M)", - "metrics/mAP_0.5(M)", - "metrics/mAP_0.5:0.95(M)", # metrics - "val/box_loss", - "val/seg_loss", # val loss - "val/obj_loss", - "val/cls_loss", - "x/lr0", - "x/lr1", - "x/lr2",] + 'train/box_loss', + 'train/seg_loss', # train loss + 'train/obj_loss', + 'train/cls_loss', + 'metrics/precision(B)', + 'metrics/recall(B)', + 'metrics/mAP_0.5(B)', + 'metrics/mAP_0.5:0.95(B)', # metrics + 'metrics/precision(M)', + 'metrics/recall(M)', + 'metrics/mAP_0.5(M)', + 'metrics/mAP_0.5:0.95(M)', # metrics + 'val/box_loss', + 'val/seg_loss', # val loss + 'val/obj_loss', + 'val/cls_loss', + 'x/lr0', + 'x/lr1', + 'x/lr2',] BEST_KEYS = [ - "best/epoch", - "best/precision(B)", - "best/recall(B)", - "best/mAP_0.5(B)", - "best/mAP_0.5:0.95(B)", - "best/precision(M)", - "best/recall(M)", - "best/mAP_0.5(M)", - "best/mAP_0.5:0.95(M)",] + 'best/epoch', + 'best/precision(B)', + 'best/recall(B)', + 'best/mAP_0.5(B)', + 'best/mAP_0.5:0.95(B)', + 'best/precision(M)', + 'best/recall(M)', + 'best/mAP_0.5(M)', + 'best/mAP_0.5:0.95(M)',] diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 9b90900b3772..3ba097624fcd 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -108,13 +108,13 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' annotator.im.save(fname) # save -def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): +def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob("results*.csv")) - assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
for f in files: try: data = pd.read_csv(f) @@ -125,19 +125,19 @@ def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) if best: # best - ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') else: # last - ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f"Warning: Plotting error for {f}: {e}") + print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() - fig.savefig(save_dir / "results.png", dpi=200) + fig.savefig(save_dir / 'results.png', dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 77549b005ceb..5b67b3fa7a06 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -291,7 +291,7 @@ def model_info(model, verbose=False, imgsz=640): fs = '' name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' - LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) @@ -342,7 +342,7 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') return optimizer diff --git a/utils/triton.py b/utils/triton.py index a94ef0ad197d..25928021477e 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -21,7 +21,7 @@ def __init__(self, url: str): """ parsed_url = urlparse(url) - if parsed_url.scheme == "grpc": + if parsed_url.scheme == 'grpc': from tritonclient.grpc import InferenceServerClient, InferInput self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client @@ -31,7 +31,7 @@ def __init__(self, url: str): def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] else: from tritonclient.http import InferenceServerClient, InferInput @@ -43,14 +43,14 @@ def create_input_placeholders() -> typing.List[InferInput]: def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], 
[int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] self._create_input_placeholders_fn = create_input_placeholders @property def runtime(self): """Returns the model runtime""" - return self.metadata.get("backend", self.metadata.get("platform")) + return self.metadata.get('backend', self.metadata.get('platform')) def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: """ Invokes the model. Parameters can be provided via args or kwargs. @@ -68,14 +68,14 @@ def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[t def _create_inputs(self, *args, **kwargs): args_len, kwargs_len = len(args), len(kwargs) if not args_len and not kwargs_len: - raise RuntimeError("No inputs provided.") + raise RuntimeError('No inputs provided.') if args_len and kwargs_len: - raise RuntimeError("Cannot specify args and kwargs at the same time") + raise RuntimeError('Cannot specify args and kwargs at the same time') placeholders = self._create_input_placeholders_fn() if args_len: if args_len != len(placeholders): - raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') for input, value in zip(placeholders, args): input.set_data_from_numpy(value.cpu().numpy()) else: diff --git a/val.py b/val.py index 7829afb68b79..d4073b42fe78 100644 --- a/val.py +++ b/val.py @@ -304,7 +304,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -404,6 +404,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) From 4db6757ef9d43f49a780ff29deb06b28e96fbe84 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Mon, 20 Feb 2023 18:23:13 +0800 Subject: [PATCH 265/277] Fixed access 'names' from a DistributedDataParallel module (#11023) --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index b752a3c1fe32..ae2363ccf056 100644 --- a/classify/train.py +++ b/classify/train.py @@ -44,7 +44,7 @@ check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls -from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, +from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP, smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -260,7 +260,7 @@ def train(opt, device): # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') + file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg') # Log results meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} From feca55719bab7dad14284f77a096da387094dbde Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 24 Feb 2023 20:09:02 -0800 Subject: [PATCH 266/277] Update "YOLOv5 is out of date" msg (#11061) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index b6efe6bb8732..b7e38b3a1a50 100644 --- a/utils/general.py +++ b/utils/general.py @@ -338,7 +338,7 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind if n > 0: pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' - s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update." 
     else:
         s += f'up to date with {url} ✅'
     LOGGER.info(s)

From 6559d8fcebd1c6abe4f5e100cff82d8fdda3f232 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 24 Feb 2023 21:34:04 -0800
Subject: [PATCH 267/277] Update ci-testing.yml (#11062)

* Update ci-testing.yml

Signed-off-by: Glenn Jocher

* Update ci-testing.yml

Signed-off-by: Glenn Jocher

---------

Signed-off-by: Glenn Jocher
---
 .github/workflows/ci-testing.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index f9c62d623042..83438094b6f6 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -79,10 +79,10 @@ jobs:
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Get cache dir
-        # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
+      - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
         id: pip-cache
-        run: echo "::set-output name=dir::$(pip cache dir)"
+        run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
+        shell: bash # for Windows compatibility
       - name: Cache pip
         uses: actions/cache@v3
         with:

From b8731d855fce77120bf6401f689fb0accd66c2a6 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 25 Feb 2023 14:16:03 -0800
Subject: [PATCH 268/277] Update requirements.txt (#11065)

Signed-off-by: Glenn Jocher
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index eee15ddf93c4..3e6e39d8cc07 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -40,7 +40,7 @@ seaborn>=0.11.0
 
 # Deploy ----------------------------------------------------------------------
 setuptools>=65.5.1 # Snyk vulnerability fix
-wheel>=0.38.0 # Snyk vulnerability fix
+wheel>=0.38.4 # Snyk vulnerability fix
 # tritonclient[all]~=2.24.0
 
 # Extras ----------------------------------------------------------------------

From b005788f36fd329a840879fcfb5975bc5902ada8 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 25 Feb 2023 15:15:50 -0800
Subject: [PATCH 269/277] Update requirements.txt (#11067)

Signed-off-by: Glenn Jocher
---
 requirements.txt | 1 -
 1 file changed, 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 3e6e39d8cc07..7aa4732d6d78 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -40,7 +40,6 @@ seaborn>=0.11.0
 
 # Deploy ----------------------------------------------------------------------
 setuptools>=65.5.1 # Snyk vulnerability fix
-wheel>=0.38.4 # Snyk vulnerability fix
 # tritonclient[all]~=2.24.0
 
 # Extras ----------------------------------------------------------------------

From 7dee52f94d28e09142717ffff95ee689982364d1 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 25 Feb 2023 15:58:59 -0800
Subject: [PATCH 270/277] Update requirements.txt (#11068)

Signed-off-by: Glenn Jocher
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 7aa4732d6d78..d67c44c9d812 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@
 # Usage: pip install -r requirements.txt
 
 # Base ------------------------------------------------------------------------
-gitpython
+gitpython>=3.1.30
 ipython # interactive notebook
 matplotlib>=3.2.2
 numpy>=1.18.5

From 3c0a6e664bc3847ab9cca3df66195de6acfeb012 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 25 Feb 2023 16:15:07 -0800
Subject: [PATCH 271/277] Security fixes for IPython (#11069)

Signed-off-by: Glenn Jocher
---
 models/common.py | 9 ++++++---
 requirements.txt | 2 +-
 utils/__init__.py | 6 ++++--
 utils/general.py | 19 +++++++++++++------
 4 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/models/common.py b/models/common.py
index f416ddf25eb8..aa8ae674eb47 100644
--- a/models/common.py
+++ b/models/common.py
@@ -21,14 +21,13 @@
 import requests
 import torch
 import torch.nn as nn
-from IPython.display import display
 from PIL import Image
 from torch.cuda import amp
 
 from utils import TryExcept
 from utils.dataloaders import exif_transpose, letterbox
 from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
-                           increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
+                           increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
                            xyxy2xywh, yaml_load)
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import copy_attr, smart_inference_mode
@@ -767,7 +766,11 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l
 
             im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
             if show:
-                display(im) if is_notebook() else im.show(self.files[i])
+                if is_jupyter():
+                    from IPython.display import display
+                    display(im)
+                else:
+                    im.show(self.files[i])
             if save:
                 f = self.files[i]
                 im.save(save_dir / f) # save
diff --git a/requirements.txt b/requirements.txt
index d67c44c9d812..11cb9aaaf99e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,6 @@
 
 # Base ------------------------------------------------------------------------
 gitpython>=3.1.30
-ipython # interactive notebook
 matplotlib>=3.2.2
 numpy>=1.18.5
 opencv-python>=4.1.1
@@ -43,6 +42,7 @@ setuptools>=65.5.1 # Snyk vulnerability fix
 # tritonclient[all]~=2.24.0
 
 # Extras ----------------------------------------------------------------------
+# ipython # interactive notebook
 # mss # screenshots
 # albumentations>=1.0.3
 # pycocotools>=2.0.6 # COCO mAP
diff --git a/utils/__init__.py b/utils/__init__.py
index d158c5515a12..5b9fcd517e03 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -60,17 +60,19 @@ def notebook_init(verbose=True):
     check_font()
 
     import psutil
-    from IPython import display # to display images and clear console output
 
     if is_colab():
         shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
 
     # System info
+    display = None
     if verbose:
         gb = 1 << 30 # bytes to GiB (1024 ** 3)
         ram = psutil.virtual_memory().total
         total, used, free = shutil.disk_usage('/')
-        display.clear_output()
+        with contextlib.suppress(Exception): # clear display if ipython is installed
+            from IPython import display
+            display.clear_output()
         s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
     else:
         s = ''
diff --git a/utils/general.py b/utils/general.py
index b7e38b3a1a50..74620460070e 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -29,7 +29,6 @@
 from zipfile import ZipFile, is_zipfile
 
 import cv2
-import IPython
 import numpy as np
 import pandas as pd
 import pkg_resources as pkg
@@ -77,10 +76,18 @@ def is_colab():
     return 'google.colab' in sys.modules
 
 
-def is_notebook():
-    # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace
-    ipython_type = str(type(IPython.get_ipython()))
-    return 'colab' in ipython_type or 'zmqshell' in ipython_type
+def is_jupyter():
+    """
+    Check if the current script is running inside a Jupyter Notebook.
+    Verified on Colab, Jupyterlab, Kaggle, Paperspace.
+
+    Returns:
+        bool: True if running inside a Jupyter Notebook, False otherwise.
+    """
+    with contextlib.suppress(Exception):
+        from IPython import get_ipython
+        return get_ipython() is not None
+    return False
 
 
 def is_kaggle():
@@ -429,7 +436,7 @@ def check_img_size(imgsz, s=32, floor=0):
 def check_imshow(warn=False):
     # Check if environment supports image displays
     try:
-        assert not is_notebook()
+        assert not is_jupyter()
         assert not is_docker()
         cv2.imshow('test', np.zeros((1, 1, 3)))
         cv2.waitKey(1)

From 5ca8e822c8e75cde1d613dea8bfa49009fdc3618 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 27 Feb 2023 10:55:05 -0800
Subject: [PATCH 272/277] Update export.py (#11077)

Signed-off-by: Glenn Jocher
---
 export.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/export.py b/export.py
index e8287704866a..e167b2088cb1 100644
--- a/export.py
+++ b/export.py
@@ -413,7 +413,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
     cmd = 'edgetpu_compiler --version'
     help_url = 'https://coral.ai/docs/edgetpu/compiler/'
    assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
-    if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
+    if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0:
         LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
         sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
         for c in (

From 5c91daeaecaeca709b8b6d13bd571d068fdbd003 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 27 Feb 2023 19:55:23 -0800
Subject: [PATCH 273/277] Update ci-testing.yml (#11079)

* Update ci-testing.yml

Signed-off-by: Glenn Jocher

* Update ci-testing.yml

Signed-off-by: Glenn Jocher

* Update ci-testing.yml

Signed-off-by: Glenn Jocher

* Update ci-testing.yml

Signed-off-by: Glenn Jocher

* Update ci-testing.yml

Signed-off-by: Glenn Jocher

---------

Signed-off-by: Glenn Jocher
---
 .github/workflows/ci-testing.yml | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 83438094b6f6..7c74fe6fe652 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -25,12 +25,16 @@ jobs:
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      #- name: Cache pip
-      #  uses: actions/cache@v3
-      #  with:
-      #    path: ~/.cache/pip
-      #    key: ${{ runner.os }}-Benchmarks-${{ hashFiles('requirements.txt') }}
-      #    restore-keys: ${{ runner.os }}-Benchmarks-
+      - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
+        id: pip-cache
+        run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
+        shell: bash # for Windows compatibility
+      - name: Cache pip
+        uses: actions/cache@v3
+        with:
+          path: ${{ steps.pip-cache.outputs.dir }}
+          key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }}
+          restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip-
       - name: Install requirements
         run: |
           python -m pip install --upgrade pip wheel

From 85f6019e5af2641e33139e97415b7bd1dc72d779 Mon Sep 17 00:00:00 2001
From: Iker Lluvia
Date: Mon, 6 Mar 2023 22:54:34 +0100
Subject: [PATCH 274/277] Rename evolve folder if default project name (#11108)

Save logs to 'runs/evolve-seg' if default project name, 'runs/train-seg'

Signed-off-by: Iker Lluvia
---
 segment/train.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/segment/train.py b/segment/train.py
index 2e71de131a8d..c6ac2d5e23d2 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -530,8 +530,8 @@ def main(opt, callbacks=Callbacks()):
             check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
         assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
         if opt.evolve:
-            if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve
-                opt.project = str(ROOT / 'runs/evolve')
+            if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg
+                opt.project = str(ROOT / 'runs/evolve-seg')
             opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
         if opt.name == 'cfg':
             opt.name = Path(opt.cfg).stem # use model.yaml as name

From ea05d5cb6c0dc01ef254761f0b140ceab17f9fd3 Mon Sep 17 00:00:00 2001
From: Iker Lluvia
Date: Thu, 9 Mar 2023 23:47:53 +0100
Subject: [PATCH 275/277] Correct mutation adding the missing parameters (#11109)

* Correct mutation adding the missing parameters

Correct mutation considering the higher number of segmentation parameters compared to object detection.
Fixes #9730

Signed-off-by: Iker Lluvia

* Use already defined segmentation keys from segment/metrics.py

---------

Signed-off-by: Iker Lluvia
Co-authored-by: Glenn Jocher
---
 segment/train.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/segment/train.py b/segment/train.py
index c6ac2d5e23d2..8ed75ba63e7c 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -629,7 +629,7 @@ def main(opt, callbacks=Callbacks()):
             while all(v == 1): # mutate until a change occurs (prevent duplicates)
                 v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
             for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
-                hyp[k] = float(x[i + 7] * v[i]) # mutate
+                hyp[k] = float(x[i + 12] * v[i]) # mutate
 
             # Constrain to limits
             for k, v in meta.items():
@@ -641,7 +641,7 @@ def main(opt, callbacks=Callbacks()):
             results = train(hyp.copy(), opt, device, callbacks)
             callbacks = Callbacks()
             # Write mutation results
-            print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket)
+            print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket)
 
         # Plot results
         plot_evolve(evolve_csv)

From 5543b89466d072a9f8f2e31f8257a1ccc7f588e9 Mon Sep 17 00:00:00 2001
From: Sheng Hu
Date: Fri, 10 Mar 2023 06:55:02 +0800
Subject: [PATCH 276/277] Fix a visualization bug (#11134)

Fix a visualization bug reported here: https://github.com/ultralytics/yolov5/issues/11133

Signed-off-by: Sheng Hu
Co-authored-by: Glenn Jocher
---
 utils/segment/plots.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/segment/plots.py b/utils/segment/plots.py
index 3ba097624fcd..1b22ec838ac9 100644
--- a/utils/segment/plots.py
+++ b/utils/segment/plots.py
@@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg'
         x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
         annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
         if paths:
-            annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
+            annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
         if len(targets) > 0:
             idx = targets[:, 0] == i
             ti = targets[idx] # image targets

From 3e55763d45f9c5f8217e4dad5ba1e6c1f42e3bf8 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 12 Mar 2023 02:10:38 +0100
Subject: [PATCH 277/277] Update ci-testing.yml (#11154)

Signed-off-by: Glenn Jocher
---
 .github/workflows/ci-testing.yml | 22 ++--------------------
 1 file changed, 2 insertions(+), 20 deletions(-)

diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 7c74fe6fe652..a6f47bb8811c 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -25,16 +25,7 @@ jobs:
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
-        id: pip-cache
-        run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
-        shell: bash # for Windows compatibility
-      - name: Cache pip
-        uses: actions/cache@v3
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }}
-          restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip-
+          cache: 'pip' # caching pip dependencies
       - name: Install requirements
         run: |
           python -m pip install --upgrade pip wheel
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
-        id: pip-cache
-        run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
-        shell: bash # for Windows compatibility
-      - name: Cache pip
-        uses: actions/cache@v3
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }}
-          restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip-
+          cache: 'pip' # caching pip dependencies
      - name: Install requirements
         run: |
           python -m pip install --upgrade pip wheel
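
Note on the CI caching changes above: PATCH 267 and PATCH 273 wire up a manual "Get cache dir" + actions/cache pair, and PATCH 277 replaces that pair with setup-python's built-in pip caching. For reference, a minimal sketch of a workflow step using the built-in cache is shown below; it is illustrative only and not part of the patch series, and the concrete python-version value is an assumption rather than something taken from the patches:

      - uses: actions/setup-python@v4
        with:
          python-version: '3.10' # assumed version, for illustration only
          cache: 'pip' # reuse downloaded pip packages between CI runs
      - name: Install requirements
        run: pip install -r requirements.txt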