# custom_detection_train.py
  1. from imageai.Detection.Custom import DetectionModelTrainer
  2. trainer = DetectionModelTrainer()
  3. trainer.setModelTypeAsYOLOv3()
  4. # trainer.setDataDirectory(data_directory='roland_esl_3_440_ye')
  5. trainer.setDataDirectory(data_directory='hololens')
  6. # trainer.setTrainConfig(object_names_array=['ROLANDESL3-440 YE'], batch_size=4, num_experiments=200, train_from_pretrained_model='pretrained-yolov3.h5')
  7. trainer.setTrainConfig(object_names_array=['hololens'], batch_size=4, num_experiments=200, train_from_pretrained_model='pretrained-yolov3.h5')
  8. #download pre-trained model via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/pretrained-yolov3.h5
  9. # If you are training to detect more than 1 object, set names of objects above like object_names_array=["hololens", "google-glass", "oculus", "magic-leap"]
  10. trainer.trainModel()
  11. '''
  12. SAMPLE RESULT
  13. Using TensorFlow backend.
  14. Generating anchor boxes for training images and annotation...
  15. Average IOU for 9 anchors: 0.78
  16. Anchor Boxes generated.
  17. Detection configuration saved in hololens/json/detection_config.json
  18. Training on: ['hololens']
  19. Training with Batch Size: 4
  20. Number of Experiments: 200
  21. Epoch 1/200
  22. - 733s - loss: 34.8253 - yolo_layer_1_loss: 6.0920 - yolo_layer_2_loss: 11.1064 - yolo_layer_3_loss: 17.6269 - val_loss: 20.5028 - val_yolo_layer_1_loss: 4.0171 - val_yolo_layer_2_loss: 7.5175 - val_yolo_layer_3_loss: 8.9683
  23. Epoch 2/200
  24. - 648s - loss: 11.1396 - yolo_layer_1_loss: 2.1209 - yolo_layer_2_loss: 4.0063 - yolo_layer_3_loss: 5.0124 - val_loss: 7.6188 - val_yolo_layer_1_loss: 1.8513 - val_yolo_layer_2_loss: 2.2446 - val_yolo_layer_3_loss: 3.5229
  25. Epoch 3/200
  26. - 674s - loss: 6.4360 - yolo_layer_1_loss: 1.3500 - yolo_layer_2_loss: 2.2343 - yolo_layer_3_loss: 2.8518 - val_loss: 7.2326 - val_yolo_layer_1_loss: 1.8762 - val_yolo_layer_2_loss: 2.3802 - val_yolo_layer_3_loss: 2.9762
  27. Epoch 4/200
  28. - 634s - loss: 5.3801 - yolo_layer_1_loss: 1.0323 - yolo_layer_2_loss: 1.7854 - yolo_layer_3_loss: 2.5624 - val_loss: 6.3730 - val_yolo_layer_1_loss: 1.4272 - val_yolo_layer_2_loss: 2.0534 - val_yolo_layer_3_loss: 2.8924
  29. Epoch 5/200
  30. - 645s - loss: 5.2569 - yolo_layer_1_loss: 0.9953 - yolo_layer_2_loss: 1.8611 - yolo_layer_3_loss: 2.4005 - val_loss: 6.0458 - val_yolo_layer_1_loss: 1.7037 - val_yolo_layer_2_loss: 1.9754 - val_yolo_layer_3_loss: 2.3667
  31. Epoch 6/200
  32. - 655s - loss: 4.7582 - yolo_layer_1_loss: 0.9959 - yolo_layer_2_loss: 1.5986 - yolo_layer_3_loss: 2.1637 - val_loss: 5.8313 - val_yolo_layer_1_loss: 1.1880 - val_yolo_layer_2_loss: 1.9962 - val_yolo_layer_3_loss: 2.6471
  33. Epoch 7/200
  34. '''