@NobuoTsukamoto
Last active January 7, 2022 06:54
export_tfv2_lite_models.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "export_tfv2_lite_models.ipynb",
"provenance": [],
"collapsed_sections": [],
"toc_visible": true,
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"c6c33c8802b24f1eb834bd27514d2e6c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_1eb6f25adc6a4d6d8f565c4ebf7d3bb7",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_fe790de6b04348efbebb4f5b915ac89e",
"IPY_MODEL_ee8a2ae32252413d80a7fc6d1af3aaf3"
]
}
},
"1eb6f25adc6a4d6d8f565c4ebf7d3bb7": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"fe790de6b04348efbebb4f5b915ac89e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_63536abdfa834be0820f777ccc9c6482",
"_dom_classes": [],
"description": "Dl Completed...: 100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 1,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 1,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_97667762970d47c0b82a7e467e267661"
}
},
"ee8a2ae32252413d80a7fc6d1af3aaf3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_5040edd8595a4bd8ae068e8dd617d4c8",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 2/2 [00:39<00:00, 19.90s/ url]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_e483b80d89a14b5a8dd2f60dd4427877"
}
},
"63536abdfa834be0820f777ccc9c6482": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"97667762970d47c0b82a7e467e267661": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"5040edd8595a4bd8ae068e8dd617d4c8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"e483b80d89a14b5a8dd2f60dd4427877": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"a2098117f4d740479374cb17efd6a300": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_eabd27cb9f9e48bea062b93716afd797",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_d5d7bf792e114cff8d4aad94764238ae",
"IPY_MODEL_4f5728877fb847529b539db1e02598ab"
]
}
},
"eabd27cb9f9e48bea062b93716afd797": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"d5d7bf792e114cff8d4aad94764238ae": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_b901e45d20aa4b83b1136365e8f28a23",
"_dom_classes": [],
"description": "Dl Size...: 100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 1,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 1,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_f3f84dc56a684b4a89689f2fe42ce0c5"
}
},
"4f5728877fb847529b539db1e02598ab": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_a02784634caa40c9a86497cd4a76199c",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 868/868 [00:39<00:00, 21.84 MiB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_5be156abe4b24738a313d6c407663381"
}
},
"b901e45d20aa4b83b1136365e8f28a23": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"f3f84dc56a684b4a89689f2fe42ce0c5": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"a02784634caa40c9a86497cd4a76199c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"5be156abe4b24738a313d6c407663381": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"e267554d2c03444ab8c1f66cc9f14344": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_0fd228985a4c4e4791497da8c3efc623",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_09eb7de0c90d4c55a9fb4517a6dd1a1c",
"IPY_MODEL_c359d87de02e4aa28dabcf19f4955cf1"
]
}
},
"0fd228985a4c4e4791497da8c3efc623": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"09eb7de0c90d4c55a9fb4517a6dd1a1c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_39e405538bda45d481b867a1293d1a7c",
"_dom_classes": [],
"description": "Extraction completed...: 100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 1,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 1,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_766f93069a3747fd83c46edcf67bfb55"
}
},
"c359d87de02e4aa28dabcf19f4955cf1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_1a2124f1414b411681eddc5f975bc068",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 2/2 [00:39<00:00, 19.84s/ file]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_d483f595d8ac496194a292308c90f7c7"
}
},
"39e405538bda45d481b867a1293d1a7c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"766f93069a3747fd83c46edcf67bfb55": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"1a2124f1414b411681eddc5f975bc068": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"d483f595d8ac496194a292308c90f7c7": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"8c9d84ef99ce41cfa940468ab97bd643": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_95ef71f4eeab4dc993ce5424bfd7d6b0",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_84700e99131d47f0b00c88336fafb539",
"IPY_MODEL_86e2134b0917456887b024f45a378099"
]
}
},
"95ef71f4eeab4dc993ce5424bfd7d6b0": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"84700e99131d47f0b00c88336fafb539": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_733eb6c8496f4298b7e1caf86ce0e75d",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "info",
"max": 1,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 1,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_6f930d85e4564b6aa2f579bed2f5c8e1"
}
},
"86e2134b0917456887b024f45a378099": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_1cffeff74f3a47afaad3bc08e13c8051",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 4952/0 [00:09<00:00, 534.85 examples/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_676b48c57421443b9b13871e387099aa"
}
},
"733eb6c8496f4298b7e1caf86ce0e75d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"6f930d85e4564b6aa2f579bed2f5c8e1": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"1cffeff74f3a47afaad3bc08e13c8051": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"676b48c57421443b9b13871e387099aa": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"17beaa0b366f4c8a868624b761986bde": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_77469814c3d44e25b83133e0d218f6c1",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_410941b64d2e4a1899e406a50d34f0e0",
"IPY_MODEL_d70f7f2b0bb640959cc42bf13d8ac7cd"
]
}
},
"77469814c3d44e25b83133e0d218f6c1": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"410941b64d2e4a1899e406a50d34f0e0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_0e3b1af66aee459a8e8e924626495100",
"_dom_classes": [],
"description": " 98%",
"_model_name": "FloatProgressModel",
"bar_style": "danger",
"max": 4952,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 4833,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_0908d42b4dc64f24a6eea2e65552c1a0"
}
},
"d70f7f2b0bb640959cc42bf13d8ac7cd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_a165d1ac7cbc4cdc9aa08f634f06ec19",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 4833/4952 [00:01<00:00, 4693.23 examples/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_719b2709b4fa46d2a41c5a3fa26b1208"
}
},
"0e3b1af66aee459a8e8e924626495100": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"0908d42b4dc64f24a6eea2e65552c1a0": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"a165d1ac7cbc4cdc9aa08f634f06ec19": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"719b2709b4fa46d2a41c5a3fa26b1208": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"dd2b2514c7f34f7e84711446e3bb3fda": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_47b778be401f47d6be56d05772173546",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_d0887ffbf27847d5a989820f4b37e8c2",
"IPY_MODEL_60f0fd9c328d4c99a3ab7723413b560d"
]
}
},
"47b778be401f47d6be56d05772173546": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"d0887ffbf27847d5a989820f4b37e8c2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_6d344c7f8c9a4b96beb6fa86ddea420b",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "info",
"max": 1,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 1,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_a5039d6aef3043ab95c59c913f307b61"
}
},
"60f0fd9c328d4c99a3ab7723413b560d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_801720bae1f54c879a64ee894b449340",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 2501/0 [00:04<00:00, 535.37 examples/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_45847174a2664049a5fba7e5eb540045"
}
},
"6d344c7f8c9a4b96beb6fa86ddea420b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"a5039d6aef3043ab95c59c913f307b61": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"801720bae1f54c879a64ee894b449340": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"45847174a2664049a5fba7e5eb540045": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"46a5a8506b424b1e9449bc8347b82e09": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_25bca32e11a04c9db44fa623ab53f7b8",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_092c48c6a2d34da8811b78fae9a7f82b",
"IPY_MODEL_2f6a2d9925a243529dcfea1374c58a9a"
]
}
},
"25bca32e11a04c9db44fa623ab53f7b8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"092c48c6a2d34da8811b78fae9a7f82b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_527f7a4dfa6147f58b4e933924851cdd",
"_dom_classes": [],
"description": " 90%",
"_model_name": "FloatProgressModel",
"bar_style": "danger",
"max": 2501,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 2262,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_e693abc4b6be4675ab45ba5be581f80e"
}
},
"2f6a2d9925a243529dcfea1374c58a9a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_83974c17de884718af9a74dcfd417ac2",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 2262/2501 [00:00<00:00, 4381.29 examples/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_0032ba43a36e4bf3931be77304ab677e"
}
},
"527f7a4dfa6147f58b4e933924851cdd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"e693abc4b6be4675ab45ba5be581f80e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"83974c17de884718af9a74dcfd417ac2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"0032ba43a36e4bf3931be77304ab677e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"b1fd20e7c402489e876d74d35a2517d4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_a6bec0adbcb54daa87287fde70964557",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_2c40344c57614303a0388b44d841eab0",
"IPY_MODEL_7739161f06404f50ae896685f3e96112"
]
}
},
"a6bec0adbcb54daa87287fde70964557": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"2c40344c57614303a0388b44d841eab0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_d3a23be3e72b4a1bb9030c44a9da2e8a",
"_dom_classes": [],
"description": "",
"_model_name": "FloatProgressModel",
"bar_style": "info",
"max": 1,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 1,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_f57e4f7b71594724b68f8c13e0cb6dc6"
}
},
"7739161f06404f50ae896685f3e96112": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_c87d1b9b1f50445096cca29fff6f9662",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 2510/0 [00:04<00:00, 513.98 examples/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_6623b9366f9d4337b4dd43b795ee44c8"
}
},
"d3a23be3e72b4a1bb9030c44a9da2e8a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"f57e4f7b71594724b68f8c13e0cb6dc6": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"c87d1b9b1f50445096cca29fff6f9662": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"6623b9366f9d4337b4dd43b795ee44c8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"aa259e17ec8b44a89c858f264caba561": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_34bd96f53fd34847b1f3fd6d87af5f83",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_6ddac74a9a41402dbd5fb49576ec7c92",
"IPY_MODEL_e432648adadc4626bc8ef0b0da9fc163"
]
}
},
"34bd96f53fd34847b1f3fd6d87af5f83": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"6ddac74a9a41402dbd5fb49576ec7c92": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_aaa0856b95c9408691232f0809e65ad5",
"_dom_classes": [],
"description": " 87%",
"_model_name": "FloatProgressModel",
"bar_style": "danger",
"max": 2510,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 2185,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_e7f61a16692d436e9d6763385e2820c9"
}
},
"e432648adadc4626bc8ef0b0da9fc163": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_6982ccb02cfb4fb592aed553c4a5a5ab",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 2185/2510 [00:00<00:00, 4230.40 examples/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_a532af0adda44d0090cee1b0a6a25183"
}
},
"aaa0856b95c9408691232f0809e65ad5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"e7f61a16692d436e9d6763385e2820c9": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"6982ccb02cfb4fb592aed553c4a5a5ab": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"a532af0adda44d0090cee1b0a6a25183": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
}
}
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/NobuoTsukamoto/f48df315be490dcf0c76375c2e04ddb1/export_tfv2_lite_models.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "FifIndY2JqGY"
},
"source": [
"# Export **\"TensorFlow 2 Detection Models\"** to TF-Lite, Edge TPU Models\n",
"\n",
"This notebook converts the pre-trained model of [\"TensorFlow 2 Detection Model Zoo\"](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md) into TF-Lite or Edge TPU Model.<br>\n",
"It works with the following repository code.<br>\n",
"https://github.com/NobuoTsukamoto/edge_tpu/tree/master/detection\n",
"<br><br>\n",
"The following models are targeted.<br>\n",
"\n",
"|Model Name (COCO17)|INT8|EdgeTPU|\n",
"|:---|:---|:---|\n",
"|SSD MobileNet v2 300x300 *1|Yes|Yes|\n",
"|SSD MobileNet V1 FPN 640x640|Yes|Yes|\n",
"|SSD MobileNet V2 FPNLite 320x320|Yes|Yes|\n",
"|SSD MobileNet V2 FPNLite 640x640|Yes|Yes|\n",
"|SSD ResNet50 V1 FPN 640x640 (RetinaNet50)|Yes|NG *2|\n",
"|SSD ResNet50 V1 FPN 1024x1024 (RetinaNet50)|Yes|NG *2|\n",
"|SSD ResNet101 V1 FPN 640x640 (RetinaNet101)|Yes|NG *2|\n",
"|SSD ResNet101 V1 FPN 1024x1024 (RetinaNet101)|Yes|NG *2|\n",
"|SSD ResNet152 V1 FPN 640x640 (RetinaNet152)|Yes|NG *2|\n",
"|SSD ResNet152 V1 FPN 1024x1024 (RetinaNet152)|Yes|NG *2|\n",
"\n",
"*1: \"SSD MobileNet v2 320x320\", but the content of the pipline.config file is 300x300. \n",
"*2: Edge TPU Compiler version 15.0.340273435 is Internal compiler error. Aborting! \n"
]
},
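{
"cell_type": "markdown",
"metadata": {},
"source": [
"The conversion itself follows the standard TF-Lite post-training integer quantization flow, and the resulting .tflite file is then passed to the Edge TPU Compiler. The cell below is only a minimal sketch of that flow, not the exact code used by this notebook: `saved_model_dir` is an assumed path to an exported SavedModel, and the random-image `representative_dataset` is a placeholder for real calibration data (the actual conversion scripts are in the linked repository).\n",
"\n",
"```python\n",
"import tensorflow as tf\n",
"\n",
"saved_model_dir = 'exported_model/saved_model'  # placeholder path to an exported SavedModel\n",
"\n",
"def representative_dataset():\n",
"    # Placeholder calibration data: random images shaped like the model input.\n",
"    for _ in range(100):\n",
"        yield [tf.random.uniform((1, 300, 300, 3), 0.0, 1.0, dtype=tf.float32)]\n",
"\n",
"converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\n",
"converter.optimizations = [tf.lite.Optimize.DEFAULT]\n",
"converter.representative_dataset = representative_dataset\n",
"# Enforce INT8 kernels and integer input so the Edge TPU Compiler can map the ops\n",
"# (the detection post-processing op may require relaxing these settings).\n",
"converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n",
"converter.inference_input_type = tf.uint8\n",
"\n",
"tflite_model = converter.convert()\n",
"with open('model_int8.tflite', 'wb') as f:\n",
"    f.write(tflite_model)\n",
"```"
]
},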
{
"cell_type": "markdown",
"metadata": {
"id": "GRT8DYnKYS85"
},
"source": [
"# Setup\n",
"- Mount Google Drive\n",
"- TensorFlow Object detection API (TF2.x)\n",
"- EdgeTPU Compiler "
]
},
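{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough guide, the last two setup items are typically installed in Colab with commands along the following lines. This is only a sketch (the standard install steps published by TensorFlow and Coral); the actual setup cells of this notebook may differ.\n",
"\n",
"```\n",
"# TensorFlow Object Detection API (TF2)\n",
"!git clone --depth 1 https://github.com/tensorflow/models.git\n",
"%cd models/research\n",
"!protoc object_detection/protos/*.proto --python_out=.\n",
"!cp object_detection/packages/tf2/setup.py .\n",
"!pip install .\n",
"\n",
"# EdgeTPU Compiler (Coral apt repository)\n",
"!curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -\n",
"!echo \"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\" | tee /etc/apt/sources.list.d/coral-edgetpu.list\n",
"!apt-get update && apt-get install -y edgetpu-compiler\n",
"```"
]
},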
{
"cell_type": "markdown",
"metadata": {
"id": "IFU6uBFgMDbo"
},
"source": [
"## Mount Google Drive"
]
},
{
"cell_type": "code",
"metadata": {
"id": "BF5HKw_M2QsF",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "b48d4523-2513-4750-940c-8dd0e567016b"
},
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": [
"Mounted at /content/drive\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "9CToY_VmncWq"
},
"source": [
"## Using TensorFlow 2.x"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "rdIbeh3eLEHq"
},
"source": [
"Note: TensorFlow v2.3.0 gives an error and cannot be converted.\n",
"```\n",
"/usr/local/lib/python3.6/dist-packages/tensorflow/lite/python/optimize/calibrator.py in __init__(self, model_content)\n",
" 52 _calibration_wrapper.CalibrationWrapper(model_content))\n",
" 53 except Exception as e:\n",
"---> 54 raise ValueError(\"Failed to parse the model: %s.\" % e)\n",
" 55 if not self._calibrator:\n",
" 56 raise ValueError(\"Failed to parse the model.\")\n",
"\n",
"ValueError: Failed to parse the model: pybind11::init(): factory function returned nullptr.\n",
"```"
]
},
{
"cell_type": "code",
"metadata": {
"id": "y7erbNmjnWLF",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "61f6a2f8-e081-4918-ffd5-2dff2c0229c7"
},
"source": [
"#%tensorflow_version 2.x\n",
"!pip install tf-nightly"
],
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"text": [
"Collecting tf-nightly\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/67/5d/3e29f371119466ff04f84c041182c10384c327723b6764b14facc403ceab/tf_nightly-2.5.0.dev20210321-cp37-cp37m-manylinux2010_x86_64.whl (452.0MB)\n",
"\u001b[K |████████████████████████████████| 452.0MB 35kB/s \n",
"\u001b[?25hRequirement already satisfied: wrapt~=1.12.1 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (1.12.1)\n",
"Collecting h5py~=3.1.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/9d/74/9eae2bedd8201ab464308f42c601a12d79727a1c87f0c867fdefb212c6cf/h5py-3.1.0-cp37-cp37m-manylinux1_x86_64.whl (4.0MB)\n",
"\u001b[K |████████████████████████████████| 4.0MB 32.9MB/s \n",
"\u001b[?25hRequirement already satisfied: flatbuffers~=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (1.12)\n",
"Collecting tb-nightly~=2.5.0.a\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/0c/2f/e07b1ea86e5608f2e0030ce7376a8052162fd4b763475ea680bde9011219/tb_nightly-2.5.0a20210321-py3-none-any.whl (6.0MB)\n",
"\u001b[K |████████████████████████████████| 6.0MB 39.5MB/s \n",
"\u001b[?25hRequirement already satisfied: keras-preprocessing~=1.1.2 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (1.1.2)\n",
"Requirement already satisfied: numpy~=1.19.2 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (1.19.5)\n",
"Requirement already satisfied: termcolor~=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (1.1.0)\n",
"Requirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (3.12.4)\n",
"Requirement already satisfied: wheel~=0.35 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (0.36.2)\n",
"Collecting gast==0.4.0\n",
" Downloading https://files.pythonhosted.org/packages/b6/48/583c032b79ae5b3daa02225a675aeb673e58d2cb698e78510feceb11958c/gast-0.4.0-py3-none-any.whl\n",
"Requirement already satisfied: google-pasta~=0.2 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (0.2.0)\n",
"Collecting tf-estimator-nightly~=2.5.0.dev\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/e4/35/c13b080a8060d8ff313b2f325bb62820320120da196b313284e8ec968268/tf_estimator_nightly-2.5.0.dev2021032101-py2.py3-none-any.whl (462kB)\n",
"\u001b[K |████████████████████████████████| 471kB 25.7MB/s \n",
"\u001b[?25hRequirement already satisfied: absl-py~=0.10 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (0.10.0)\n",
"Requirement already satisfied: astunparse~=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (1.6.3)\n",
"Requirement already satisfied: opt-einsum~=3.3.0 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (3.3.0)\n",
"Collecting grpcio~=1.34.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/d5/d1/f38a91d8724706427fe973a7dfa11e938cee98aa7196b03d870a25a08bab/grpcio-1.34.1-cp37-cp37m-manylinux2014_x86_64.whl (4.0MB)\n",
"\u001b[K |████████████████████████████████| 4.0MB 29.8MB/s \n",
"\u001b[?25hRequirement already satisfied: typing-extensions~=3.7.4 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (3.7.4.3)\n",
"Requirement already satisfied: six~=1.15.0 in /usr/local/lib/python3.7/dist-packages (from tf-nightly) (1.15.0)\n",
"Collecting cached-property; python_version < \"3.8\"\n",
" Downloading https://files.pythonhosted.org/packages/48/19/f2090f7dad41e225c7f2326e4cfe6fff49e57dedb5b53636c9551f86b069/cached_property-1.5.2-py2.py3-none-any.whl\n",
"Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from tb-nightly~=2.5.0.a->tf-nightly) (2.23.0)\n",
"Collecting tensorboard-data-server<0.6.0,>=0.5.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/b2/81/a227983b2d5d858611e460bbf4246b432ca0072c58527b98e3a0b4989ede/tensorboard_data_server-0.5.0-py3-none-manylinux2010_x86_64.whl (3.9MB)\n",
"\u001b[K |████████████████████████████████| 3.9MB 31.5MB/s \n",
"\u001b[?25hRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tb-nightly~=2.5.0.a->tf-nightly) (0.4.3)\n",
"Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tb-nightly~=2.5.0.a->tf-nightly) (1.27.1)\n",
"Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tb-nightly~=2.5.0.a->tf-nightly) (1.8.0)\n",
"Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tb-nightly~=2.5.0.a->tf-nightly) (54.1.2)\n",
"Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tb-nightly~=2.5.0.a->tf-nightly) (3.3.4)\n",
"Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tb-nightly~=2.5.0.a->tf-nightly) (1.0.1)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tb-nightly~=2.5.0.a->tf-nightly) (2020.12.5)\n",
"Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tb-nightly~=2.5.0.a->tf-nightly) (1.24.3)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tb-nightly~=2.5.0.a->tf-nightly) (2.10)\n",
"Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tb-nightly~=2.5.0.a->tf-nightly) (3.0.4)\n",
"Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tb-nightly~=2.5.0.a->tf-nightly) (1.3.0)\n",
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tb-nightly~=2.5.0.a->tf-nightly) (0.2.8)\n",
"Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tb-nightly~=2.5.0.a->tf-nightly) (4.2.1)\n",
"Requirement already satisfied: rsa<5,>=3.1.4; python_version >= \"3.6\" in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tb-nightly~=2.5.0.a->tf-nightly) (4.7.2)\n",
"Requirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tb-nightly~=2.5.0.a->tf-nightly) (3.7.2)\n",
"Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tb-nightly~=2.5.0.a->tf-nightly) (3.1.0)\n",
"Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tb-nightly~=2.5.0.a->tf-nightly) (0.4.8)\n",
"Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tb-nightly~=2.5.0.a->tf-nightly) (3.4.1)\n",
"\u001b[31mERROR: tensorflow 2.4.1 has requirement gast==0.3.3, but you'll have gast 0.4.0 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: tensorflow 2.4.1 has requirement grpcio~=1.32.0, but you'll have grpcio 1.34.1 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: tensorflow 2.4.1 has requirement h5py~=2.10.0, but you'll have h5py 3.1.0 which is incompatible.\u001b[0m\n",
"Installing collected packages: cached-property, h5py, tensorboard-data-server, grpcio, tb-nightly, gast, tf-estimator-nightly, tf-nightly\n",
" Found existing installation: h5py 2.10.0\n",
" Uninstalling h5py-2.10.0:\n",
" Successfully uninstalled h5py-2.10.0\n",
" Found existing installation: grpcio 1.32.0\n",
" Uninstalling grpcio-1.32.0:\n",
" Successfully uninstalled grpcio-1.32.0\n",
" Found existing installation: gast 0.3.3\n",
" Uninstalling gast-0.3.3:\n",
" Successfully uninstalled gast-0.3.3\n",
"Successfully installed cached-property-1.5.2 gast-0.4.0 grpcio-1.34.1 h5py-3.1.0 tb-nightly-2.5.0a20210321 tensorboard-data-server-0.5.0 tf-estimator-nightly-2.5.0.dev2021032101 tf-nightly-2.5.0.dev20210321\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "xsNJDI7NFnry"
},
"source": [
"import os\n",
"import subprocess\n",
"from collections import namedtuple\n",
"\n",
"import numpy as np\n",
"\n",
"import tensorflow as tf\n",
"import tensorflow_datasets as tfds"
],
"execution_count": 3,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "xHKPWSLtVWJU",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "8040e2fc-c6ea-4602-9b4b-933a6c6241a7"
},
"source": [
"tf.__version__"
],
"execution_count": 4,
"outputs": [
{
"output_type": "execute_result",
"data": {
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
},
"text/plain": [
"'2.5.0-dev20210321'"
]
},
"metadata": {
"tags": []
},
"execution_count": 4
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 416,
"referenced_widgets": [
"c6c33c8802b24f1eb834bd27514d2e6c",
"1eb6f25adc6a4d6d8f565c4ebf7d3bb7",
"fe790de6b04348efbebb4f5b915ac89e",
"ee8a2ae32252413d80a7fc6d1af3aaf3",
"63536abdfa834be0820f777ccc9c6482",
"97667762970d47c0b82a7e467e267661",
"5040edd8595a4bd8ae068e8dd617d4c8",
"e483b80d89a14b5a8dd2f60dd4427877",
"a2098117f4d740479374cb17efd6a300",
"eabd27cb9f9e48bea062b93716afd797",
"d5d7bf792e114cff8d4aad94764238ae",
"4f5728877fb847529b539db1e02598ab",
"b901e45d20aa4b83b1136365e8f28a23",
"f3f84dc56a684b4a89689f2fe42ce0c5",
"a02784634caa40c9a86497cd4a76199c",
"5be156abe4b24738a313d6c407663381",
"e267554d2c03444ab8c1f66cc9f14344",
"0fd228985a4c4e4791497da8c3efc623",
"09eb7de0c90d4c55a9fb4517a6dd1a1c",
"c359d87de02e4aa28dabcf19f4955cf1",
"39e405538bda45d481b867a1293d1a7c",
"766f93069a3747fd83c46edcf67bfb55",
"1a2124f1414b411681eddc5f975bc068",
"d483f595d8ac496194a292308c90f7c7",
"8c9d84ef99ce41cfa940468ab97bd643",
"95ef71f4eeab4dc993ce5424bfd7d6b0",
"84700e99131d47f0b00c88336fafb539",
"86e2134b0917456887b024f45a378099",
"733eb6c8496f4298b7e1caf86ce0e75d",
"6f930d85e4564b6aa2f579bed2f5c8e1",
"1cffeff74f3a47afaad3bc08e13c8051",
"676b48c57421443b9b13871e387099aa",
"17beaa0b366f4c8a868624b761986bde",
"77469814c3d44e25b83133e0d218f6c1",
"410941b64d2e4a1899e406a50d34f0e0",
"d70f7f2b0bb640959cc42bf13d8ac7cd",
"0e3b1af66aee459a8e8e924626495100",
"0908d42b4dc64f24a6eea2e65552c1a0",
"a165d1ac7cbc4cdc9aa08f634f06ec19",
"719b2709b4fa46d2a41c5a3fa26b1208",
"dd2b2514c7f34f7e84711446e3bb3fda",
"47b778be401f47d6be56d05772173546",
"d0887ffbf27847d5a989820f4b37e8c2",
"60f0fd9c328d4c99a3ab7723413b560d",
"6d344c7f8c9a4b96beb6fa86ddea420b",
"a5039d6aef3043ab95c59c913f307b61",
"801720bae1f54c879a64ee894b449340",
"45847174a2664049a5fba7e5eb540045",
"46a5a8506b424b1e9449bc8347b82e09",
"25bca32e11a04c9db44fa623ab53f7b8",
"092c48c6a2d34da8811b78fae9a7f82b",
"2f6a2d9925a243529dcfea1374c58a9a",
"527f7a4dfa6147f58b4e933924851cdd",
"e693abc4b6be4675ab45ba5be581f80e",
"83974c17de884718af9a74dcfd417ac2",
"0032ba43a36e4bf3931be77304ab677e",
"b1fd20e7c402489e876d74d35a2517d4",
"a6bec0adbcb54daa87287fde70964557",
"2c40344c57614303a0388b44d841eab0",
"7739161f06404f50ae896685f3e96112",
"d3a23be3e72b4a1bb9030c44a9da2e8a",
"f57e4f7b71594724b68f8c13e0cb6dc6",
"c87d1b9b1f50445096cca29fff6f9662",
"6623b9366f9d4337b4dd43b795ee44c8",
"aa259e17ec8b44a89c858f264caba561",
"34bd96f53fd34847b1f3fd6d87af5f83",
"6ddac74a9a41402dbd5fb49576ec7c92",
"e432648adadc4626bc8ef0b0da9fc163",
"aaa0856b95c9408691232f0809e65ad5",
"e7f61a16692d436e9d6763385e2820c9",
"6982ccb02cfb4fb592aed553c4a5a5ab",
"a532af0adda44d0090cee1b0a6a25183"
]
},
"id": "V8hvSyWrVjey",
"outputId": "9cea3010-c808-49a9-a9bc-bb38ab90524e"
},
"source": [
"raw_test_data, info = tfds.load(name=\"voc/2007\",\n",
" with_info=True,\n",
" split='test')"
],
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"text": [
"\u001b[1mDownloading and preparing dataset voc/2007/4.0.0 (download: 868.85 MiB, generated: Unknown size, total: 868.85 MiB) to /root/tensorflow_datasets/voc/2007/4.0.0...\u001b[0m\n"
],
"name": "stdout"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "c6c33c8802b24f1eb834bd27514d2e6c",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=1.0, bar_style='info', description='Dl Completed...', max=1.0, style=Progre…"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "a2098117f4d740479374cb17efd6a300",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=1.0, bar_style='info', description='Dl Size...', max=1.0, style=ProgressSty…"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e267554d2c03444ab8c1f66cc9f14344",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=1.0, bar_style='info', description='Extraction completed...', max=1.0, styl…"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\n",
"\n",
"\n",
"\n",
"\n",
"\n"
],
"name": "stdout"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "8c9d84ef99ce41cfa940468ab97bd643",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=1.0, bar_style='info', max=1.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\rShuffling and writing examples to /root/tensorflow_datasets/voc/2007/4.0.0.incompleteKY2B3M/voc-test.tfrecord\n"
],
"name": "stdout"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "17beaa0b366f4c8a868624b761986bde",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=4952.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "dd2b2514c7f34f7e84711446e3bb3fda",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=1.0, bar_style='info', max=1.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\rShuffling and writing examples to /root/tensorflow_datasets/voc/2007/4.0.0.incompleteKY2B3M/voc-train.tfrecord\n"
],
"name": "stdout"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "46a5a8506b424b1e9449bc8347b82e09",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=2501.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\r"
],
"name": "stdout"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b1fd20e7c402489e876d74d35a2517d4",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=1.0, bar_style='info', max=1.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\rShuffling and writing examples to /root/tensorflow_datasets/voc/2007/4.0.0.incompleteKY2B3M/voc-validation.tfrecord\n"
],
"name": "stdout"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "aa259e17ec8b44a89c858f264caba561",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=2510.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\u001b[1mDataset voc downloaded and prepared to /root/tensorflow_datasets/voc/2007/4.0.0. Subsequent calls will reuse this data.\u001b[0m\n",
"\r"
],
"name": "stdout"
}
]
},
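    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "The dataset loaded above can serve as calibration data for full-integer quantization. The block below is a minimal sketch of a representative-dataset generator built from `raw_test_data`; the 300x300 input size and the simple 0-1 normalization are illustrative assumptions, since each model's pipeline.config defines its own image resizer and preprocessing.\n",
        "```\n",
        "def representative_dataset_gen(num_samples=100, input_size=(300, 300)):\n",
        "    # Yield one preprocessed image per calibration step from the VOC 2007\n",
        "    # test split loaded above (raw_test_data). Size/normalization are assumptions.\n",
        "    for example in raw_test_data.take(num_samples):\n",
        "        image = tf.image.resize(example['image'], input_size)\n",
        "        image = tf.cast(image, tf.float32) / 255.0\n",
        "        yield [tf.expand_dims(image, axis=0)]\n",
        "```"
      ]
    },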
{
"cell_type": "markdown",
"metadata": {
"id": "9rv_47YDnlka"
},
"source": [
"## Setup TensorFlow Object detection API"
]
},
{
"cell_type": "code",
"metadata": {
"id": "j8wSH6LEnkWh",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "a8cbbbc7-2698-410b-e8cb-7271e3844fcc"
},
"source": [
"!git clone https://github.com/tensorflow/models.git\n",
"%cd models/\n",
"!git checkout 0ea84d6df6220552cd5229a0181c9e420f815408"
],
"execution_count": 6,
"outputs": [
{
"output_type": "stream",
"text": [
"Cloning into 'models'...\n",
"remote: Enumerating objects: 32, done.\u001b[K\n",
"remote: Counting objects: 100% (32/32), done.\u001b[K\n",
"remote: Compressing objects: 100% (31/31), done.\u001b[K\n",
"remote: Total 53961 (delta 12), reused 21 (delta 1), pack-reused 53929\u001b[K\n",
"Receiving objects: 100% (53961/53961), 569.78 MiB | 25.54 MiB/s, done.\n",
"Resolving deltas: 100% (37058/37058), done.\n",
"/content/models\n",
"Note: checking out '0ea84d6df6220552cd5229a0181c9e420f815408'.\n",
"\n",
"You are in 'detached HEAD' state. You can look around, make experimental\n",
"changes and commit them, and you can discard any commits you make in this\n",
"state without impacting any branches by performing another checkout.\n",
"\n",
"If you want to create a new branch to retain commits you create, you may\n",
"do so (now or later) by using -b with the checkout command again. Example:\n",
"\n",
" git checkout -b <new-branch-name>\n",
"\n",
"HEAD is now at 0ea84d6d Changing the default number of channels for lighter-weight hourglass backbones.\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "AJU35X1coeWG",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "00ff4425-fa30-4696-f190-5b56cc962ffd"
},
"source": [
"!apt --quiet update\n",
"!apt install --quiet protobuf-compiler"
],
"execution_count": 7,
"outputs": [
{
"output_type": "stream",
"text": [
"Get:1 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease [3,626 B]\n",
"Ign:2 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\n",
"Get:3 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\n",
"Ign:4 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\n",
"Hit:5 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\n",
"Hit:6 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\n",
"Get:7 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease [15.9 kB]\n",
"Hit:8 http://archive.ubuntu.com/ubuntu bionic InRelease\n",
"Get:10 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]\n",
"Hit:12 http://ppa.launchpad.net/cran/libgit2/ubuntu bionic InRelease\n",
"Get:13 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\n",
"Get:14 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [339 kB]\n",
"Hit:15 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic InRelease\n",
"Get:16 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2,439 kB]\n",
"Get:17 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease [21.3 kB]\n",
"Get:18 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [2,009 kB]\n",
"Get:19 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main Sources [1,747 kB]\n",
"Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [368 kB]\n",
"Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2,165 kB]\n",
"Get:22 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main amd64 Packages [894 kB]\n",
"Get:23 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic/main amd64 Packages [49.4 kB]\n",
"Fetched 10.3 MB in 7s (1,453 kB/s)\n",
"Reading package lists...\n",
"Building dependency tree...\n",
"Reading state information...\n",
"53 packages can be upgraded. Run 'apt list --upgradable' to see them.\n",
"Reading package lists...\n",
"Building dependency tree...\n",
"Reading state information...\n",
"protobuf-compiler is already the newest version (3.0.0-9.1ubuntu1).\n",
"0 upgraded, 0 newly installed, 0 to remove and 53 not upgraded.\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "mgTuKF-Ln6Xy",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "ae0e3797-9da0-409a-eb3b-798982182761"
},
"source": [
"%cd research\n",
"!protoc object_detection/protos/*.proto --python_out=.\n",
"!cp object_detection/packages/tf2/setup.py .\n",
"!python3 -m pip install ."
],
"execution_count": 8,
"outputs": [
{
"output_type": "stream",
"text": [
"/content/models/research\n",
"Processing /content/models/research\n",
"Collecting avro-python3\n",
" Downloading https://files.pythonhosted.org/packages/cc/97/7a6970380ca8db9139a3cc0b0e3e0dd3e4bc584fb3644e1d06e71e1a55f0/avro-python3-1.10.2.tar.gz\n",
"Collecting apache-beam\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/e1/e7/d6e5a3786d9a037a38af966bf154bcd6cb3cbea2edffda00cf6c417cc9a2/apache_beam-2.28.0-cp37-cp37m-manylinux2010_x86_64.whl (9.0MB)\n",
"\u001b[K |████████████████████████████████| 9.0MB 6.6MB/s \n",
"\u001b[?25hRequirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (7.0.0)\n",
"Requirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (4.2.6)\n",
"Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (3.2.2)\n",
"Requirement already satisfied: Cython in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.29.22)\n",
"Requirement already satisfied: contextlib2 in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.5.5)\n",
"Collecting tf-slim\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/02/97/b0f4a64df018ca018cc035d44f2ef08f91e2e8aa67271f6f19633a015ff7/tf_slim-1.1.0-py2.py3-none-any.whl (352kB)\n",
"\u001b[K |████████████████████████████████| 358kB 40.3MB/s \n",
"\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.15.0)\n",
"Requirement already satisfied: pycocotools in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (2.0.2)\n",
"Collecting lvis\n",
" Downloading https://files.pythonhosted.org/packages/72/b6/1992240ab48310b5360bfdd1d53163f43bb97d90dc5dc723c67d41c38e78/lvis-0.5.3-py3-none-any.whl\n",
"Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.4.1)\n",
"Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.1.5)\n",
"Collecting tf-models-official\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/57/4a/23a08f8fd2747867ee223612e219eeb0d11c36116601d99b55ef3c72e707/tf_models_official-2.4.0-py2.py3-none-any.whl (1.1MB)\n",
"\u001b[K |████████████████████████████████| 1.1MB 43.8MB/s \n",
"\u001b[?25hRequirement already satisfied: protobuf<4,>=3.12.2 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (3.12.4)\n",
"Requirement already satisfied: grpcio<2,>=1.29.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.34.1)\n",
"Requirement already satisfied: python-dateutil<3,>=2.8.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (2.8.1)\n",
"Requirement already satisfied: oauth2client<5,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (4.1.3)\n",
"Requirement already satisfied: pymongo<4.0.0,>=3.8.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (3.11.3)\n",
"Collecting mock<3.0.0,>=1.0.1\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/e6/35/f187bdf23be87092bd0f1200d43d23076cee4d0dec109f195173fd3ebc79/mock-2.0.0-py2.py3-none-any.whl (56kB)\n",
"\u001b[K |████████████████████████████████| 61kB 7.3MB/s \n",
"\u001b[?25hRequirement already satisfied: pydot<2,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.3.0)\n",
"Collecting dill<0.3.2,>=0.3.1.1\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz (151kB)\n",
"\u001b[K |████████████████████████████████| 153kB 29.7MB/s \n",
"\u001b[?25hRequirement already satisfied: crcmod<2.0,>=1.7 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.7)\n",
"Collecting pyarrow<3.0.0,>=0.15.1\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/2e/8d/c002e27767595f22aa09ed0d364327922f673d12b36526c967a2bf6b2ed7/pyarrow-2.0.0-cp37-cp37m-manylinux2014_x86_64.whl (17.7MB)\n",
"\u001b[K |████████████████████████████████| 17.7MB 205kB/s \n",
"\u001b[?25hRequirement already satisfied: httplib2<0.18.0,>=0.8 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (0.17.4)\n",
"Collecting hdfs<3.0.0,>=2.1.0\n",
" Downloading https://files.pythonhosted.org/packages/08/f7/4c3fad73123a24d7394b6f40d1ec9c1cbf2e921cfea1797216ffd0a51fb1/hdfs-2.6.0-py3-none-any.whl\n",
"Requirement already satisfied: pytz>=2018.3 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (2018.9)\n",
"Collecting future<1.0.0,>=0.18.2\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz (829kB)\n",
"\u001b[K |████████████████████████████████| 829kB 37.5MB/s \n",
"\u001b[?25hRequirement already satisfied: typing-extensions<3.8.0,>=3.7.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (3.7.4.3)\n",
"Collecting requests<3.0.0,>=2.24.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/29/c1/24814557f1d22c56d50280771a17307e6bf87b70727d975fd6b2ce6b014a/requests-2.25.1-py2.py3-none-any.whl (61kB)\n",
"\u001b[K |████████████████████████████████| 61kB 6.6MB/s \n",
"\u001b[?25hCollecting fastavro<2,>=0.21.4\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/e3/ff/8f147013c646128680a1b5c047f1db7ebbe171bb8ff3396937e640c0825d/fastavro-1.3.4-cp37-cp37m-manylinux2014_x86_64.whl (2.2MB)\n",
"\u001b[K |████████████████████████████████| 2.2MB 41.8MB/s \n",
"\u001b[?25hRequirement already satisfied: numpy<1.20.0,>=1.14.3 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.19.5)\n",
"Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->object-detection==0.1) (1.3.1)\n",
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->object-detection==0.1) (0.10.0)\n",
"Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->object-detection==0.1) (2.4.7)\n",
"Requirement already satisfied: absl-py>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from tf-slim->object-detection==0.1) (0.10.0)\n",
"Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.7/dist-packages (from pycocotools->object-detection==0.1) (54.1.2)\n",
"Requirement already satisfied: opencv-python>=4.1.0.25 in /usr/local/lib/python3.7/dist-packages (from lvis->object-detection==0.1) (4.1.2.30)\n",
"Requirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (1.12.8)\n",
"Collecting seqeval\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/9d/2d/233c79d5b4e5ab1dbf111242299153f3caddddbb691219f363ad55ce783d/seqeval-1.2.2.tar.gz (43kB)\n",
"\u001b[K |████████████████████████████████| 51kB 5.9MB/s \n",
"\u001b[?25hRequirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (1.5.10)\n",
"Requirement already satisfied: tensorflow>=2.4.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (2.4.1)\n",
"Requirement already satisfied: google-cloud-bigquery>=0.31.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (1.21.0)\n",
"Requirement already satisfied: gin-config in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (0.4.0)\n",
"Collecting opencv-python-headless\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/6d/6d/92f377bece9b0ec9c893081dbe073a65b38d7ac12ef572b8f70554d08760/opencv_python_headless-4.5.1.48-cp37-cp37m-manylinux2014_x86_64.whl (37.6MB)\n",
"\u001b[K |████████████████████████████████| 37.6MB 79kB/s \n",
"\u001b[?25hRequirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (0.11.0)\n",
"Requirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (4.0.1)\n",
"Collecting pyyaml>=5.1\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/7a/a5/393c087efdc78091afa2af9f1378762f9821c9c1d7a22c5753fb5ac5f97a/PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl (636kB)\n",
"\u001b[K |████████████████████████████████| 645kB 42.2MB/s \n",
"\u001b[?25hCollecting tensorflow-addons\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/74/e3/56d2fe76f0bb7c88ed9b2a6a557e25e83e252aec08f13de34369cd850a0b/tensorflow_addons-0.12.1-cp37-cp37m-manylinux2010_x86_64.whl (703kB)\n",
"\u001b[K |████████████████████████████████| 706kB 40.4MB/s \n",
"\u001b[?25hCollecting sentencepiece\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/f5/99/e0808cb947ba10f575839c43e8fafc9cc44e4a7a2c8f79c60db48220a577/sentencepiece-0.1.95-cp37-cp37m-manylinux2014_x86_64.whl (1.2MB)\n",
"\u001b[K |████████████████████████████████| 1.2MB 38.9MB/s \n",
"\u001b[?25hRequirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.7/dist-packages (from tf-models-official->object-detection==0.1) (5.4.8)\n",
"Collecting py-cpuinfo>=3.3.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/f6/f5/8e6e85ce2e9f6e05040cf0d4e26f43a4718bcc4bce988b433276d4b1a5c1/py-cpuinfo-7.0.0.tar.gz (95kB)\n",
"\u001b[K |████████████████████████████████| 102kB 10.8MB/s \n",
"\u001b[?25hCollecting tensorflow-model-optimization>=0.4.1\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/55/38/4fd48ea1bfcb0b6e36d949025200426fe9c3a8bfae029f0973d85518fa5a/tensorflow_model_optimization-0.5.0-py2.py3-none-any.whl (172kB)\n",
"\u001b[K |████████████████████████████████| 174kB 43.0MB/s \n",
"\u001b[?25hCollecting dataclasses\n",
" Downloading https://files.pythonhosted.org/packages/26/2f/1095cdc2868052dd1e64520f7c0d5c8c550ad297e944e641dbf1ffbb9a5d/dataclasses-0.6-py3-none-any.whl\n",
"Requirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.7/dist-packages (from oauth2client<5,>=2.0.1->apache-beam->object-detection==0.1) (0.2.8)\n",
"Requirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from oauth2client<5,>=2.0.1->apache-beam->object-detection==0.1) (4.7.2)\n",
"Requirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.7/dist-packages (from oauth2client<5,>=2.0.1->apache-beam->object-detection==0.1) (0.4.8)\n",
"Collecting pbr>=0.11\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/fb/48/69046506f6ac61c1eaa9a0d42d22d54673b69e176d30ca98e3f61513e980/pbr-5.5.1-py2.py3-none-any.whl (106kB)\n",
"\u001b[K |████████████████████████████████| 112kB 29.8MB/s \n",
"\u001b[?25hRequirement already satisfied: docopt in /usr/local/lib/python3.7/dist-packages (from hdfs<3.0.0,>=2.1.0->apache-beam->object-detection==0.1) (0.6.2)\n",
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (1.24.3)\n",
"Requirement already satisfied: chardet<5,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (3.0.4)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (2.10)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (2020.12.5)\n",
"Requirement already satisfied: google-api-core<2dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (1.26.1)\n",
"Requirement already satisfied: google-auth>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (1.27.1)\n",
"Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (3.0.1)\n",
"Requirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (0.0.4)\n",
"Requirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.7/dist-packages (from seqeval->tf-models-official->object-detection==0.1) (0.22.2.post1)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official->object-detection==0.1) (4.41.1)\n",
"Requirement already satisfied: python-slugify in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official->object-detection==0.1) (4.0.1)\n",
"Requirement already satisfied: wheel~=0.35 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (0.36.2)\n",
"Collecting h5py~=2.10.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/3f/c0/abde58b837e066bca19a3f7332d9d0493521d7dd6b48248451a9e3fe2214/h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl (2.9MB)\n",
"\u001b[K |████████████████████████████████| 2.9MB 33.3MB/s \n",
"\u001b[?25hRequirement already satisfied: keras-preprocessing~=1.1.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.1.2)\n",
"Requirement already satisfied: tensorflow-estimator<2.5.0,>=2.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (2.4.0)\n",
"Requirement already satisfied: astunparse~=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.6.3)\n",
"Requirement already satisfied: wrapt~=1.12.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.12.1)\n",
"Collecting gast==0.3.3\n",
" Downloading https://files.pythonhosted.org/packages/d6/84/759f5dd23fec8ba71952d97bcc7e2c9d7d63bdc582421f3cd4be845f0c98/gast-0.3.3-py2.py3-none-any.whl\n",
"Requirement already satisfied: opt-einsum~=3.3.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (3.3.0)\n",
"Requirement already satisfied: tensorboard~=2.4 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (2.4.1)\n",
"Requirement already satisfied: google-pasta~=0.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (0.2.0)\n",
"Requirement already satisfied: flatbuffers~=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.12)\n",
"Requirement already satisfied: termcolor~=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.1.0)\n",
"Requirement already satisfied: google-cloud-core<2.0dev,>=1.0.3 in /usr/local/lib/python3.7/dist-packages (from google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (1.0.3)\n",
"Requirement already satisfied: google-resumable-media!=0.4.0,<0.5.0dev,>=0.3.1 in /usr/local/lib/python3.7/dist-packages (from google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (0.4.1)\n",
"Requirement already satisfied: dm-tree in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (0.1.5)\n",
"Requirement already satisfied: importlib-resources; python_version < \"3.9\" in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (5.1.2)\n",
"Requirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (20.3.0)\n",
"Requirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (0.28.0)\n",
"Requirement already satisfied: promise in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (2.3)\n",
"Requirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.7/dist-packages (from tensorflow-addons->tf-models-official->object-detection==0.1) (2.7.1)\n",
"Requirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (20.9)\n",
"Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (1.53.0)\n",
"Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (4.2.1)\n",
"Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official->object-detection==0.1) (1.0.1)\n",
"Requirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.7/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-official->object-detection==0.1) (1.3)\n",
"Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.4->tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (3.3.4)\n",
"Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.4->tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (0.4.3)\n",
"Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.4->tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.8.0)\n",
"Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.4->tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.0.1)\n",
"Requirement already satisfied: zipp>=0.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-resources; python_version < \"3.9\"->tensorflow-datasets->tf-models-official->object-detection==0.1) (3.4.1)\n",
"Requirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard~=2.4->tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (3.7.2)\n",
"Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.4->tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (1.3.0)\n",
"Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.4->tensorflow>=2.4.0->tf-models-official->object-detection==0.1) (3.1.0)\n",
"Building wheels for collected packages: object-detection, avro-python3, dill, future, seqeval, py-cpuinfo\n",
" Building wheel for object-detection (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for object-detection: filename=object_detection-0.1-cp37-none-any.whl size=1628562 sha256=97e530208c27cb77db145a581884ae5d7a9212d442a292ec32eb5f9b6d18708b\n",
" Stored in directory: /tmp/pip-ephem-wheel-cache-efl7sp0x/wheels/94/49/4b/39b051683087a22ef7e80ec52152a27249d1a644ccf4e442ea\n",
" Building wheel for avro-python3 (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for avro-python3: filename=avro_python3-1.10.2-cp37-none-any.whl size=44011 sha256=a7c24e2e64ea760ccf08745027762260f684b96281c0620ebbffbfd8f3759f27\n",
" Stored in directory: /root/.cache/pip/wheels/ee/ee/18/c466221ca6900e3efce2f4ea9c329288808679aecdcb2838d3\n",
" Building wheel for dill (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for dill: filename=dill-0.3.1.1-cp37-none-any.whl size=78532 sha256=c6c95b840d06a9acbbee8e3defd5d88cf18badd48f19a77c8a763cd6b8b20b98\n",
" Stored in directory: /root/.cache/pip/wheels/59/b1/91/f02e76c732915c4015ab4010f3015469866c1eb9b14058d8e7\n",
" Building wheel for future (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for future: filename=future-0.18.2-cp37-none-any.whl size=491058 sha256=5d4b43f6b473d064f7727808708a72356208f010b0abc9f8a2fe6c757df9b1bf\n",
" Stored in directory: /root/.cache/pip/wheels/8b/99/a0/81daf51dcd359a9377b110a8a886b3895921802d2fc1b2397e\n",
" Building wheel for seqeval (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for seqeval: filename=seqeval-1.2.2-cp37-none-any.whl size=16172 sha256=93fe09e74a48e734a541a05da60d8a96bfa3c74a699ac94327f1efc6cc17c2a5\n",
" Stored in directory: /root/.cache/pip/wheels/52/df/1b/45d75646c37428f7e626214704a0e35bd3cfc32eda37e59e5f\n",
" Building wheel for py-cpuinfo (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for py-cpuinfo: filename=py_cpuinfo-7.0.0-cp37-none-any.whl size=20070 sha256=a0dc44f9a8ccdec26c0000ea1050ae1909714e1d14b5b320d8be969b499bf1cd\n",
" Stored in directory: /root/.cache/pip/wheels/f1/93/7b/127daf0c3a5a49feb2fecd468d508067c733fba5192f726ad1\n",
"Successfully built object-detection avro-python3 dill future seqeval py-cpuinfo\n",
"\u001b[31mERROR: tf-nightly 2.5.0.dev20210321 has requirement gast==0.4.0, but you'll have gast 0.3.3 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: tf-nightly 2.5.0.dev20210321 has requirement h5py~=3.1.0, but you'll have h5py 2.10.0 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: tensorflow 2.4.1 has requirement grpcio~=1.32.0, but you'll have grpcio 1.34.1 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: multiprocess 0.70.11.1 has requirement dill>=0.3.3, but you'll have dill 0.3.1.1 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: google-colab 1.0.0 has requirement requests~=2.23.0, but you'll have requests 2.25.1 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: apache-beam 2.28.0 has requirement avro-python3!=1.9.2,<1.10.0,>=1.8.1, but you'll have avro-python3 1.10.2 which is incompatible.\u001b[0m\n",
"Installing collected packages: avro-python3, pbr, mock, dill, pyarrow, requests, hdfs, future, fastavro, apache-beam, tf-slim, lvis, seqeval, opencv-python-headless, pyyaml, tensorflow-addons, sentencepiece, py-cpuinfo, tensorflow-model-optimization, dataclasses, tf-models-official, object-detection, h5py, gast\n",
" Found existing installation: dill 0.3.3\n",
" Uninstalling dill-0.3.3:\n",
" Successfully uninstalled dill-0.3.3\n",
" Found existing installation: pyarrow 3.0.0\n",
" Uninstalling pyarrow-3.0.0:\n",
" Successfully uninstalled pyarrow-3.0.0\n",
" Found existing installation: requests 2.23.0\n",
" Uninstalling requests-2.23.0:\n",
" Successfully uninstalled requests-2.23.0\n",
" Found existing installation: future 0.16.0\n",
" Uninstalling future-0.16.0:\n",
" Successfully uninstalled future-0.16.0\n",
" Found existing installation: PyYAML 3.13\n",
" Uninstalling PyYAML-3.13:\n",
" Successfully uninstalled PyYAML-3.13\n",
" Found existing installation: h5py 3.1.0\n",
" Uninstalling h5py-3.1.0:\n",
" Successfully uninstalled h5py-3.1.0\n",
" Found existing installation: gast 0.4.0\n",
" Uninstalling gast-0.4.0:\n",
" Successfully uninstalled gast-0.4.0\n",
"Successfully installed apache-beam-2.28.0 avro-python3-1.10.2 dataclasses-0.6 dill-0.3.1.1 fastavro-1.3.4 future-0.18.2 gast-0.3.3 h5py-2.10.0 hdfs-2.6.0 lvis-0.5.3 mock-2.0.0 object-detection-0.1 opencv-python-headless-4.5.1.48 pbr-5.5.1 py-cpuinfo-7.0.0 pyarrow-2.0.0 pyyaml-5.4.1 requests-2.25.1 sentencepiece-0.1.95 seqeval-1.2.2 tensorflow-addons-0.12.1 tensorflow-model-optimization-0.5.0 tf-models-official-2.4.0 tf-slim-1.1.0\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "y4izg5T6oMpA",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "32e6a6a4-c350-4152-f04b-4dcd51624b6f"
},
"source": [
"# Test the installation.\n",
"!python3 object_detection/builders/model_builder_tf2_test.py"
],
"execution_count": 9,
"outputs": [
{
"output_type": "stream",
"text": [
"2021-03-21 14:42:52.229001: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"Running tests under Python 3.7.10: /usr/bin/python3\n",
"[ RUN ] ModelBuilderTF2Test.test_create_center_net_model\n",
"2021-03-21 14:42:55.470762: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 14:42:55.476536: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 14:42:55.477377: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 14:42:55.477424: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:42:55.490849: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 14:42:55.490937: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 14:42:55.492817: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 14:42:55.493238: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 14:42:55.493377: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 14:42:55.494004: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 14:42:55.494231: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 14:42:55.494266: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 14:42:55.494546: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 14:42:55.494732: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:42:55.494765: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model): 0.71s\n",
"I0321 14:42:55.934461 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_center_net_model): 0.71s\n",
"[ OK ] ModelBuilderTF2Test.test_create_center_net_model\n",
"[ RUN ] ModelBuilderTF2Test.test_create_center_net_model_from_keypoints\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model_from_keypoints): 0.3s\n",
"I0321 14:42:56.233325 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_center_net_model_from_keypoints): 0.3s\n",
"[ OK ] ModelBuilderTF2Test.test_create_center_net_model_from_keypoints\n",
"[ RUN ] ModelBuilderTF2Test.test_create_experimental_model\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_experimental_model): 0.0s\n",
"I0321 14:42:56.234284 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_experimental_model): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_create_experimental_model\n",
"[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)): 0.03s\n",
"I0321 14:42:56.261316 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)): 0.03s\n",
"[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)\n",
"[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)): 0.02s\n",
"I0321 14:42:56.278748 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)): 0.02s\n",
"[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)\n",
"[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner): 0.02s\n",
"I0321 14:42:56.296559 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner): 0.02s\n",
"[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner\n",
"[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul): 0.11s\n",
"I0321 14:42:56.410984 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul): 0.11s\n",
"[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul\n",
"[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul): 0.11s\n",
"I0321 14:42:56.518530 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul): 0.11s\n",
"[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul\n",
"[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul): 0.12s\n",
"I0321 14:42:56.637575 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul): 0.12s\n",
"[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul\n",
"[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul): 0.11s\n",
"I0321 14:42:56.745888 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul): 0.11s\n",
"[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul\n",
"[ RUN ] ModelBuilderTF2Test.test_create_rfcn_model_from_config\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_rfcn_model_from_config): 0.12s\n",
"I0321 14:42:56.870300 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_rfcn_model_from_config): 0.12s\n",
"[ OK ] ModelBuilderTF2Test.test_create_rfcn_model_from_config\n",
"[ RUN ] ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config): 0.03s\n",
"I0321 14:42:56.900315 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config): 0.03s\n",
"[ OK ] ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config\n",
"[ RUN ] ModelBuilderTF2Test.test_create_ssd_models_from_config\n",
"I0321 14:42:57.106511 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b0\n",
"I0321 14:42:57.106765 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 64\n",
"I0321 14:42:57.106866 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 3\n",
"I0321 14:42:57.110469 139747933968256 efficientnet_model.py:147] round_filter input=32 output=32\n",
"I0321 14:42:57.138941 139747933968256 efficientnet_model.py:147] round_filter input=32 output=32\n",
"I0321 14:42:57.139089 139747933968256 efficientnet_model.py:147] round_filter input=16 output=16\n",
"I0321 14:42:57.206282 139747933968256 efficientnet_model.py:147] round_filter input=16 output=16\n",
"I0321 14:42:57.206487 139747933968256 efficientnet_model.py:147] round_filter input=24 output=24\n",
"I0321 14:42:57.377593 139747933968256 efficientnet_model.py:147] round_filter input=24 output=24\n",
"I0321 14:42:57.377822 139747933968256 efficientnet_model.py:147] round_filter input=40 output=40\n",
"I0321 14:42:57.547707 139747933968256 efficientnet_model.py:147] round_filter input=40 output=40\n",
"I0321 14:42:57.547938 139747933968256 efficientnet_model.py:147] round_filter input=80 output=80\n",
"I0321 14:42:57.819769 139747933968256 efficientnet_model.py:147] round_filter input=80 output=80\n",
"I0321 14:42:57.819976 139747933968256 efficientnet_model.py:147] round_filter input=112 output=112\n",
"I0321 14:42:58.210364 139747933968256 efficientnet_model.py:147] round_filter input=112 output=112\n",
"I0321 14:42:58.210565 139747933968256 efficientnet_model.py:147] round_filter input=192 output=192\n",
"I0321 14:42:58.605795 139747933968256 efficientnet_model.py:147] round_filter input=192 output=192\n",
"I0321 14:42:58.606011 139747933968256 efficientnet_model.py:147] round_filter input=320 output=320\n",
"I0321 14:42:58.705002 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=1280\n",
"I0321 14:42:58.759041 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.0, depth_coefficient=1.0, resolution=224, dropout_rate=0.2, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"I0321 14:42:58.818094 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b1\n",
"I0321 14:42:58.818284 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 88\n",
"I0321 14:42:58.818384 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 4\n",
"I0321 14:42:58.820331 139747933968256 efficientnet_model.py:147] round_filter input=32 output=32\n",
"I0321 14:42:58.837288 139747933968256 efficientnet_model.py:147] round_filter input=32 output=32\n",
"I0321 14:42:58.837416 139747933968256 efficientnet_model.py:147] round_filter input=16 output=16\n",
"I0321 14:42:58.972846 139747933968256 efficientnet_model.py:147] round_filter input=16 output=16\n",
"I0321 14:42:58.973138 139747933968256 efficientnet_model.py:147] round_filter input=24 output=24\n",
"I0321 14:42:59.226389 139747933968256 efficientnet_model.py:147] round_filter input=24 output=24\n",
"I0321 14:42:59.226592 139747933968256 efficientnet_model.py:147] round_filter input=40 output=40\n",
"I0321 14:42:59.486196 139747933968256 efficientnet_model.py:147] round_filter input=40 output=40\n",
"I0321 14:42:59.486456 139747933968256 efficientnet_model.py:147] round_filter input=80 output=80\n",
"I0321 14:42:59.842376 139747933968256 efficientnet_model.py:147] round_filter input=80 output=80\n",
"I0321 14:42:59.842565 139747933968256 efficientnet_model.py:147] round_filter input=112 output=112\n",
"I0321 14:43:00.212787 139747933968256 efficientnet_model.py:147] round_filter input=112 output=112\n",
"I0321 14:43:00.213007 139747933968256 efficientnet_model.py:147] round_filter input=192 output=192\n",
"I0321 14:43:00.720852 139747933968256 efficientnet_model.py:147] round_filter input=192 output=192\n",
"I0321 14:43:00.721086 139747933968256 efficientnet_model.py:147] round_filter input=320 output=320\n",
"I0321 14:43:00.951671 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=1280\n",
"I0321 14:43:01.013576 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.0, depth_coefficient=1.1, resolution=240, dropout_rate=0.2, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"I0321 14:43:01.085645 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b2\n",
"I0321 14:43:01.085920 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 112\n",
"I0321 14:43:01.086032 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 5\n",
"I0321 14:43:01.087928 139747933968256 efficientnet_model.py:147] round_filter input=32 output=32\n",
"I0321 14:43:01.104181 139747933968256 efficientnet_model.py:147] round_filter input=32 output=32\n",
"I0321 14:43:01.104301 139747933968256 efficientnet_model.py:147] round_filter input=16 output=16\n",
"I0321 14:43:01.232951 139747933968256 efficientnet_model.py:147] round_filter input=16 output=16\n",
"I0321 14:43:01.233179 139747933968256 efficientnet_model.py:147] round_filter input=24 output=24\n",
"I0321 14:43:01.498100 139747933968256 efficientnet_model.py:147] round_filter input=24 output=24\n",
"I0321 14:43:01.498293 139747933968256 efficientnet_model.py:147] round_filter input=40 output=48\n",
"I0321 14:43:01.755674 139747933968256 efficientnet_model.py:147] round_filter input=40 output=48\n",
"I0321 14:43:01.755946 139747933968256 efficientnet_model.py:147] round_filter input=80 output=88\n",
"I0321 14:43:02.263819 139747933968256 efficientnet_model.py:147] round_filter input=80 output=88\n",
"I0321 14:43:02.264040 139747933968256 efficientnet_model.py:147] round_filter input=112 output=120\n",
"I0321 14:43:02.637386 139747933968256 efficientnet_model.py:147] round_filter input=112 output=120\n",
"I0321 14:43:02.637599 139747933968256 efficientnet_model.py:147] round_filter input=192 output=208\n",
"I0321 14:43:03.176709 139747933968256 efficientnet_model.py:147] round_filter input=192 output=208\n",
"I0321 14:43:03.176988 139747933968256 efficientnet_model.py:147] round_filter input=320 output=352\n",
"I0321 14:43:03.416572 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=1408\n",
"I0321 14:43:03.473567 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.1, depth_coefficient=1.2, resolution=260, dropout_rate=0.3, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"I0321 14:43:03.546838 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b3\n",
"I0321 14:43:03.547051 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 160\n",
"I0321 14:43:03.547190 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 6\n",
"I0321 14:43:03.549105 139747933968256 efficientnet_model.py:147] round_filter input=32 output=40\n",
"I0321 14:43:03.566800 139747933968256 efficientnet_model.py:147] round_filter input=32 output=40\n",
"I0321 14:43:03.566934 139747933968256 efficientnet_model.py:147] round_filter input=16 output=24\n",
"I0321 14:43:03.721905 139747933968256 efficientnet_model.py:147] round_filter input=16 output=24\n",
"I0321 14:43:03.722141 139747933968256 efficientnet_model.py:147] round_filter input=24 output=32\n",
"I0321 14:43:03.985135 139747933968256 efficientnet_model.py:147] round_filter input=24 output=32\n",
"I0321 14:43:03.985343 139747933968256 efficientnet_model.py:147] round_filter input=40 output=48\n",
"I0321 14:43:04.252384 139747933968256 efficientnet_model.py:147] round_filter input=40 output=48\n",
"I0321 14:43:04.252609 139747933968256 efficientnet_model.py:147] round_filter input=80 output=96\n",
"I0321 14:43:04.710540 139747933968256 efficientnet_model.py:147] round_filter input=80 output=96\n",
"I0321 14:43:04.710750 139747933968256 efficientnet_model.py:147] round_filter input=112 output=136\n",
"I0321 14:43:05.182297 139747933968256 efficientnet_model.py:147] round_filter input=112 output=136\n",
"I0321 14:43:05.182517 139747933968256 efficientnet_model.py:147] round_filter input=192 output=232\n",
"I0321 14:43:05.844224 139747933968256 efficientnet_model.py:147] round_filter input=192 output=232\n",
"I0321 14:43:05.844433 139747933968256 efficientnet_model.py:147] round_filter input=320 output=384\n",
"I0321 14:43:06.104494 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=1536\n",
"I0321 14:43:06.175492 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.2, depth_coefficient=1.4, resolution=300, dropout_rate=0.3, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"I0321 14:43:06.253545 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b4\n",
"I0321 14:43:06.253795 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 224\n",
"I0321 14:43:06.253896 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 7\n",
"I0321 14:43:06.255743 139747933968256 efficientnet_model.py:147] round_filter input=32 output=48\n",
"I0321 14:43:06.274245 139747933968256 efficientnet_model.py:147] round_filter input=32 output=48\n",
"I0321 14:43:06.274374 139747933968256 efficientnet_model.py:147] round_filter input=16 output=24\n",
"I0321 14:43:06.410500 139747933968256 efficientnet_model.py:147] round_filter input=16 output=24\n",
"I0321 14:43:06.410720 139747933968256 efficientnet_model.py:147] round_filter input=24 output=32\n",
"I0321 14:43:06.763848 139747933968256 efficientnet_model.py:147] round_filter input=24 output=32\n",
"I0321 14:43:06.764058 139747933968256 efficientnet_model.py:147] round_filter input=40 output=56\n",
"I0321 14:43:07.335688 139747933968256 efficientnet_model.py:147] round_filter input=40 output=56\n",
"I0321 14:43:07.335909 139747933968256 efficientnet_model.py:147] round_filter input=80 output=112\n",
"I0321 14:43:07.891472 139747933968256 efficientnet_model.py:147] round_filter input=80 output=112\n",
"I0321 14:43:07.891699 139747933968256 efficientnet_model.py:147] round_filter input=112 output=160\n",
"I0321 14:43:08.476104 139747933968256 efficientnet_model.py:147] round_filter input=112 output=160\n",
"I0321 14:43:08.476353 139747933968256 efficientnet_model.py:147] round_filter input=192 output=272\n",
"I0321 14:43:09.407405 139747933968256 efficientnet_model.py:147] round_filter input=192 output=272\n",
"I0321 14:43:09.407608 139747933968256 efficientnet_model.py:147] round_filter input=320 output=448\n",
"I0321 14:43:09.687679 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=1792\n",
"I0321 14:43:09.771390 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.4, depth_coefficient=1.8, resolution=380, dropout_rate=0.4, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"I0321 14:43:09.858954 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b5\n",
"I0321 14:43:09.859138 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 288\n",
"I0321 14:43:09.859253 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 7\n",
"I0321 14:43:09.861072 139747933968256 efficientnet_model.py:147] round_filter input=32 output=48\n",
"I0321 14:43:09.878690 139747933968256 efficientnet_model.py:147] round_filter input=32 output=48\n",
"I0321 14:43:09.878830 139747933968256 efficientnet_model.py:147] round_filter input=16 output=24\n",
"I0321 14:43:10.088265 139747933968256 efficientnet_model.py:147] round_filter input=16 output=24\n",
"I0321 14:43:10.088489 139747933968256 efficientnet_model.py:147] round_filter input=24 output=40\n",
"I0321 14:43:10.525182 139747933968256 efficientnet_model.py:147] round_filter input=24 output=40\n",
"I0321 14:43:10.525383 139747933968256 efficientnet_model.py:147] round_filter input=40 output=64\n",
"I0321 14:43:10.979574 139747933968256 efficientnet_model.py:147] round_filter input=40 output=64\n",
"I0321 14:43:10.979800 139747933968256 efficientnet_model.py:147] round_filter input=80 output=128\n",
"I0321 14:43:11.610625 139747933968256 efficientnet_model.py:147] round_filter input=80 output=128\n",
"I0321 14:43:11.610879 139747933968256 efficientnet_model.py:147] round_filter input=112 output=176\n",
"I0321 14:43:12.477487 139747933968256 efficientnet_model.py:147] round_filter input=112 output=176\n",
"I0321 14:43:12.477707 139747933968256 efficientnet_model.py:147] round_filter input=192 output=304\n",
"I0321 14:43:13.549219 139747933968256 efficientnet_model.py:147] round_filter input=192 output=304\n",
"I0321 14:43:13.549463 139747933968256 efficientnet_model.py:147] round_filter input=320 output=512\n",
"I0321 14:43:14.053011 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=2048\n",
"I0321 14:43:14.135651 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.6, depth_coefficient=2.2, resolution=456, dropout_rate=0.4, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"I0321 14:43:14.237786 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b6\n",
"I0321 14:43:14.237978 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 384\n",
"I0321 14:43:14.238088 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 8\n",
"I0321 14:43:14.240010 139747933968256 efficientnet_model.py:147] round_filter input=32 output=56\n",
"I0321 14:43:14.258316 139747933968256 efficientnet_model.py:147] round_filter input=32 output=56\n",
"I0321 14:43:14.258441 139747933968256 efficientnet_model.py:147] round_filter input=16 output=32\n",
"I0321 14:43:14.468545 139747933968256 efficientnet_model.py:147] round_filter input=16 output=32\n",
"I0321 14:43:14.468775 139747933968256 efficientnet_model.py:147] round_filter input=24 output=40\n",
"I0321 14:43:14.997230 139747933968256 efficientnet_model.py:147] round_filter input=24 output=40\n",
"I0321 14:43:14.997431 139747933968256 efficientnet_model.py:147] round_filter input=40 output=72\n",
"I0321 14:43:15.535362 139747933968256 efficientnet_model.py:147] round_filter input=40 output=72\n",
"I0321 14:43:15.535566 139747933968256 efficientnet_model.py:147] round_filter input=80 output=144\n",
"I0321 14:43:16.302236 139747933968256 efficientnet_model.py:147] round_filter input=80 output=144\n",
"I0321 14:43:16.302465 139747933968256 efficientnet_model.py:147] round_filter input=112 output=200\n",
"I0321 14:43:17.140637 139747933968256 efficientnet_model.py:147] round_filter input=112 output=200\n",
"I0321 14:43:17.140989 139747933968256 efficientnet_model.py:147] round_filter input=192 output=344\n",
"I0321 14:43:18.560842 139747933968256 efficientnet_model.py:147] round_filter input=192 output=344\n",
"I0321 14:43:18.561133 139747933968256 efficientnet_model.py:147] round_filter input=320 output=576\n",
"I0321 14:43:19.350369 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=2304\n",
"I0321 14:43:19.435862 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.8, depth_coefficient=2.6, resolution=528, dropout_rate=0.5, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"I0321 14:43:19.555930 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet EfficientNet backbone version: efficientnet-b7\n",
"I0321 14:43:19.556143 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:145] EfficientDet BiFPN num filters: 384\n",
"I0321 14:43:19.556266 139747933968256 ssd_efficientnet_bifpn_feature_extractor.py:147] EfficientDet BiFPN num iterations: 8\n",
"I0321 14:43:19.558204 139747933968256 efficientnet_model.py:147] round_filter input=32 output=64\n",
"I0321 14:43:19.575318 139747933968256 efficientnet_model.py:147] round_filter input=32 output=64\n",
"I0321 14:43:19.575440 139747933968256 efficientnet_model.py:147] round_filter input=16 output=32\n",
"I0321 14:43:19.852274 139747933968256 efficientnet_model.py:147] round_filter input=16 output=32\n",
"I0321 14:43:19.852475 139747933968256 efficientnet_model.py:147] round_filter input=24 output=48\n",
"I0321 14:43:20.486307 139747933968256 efficientnet_model.py:147] round_filter input=24 output=48\n",
"I0321 14:43:20.486532 139747933968256 efficientnet_model.py:147] round_filter input=40 output=80\n",
"I0321 14:43:21.130296 139747933968256 efficientnet_model.py:147] round_filter input=40 output=80\n",
"I0321 14:43:21.130503 139747933968256 efficientnet_model.py:147] round_filter input=80 output=160\n",
"I0321 14:43:22.092345 139747933968256 efficientnet_model.py:147] round_filter input=80 output=160\n",
"I0321 14:43:22.092543 139747933968256 efficientnet_model.py:147] round_filter input=112 output=224\n",
"I0321 14:43:23.134568 139747933968256 efficientnet_model.py:147] round_filter input=112 output=224\n",
"I0321 14:43:23.134848 139747933968256 efficientnet_model.py:147] round_filter input=192 output=384\n",
"I0321 14:43:24.925958 139747933968256 efficientnet_model.py:147] round_filter input=192 output=384\n",
"I0321 14:43:24.926266 139747933968256 efficientnet_model.py:147] round_filter input=320 output=640\n",
"I0321 14:43:25.835597 139747933968256 efficientnet_model.py:147] round_filter input=1280 output=2560\n",
"I0321 14:43:25.935189 139747933968256 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=2.0, depth_coefficient=3.1, resolution=600, dropout_rate=0.5, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_ssd_models_from_config): 29.45s\n",
"I0321 14:43:26.353718 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_create_ssd_models_from_config): 29.45s\n",
"[ OK ] ModelBuilderTF2Test.test_create_ssd_models_from_config\n",
"[ RUN ] ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update): 0.0s\n",
"I0321 14:43:26.362104 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update\n",
"[ RUN ] ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold): 0.0s\n",
"I0321 14:43:26.364299 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold\n",
"[ RUN ] ModelBuilderTF2Test.test_invalid_model_config_proto\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_model_config_proto): 0.0s\n",
"I0321 14:43:26.365052 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_invalid_model_config_proto): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_invalid_model_config_proto\n",
"[ RUN ] ModelBuilderTF2Test.test_invalid_second_stage_batch_size\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_second_stage_batch_size): 0.0s\n",
"I0321 14:43:26.367069 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_invalid_second_stage_batch_size): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_invalid_second_stage_batch_size\n",
"[ RUN ] ModelBuilderTF2Test.test_session\n",
"[ SKIPPED ] ModelBuilderTF2Test.test_session\n",
"[ RUN ] ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor): 0.0s\n",
"I0321 14:43:26.368917 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor\n",
"[ RUN ] ModelBuilderTF2Test.test_unknown_meta_architecture\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_meta_architecture): 0.0s\n",
"I0321 14:43:26.369500 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_unknown_meta_architecture): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_unknown_meta_architecture\n",
"[ RUN ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor\n",
"INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_ssd_feature_extractor): 0.0s\n",
"I0321 14:43:26.370775 139747933968256 test_util.py:2096] time(__main__.ModelBuilderTF2Test.test_unknown_ssd_feature_extractor): 0.0s\n",
"[ OK ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor\n",
"----------------------------------------------------------------------\n",
"Ran 21 tests in 31.144s\n",
"\n",
"OK (skipped=1)\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "c-fL1Ei3sQCx"
},
"source": [
"## Install EdgeTPU Compiler"
]
},
{
"cell_type": "code",
"metadata": {
"id": "SfGFiecZsTjX",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "24e8a801-e3e4-428f-a794-fb17cdf6a92a"
},
"source": [
"!echo \"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\" | tee /etc/apt/sources.list.d/coral-edgetpu.list\n",
"!curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -\n",
"!sudo apt -qq update\n",
"!apt-get -qq install edgetpu-compiler"
],
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"text": [
"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\n",
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 1974 100 1974 0 0 73111 0 --:--:-- --:--:-- --:--:-- 73111\n",
"OK\n",
"53 packages can be upgraded. Run 'apt list --upgradable' to see them.\n",
"Selecting previously unselected package edgetpu-compiler.\n",
"(Reading database ... 160980 files and directories currently installed.)\n",
"Preparing to unpack .../edgetpu-compiler_15.0_amd64.deb ...\n",
"Unpacking edgetpu-compiler (15.0) ...\n",
"Setting up edgetpu-compiler (15.0) ...\n",
"Processing triggers for libc-bin (2.27-3ubuntu1.2) ...\n",
"/sbin/ldconfig.real: /usr/local/lib/python3.7/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n",
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "26ba2wNassGI",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "3a617b66-a1b8-4b5b-d48a-68d14f6fd801"
},
"source": [
"!edgetpu_compiler -v"
],
"execution_count": 11,
"outputs": [
{
"output_type": "stream",
"text": [
"Edge TPU Compiler version 15.0.340273435\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "i2YPFpUKpAFT"
},
"source": [
"# Download pre-trained model and convert edge tpu model.\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "cwAH4rR4f3Le"
},
"source": [
"Model = namedtuple('Model', ['download_url', 'extract_dir', 'width', 'height'])"
],
"execution_count": 12,
"outputs": []
},
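{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each `Model` entry records the checkpoint download URL, the directory the archive extracts to, and the input width/height that the quantization helper uses for the representative dataset. A minimal sketch of building and reading an entry (the URL and values below are placeholders, not a real model):\n",
"\n",
"```python\n",
"# Hypothetical entry, only to show the fields.\n",
"example = Model(\n",
"    download_url='http://example.com/some_model.tar.gz',\n",
"    extract_dir='some_model',\n",
"    width=320, height=320)\n",
"print(example.extract_dir, example.width, example.height)\n",
"```"
]
},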
{
"cell_type": "code",
"metadata": {
"id": "JPJRWodKUTCO"
},
"source": [
"SSD_MODELS = {\n",
" 'ssd_mobilenet_v1_fpn_640x640_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.tar.gz',\n",
" 'ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8',\n",
" 640, 640\n",
" ),\n",
" 'ssd_mobilenet_v2_320x320_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz',\n",
" 'ssd_mobilenet_v2_320x320_coco17_tpu-8',\n",
" 300, 300\n",
" ),\n",
" 'ssd_mobilenet_v2_fpnlite_320x320_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz',\n",
" 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8',\n",
" 320, 320\n",
" ),\n",
" 'ssd_mobilenet_v2_fpnlite_640x640_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz',\n",
" 'ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8',\n",
" 640, 640\n",
" ),\n",
" 'ssd_resnet50_v1_fpn_640x640_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz',\n",
" 'ssd_resnet50_v1_fpn_640x640_coco17_tpu-8',\n",
" 640, 640\n",
" ),\n",
" 'ssd_resnet50_v1_fpn_1024x1024_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.tar.gz',\n",
" 'ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8',\n",
" 1024, 1024\n",
" ),\n",
" 'ssd_resnet101_v1_fpn_640x640_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.tar.gz',\n",
" 'ssd_resnet101_v1_fpn_640x640_coco17_tpu-8',\n",
" 640, 640\n",
" ),\n",
" 'ssd_resnet101_v1_fpn_1024x1024_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.tar.gz',\n",
" 'ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8',\n",
" 1024, 1024\n",
" ),\n",
" 'ssd_resnet152_v1_fpn_640x640_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.tar.gz',\n",
" 'ssd_resnet152_v1_fpn_640x640_coco17_tpu-8',\n",
" 640, 640\n",
" ),\n",
" 'ssd_resnet152_v1_fpn_1024x1024_coco17': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.tar.gz',\n",
" 'ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8',\n",
" 1024, 1024\n",
" ),\n",
" 'centernet_mobilenetv2fpn_512x512_coco17_od': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20210210/centernet_mobilenetv2fpn_512x512_coco17_od.tar.gz',\n",
" 'centernet_mobilenetv2_fpn_od',\n",
" 320, 320\n",
" ),\n",
" 'centernet_mobilenetv2fpn_512x512_coco17_od': Model(\n",
" 'http://download.tensorflow.org/models/object_detection/tf2/20210210/centernet_mobilenetv2fpn_512x512_coco17_kpts.tar.gz',\n",
" 'centernet_mobilenetv2_fpn_kpts',\n",
" 320, 320\n",
" ), \n",
"}"
],
"execution_count": 13,
"outputs": []
},
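{
"cell_type": "markdown",
"metadata": {},
"source": [
"Converting every entry takes a long time (several minutes per model). If you only need a subset, one option is to filter the dictionary before running the conversion loop below; a minimal sketch, assuming the key is one of the entries defined above:\n",
"\n",
"```python\n",
"# Keep only the models you actually want to convert (hypothetical selection).\n",
"wanted = ['ssd_mobilenet_v2_fpnlite_320x320_coco17']\n",
"SSD_MODELS = {name: SSD_MODELS[name] for name in wanted}\n",
"```"
]
},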
{
"cell_type": "markdown",
"metadata": {
"id": "M2JrDoHqYjnn"
},
"source": [
"make output directory."
]
},
{
"cell_type": "code",
"metadata": {
"id": "NATCzAsFRafr"
},
"source": [
"models_dir = os.path.join('/', 'content', 'tflite_v2_models')\n",
"os.mkdir(models_dir)"
],
"execution_count": 14,
"outputs": []
},
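{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: `os.mkdir` raises `FileExistsError` if the directory already exists, so the cell above only works on a fresh runtime. A re-run-safe sketch:\n",
"\n",
"```python\n",
"# Same output directory, but safe to run more than once.\n",
"os.makedirs(models_dir, exist_ok=True)\n",
"```"
]
},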
{
"cell_type": "markdown",
"metadata": {
"id": "99rpeyUgIwKq"
},
"source": [
"Helper functions."
]
},
{
"cell_type": "code",
"metadata": {
"id": "XWV3xph3vYcg"
},
"source": [
"def convert_quant_full_int_model(input_path, output_path, width, height):\n",
" input_width = width\n",
" input_height = height\n",
"\n",
" def representative_data_gen():\n",
" for data in raw_test_data.take(100):\n",
" image = data['image'].numpy()\n",
" image = tf.image.resize(image, (input_height, input_width))\n",
" image = image[np.newaxis,:,:,:]\n",
" image = image - 127.5\n",
" image = image * 0.007843\n",
" yield [image]\n",
"\n",
" converter = tf.lite.TFLiteConverter.from_saved_model(input_path, signature_keys=['serving_default'])\n",
" converter.optimizations = [tf.lite.Optimize.DEFAULT]\n",
" converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.TFLITE_BUILTINS]\n",
" # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]\n",
" converter.inference_input_type = tf.uint8\n",
" converter.inference_output_type = tf.uint8\n",
" converter.representative_dataset = representative_data_gen\n",
" tflite_full_integer_quant_model = converter.convert()\n",
" \n",
" with tf.io.gfile.GFile(output_path, 'wb') as f:\n",
" f.write(tflite_full_integer_quant_model)"
],
"execution_count": 15,
"outputs": []
},
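{
"cell_type": "markdown",
"metadata": {},
"source": [
"`convert_quant_full_int_model` loads the exported SavedModel, feeds it 100 images from `raw_test_data` (resized to the model's input size and scaled to roughly [-1, 1]) as the representative dataset, and writes a full-integer quantized TFLite model with uint8 inputs and outputs. A minimal usage sketch with placeholder paths; the loop below builds the real paths from `SSD_MODELS` and `models_dir`:\n",
"\n",
"```python\n",
"# Hypothetical call for a single model; the paths are placeholders.\n",
"saved_model_dir = '/content/some_model/tflite/saved_model'\n",
"tflite_path = os.path.join(models_dir, 'some_model_quant.tflite')\n",
"convert_quant_full_int_model(saved_model_dir, tflite_path, 320, 320)\n",
"```"
]
},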
{
"cell_type": "code",
"metadata": {
"id": "r_i2O7svC1R6"
},
"source": [
"import subprocess\n",
"from subprocess import PIPE"
],
"execution_count": 16,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "1oK8AW1WgpB6",
"outputId": "ccb75faa-7bc4-4a54-f62e-6b1a69dbe16a"
},
"source": [
"for key in SSD_MODELS.keys():\n",
" model = SSD_MODELS[key]\n",
"\n",
" print('\\n')\n",
" print('----- ' + key + ' -----')\n",
" print('\\n')\n",
"\n",
" # Download tar and extract\n",
" download_url = model.download_url\n",
" download_file_path = os.path.basename(os.path.basename(model.download_url))\n",
" file_path = os.path.join('/', 'content', download_file_path)\n",
"\n",
" !wget $download_url -P /content/\n",
" !tar xf $file_path -C /content/\n",
"\n",
" # Convert save_model to tflite model.\n",
" model_path = os.path.join('/', 'content', os.path.basename(model.extract_dir))\n",
" pipeline_config_path = os.path.join(model_path, 'pipeline.config')\n",
" checkpoint_path = os.path.join(model_path, 'checkpoint')\n",
" output_path = os.path.join(model_path, 'tflite')\n",
"\n",
"\n",
" if 'centernet' in key:\n",
" is_centernet_include_keypoints = 'false'\n",
" if 'kpts' in key:\n",
" is_centernet_include_keypoints = 'true'\n",
" max_detections_num = 10\n",
" override_text = \"model{ center_net { image_resizer { fixed_shape_resizer { height: 320 width: 320 } } } }\"\n",
"\n",
" !python3 object_detection/export_tflite_graph_tf2.py \\\n",
" --trained_checkpoint_dir $checkpoint_path \\\n",
" --output_directory $output_path \\\n",
" --centernet_include_keypoints=false \\\n",
" --max_detections=10 \\\n",
" --pipeline_config_path $pipeline_config_path \\\n",
" --config_override=\"$override_text\" \n",
" \n",
" else:\n",
" !python3 object_detection/export_tflite_graph_tf2.py \\\n",
" --pipeline_config_path $pipeline_config_path \\\n",
" --trained_checkpoint_dir $checkpoint_path \\\n",
" --output_directory $output_path\n",
"\n",
"\n",
" # Convert Full integer model.\n",
" input_path = os.path.join(model_path, 'tflite', 'saved_model')\n",
" output_path = os.path.join(models_dir, key + '_quant.tflite')\n",
" convert_quant_full_int_model(input_path, output_path, model.width, model.height)\n",
"\n",
" # Convert EdgeTPU Model\n",
" !edgetpu_compiler -s -o $models_dir $output_path\n",
"\n",
" # Delete Files\n",
" !rm -rf $file_path\n",
" !rm -rf $download_file_path"
],
"execution_count": 17,
"outputs": [
{
"output_type": "stream",
"text": [
"\n",
"\n",
"----- ssd_mobilenet_v1_fpn_640x640_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 14:43:44-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 108.177.97.128, 2404:6800:4008:c03::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|108.177.97.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 90453990 (86M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_mobilenet_v1_fp 100%[===================>] 86.26M 33.8MB/s in 2.6s \n",
"\n",
"2021-03-21 14:43:48 (33.8 MB/s) - ‘/content/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.tar.gz’ saved [90453990/90453990]\n",
"\n",
"2021-03-21 14:43:50.840548: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:43:53.052689: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 14:43:53.058746: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 14:43:53.059537: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 14:43:53.059596: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:43:53.066323: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 14:43:53.066396: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 14:43:53.074172: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 14:43:53.074506: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 14:43:53.074626: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 14:43:53.075259: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 14:43:53.075474: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 14:43:53.075508: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 14:43:53.075828: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 14:43:53.076033: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:43:53.076065: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:44:00.414318: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:44:00.414372: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:44:00.447312: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 14:44:02.639246: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:44:02.639330: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7ff741ba0450>, because it is not built.\n",
"W0321 14:44:03.426404 140701771134848 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7ff741ba0450>, because it is not built.\n",
"2021-03-21 14:44:08.394876: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 14:44:10.129417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:44:10.129481: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 14:44:20.239123 140701771134848 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 670). These functions will not be directly callable after loading.\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 14:44:23.630196 140701771134848 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 14:44:24.025815 140701771134848 builder_impl.py:775] Assets written to: /content/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Model compiled successfully in 4011 ms.\n",
"\n",
"Input model: /content/tflite_v2_models/ssd_mobilenet_v1_fpn_640x640_coco17_quant.tflite\n",
"Input size: 31.01MiB\n",
"Output model: /content/tflite_v2_models/ssd_mobilenet_v1_fpn_640x640_coco17_quant_edgetpu.tflite\n",
"Output size: 35.33MiB\n",
"On-chip memory used for caching model parameters: 6.95MiB\n",
"On-chip memory remaining for caching model parameters: 72.50KiB\n",
"Off-chip memory used for streaming uncached model parameters: 15.52MiB\n",
"Number of Edge TPU subgraphs: 1\n",
"Total number of operations: 113\n",
"Operation log: /content/tflite_v2_models/ssd_mobilenet_v1_fpn_640x640_coco17_quant_edgetpu.log\n",
"\n",
"Model successfully compiled but not all operations are supported by the Edge TPU. A percentage of the model will instead run on the CPU, which is slower. If possible, consider updating your model to use only operations supported by the Edge TPU. For details, visit g.co/coral/model-reqs.\n",
"Number of operations that will run on Edge TPU: 68\n",
"Number of operations that will run on CPU: 45\n",
"\n",
"Operator Count Status\n",
"\n",
"RESHAPE 5 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"RESHAPE 2 More than one subgraph is not supported\n",
"RESHAPE 5 Mapped to Edge TPU\n",
"CONCATENATION 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"CONCATENATION 1 More than one subgraph is not supported\n",
"PACK 4 Tensor has unsupported rank (up to 3 innermost dimensions mapped)\n",
"LOGISTIC 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"ADD 2 More than one subgraph is not supported\n",
"CUSTOM 1 Operation is working on an unsupported data type\n",
"DEQUANTIZE 1 Operation is working on an unsupported data type\n",
"DEQUANTIZE 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"QUANTIZE 4 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"QUANTIZE 1 Mapped to Edge TPU\n",
"DEPTHWISE_CONV_2D 13 Mapped to Edge TPU\n",
"CONV_2D 49 Mapped to Edge TPU\n",
"CONV_2D 22 More than one subgraph is not supported\n",
"\n",
"\n",
"----- ssd_mobilenet_v2_320x320_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 14:51:46-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 64.233.188.128, 2404:6800:4008:c01::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|64.233.188.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 46042990 (44M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_mobilenet_v2_32 100%[===================>] 43.91M 53.3MB/s in 0.8s \n",
"\n",
"2021-03-21 14:51:47 (53.3 MB/s) - ‘/content/ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz’ saved [46042990/46042990]\n",
"\n",
"2021-03-21 14:51:48.974104: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:51:51.687163: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 14:51:51.693380: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 14:51:51.694164: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 14:51:51.694209: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:51:51.697223: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 14:51:51.697339: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 14:51:51.699252: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 14:51:51.699636: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 14:51:51.699832: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 14:51:51.700466: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 14:51:51.700703: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 14:51:51.700771: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 14:51:51.701016: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 14:51:51.701210: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:51:51.701257: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"INFO:tensorflow:depth of additional conv before box predictor: 0\n",
"I0321 14:51:57.547549 139881281120128 convolutional_keras_box_predictor.py:154] depth of additional conv before box predictor: 0\n",
"INFO:tensorflow:depth of additional conv before box predictor: 0\n",
"I0321 14:51:57.548064 139881281120128 convolutional_keras_box_predictor.py:154] depth of additional conv before box predictor: 0\n",
"INFO:tensorflow:depth of additional conv before box predictor: 0\n",
"I0321 14:51:57.548349 139881281120128 convolutional_keras_box_predictor.py:154] depth of additional conv before box predictor: 0\n",
"INFO:tensorflow:depth of additional conv before box predictor: 0\n",
"I0321 14:51:57.548642 139881281120128 convolutional_keras_box_predictor.py:154] depth of additional conv before box predictor: 0\n",
"INFO:tensorflow:depth of additional conv before box predictor: 0\n",
"I0321 14:51:57.548955 139881281120128 convolutional_keras_box_predictor.py:154] depth of additional conv before box predictor: 0\n",
"INFO:tensorflow:depth of additional conv before box predictor: 0\n",
"I0321 14:51:57.549248 139881281120128 convolutional_keras_box_predictor.py:154] depth of additional conv before box predictor: 0\n",
"2021-03-21 14:51:58.517854: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:51:58.517907: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:51:58.542483: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 14:52:00.190301: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:52:00.190362: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f38304bef90>, because it is not built.\n",
"W0321 14:52:00.976244 139881281120128 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f38304bef90>, because it is not built.\n",
"2021-03-21 14:52:09.795408: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 14:52:11.243893: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:52:11.243959: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 14:52:26.133033 139881281120128 save.py:243] Found untraced functions such as BoxPredictor_layer_call_fn, BoxPredictor_layer_call_and_return_conditional_losses, BoxPredictor_layer_call_fn, BoxPredictor_layer_call_and_return_conditional_losses, BoxPredictor_layer_call_and_return_conditional_losses while saving (showing 5 of 437). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 14:52:30.234444 139881281120128 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_mobilenet_v2_320x320_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 14:52:30.605383 139881281120128 builder_impl.py:775] Assets written to: /content/ssd_mobilenet_v2_320x320_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Model compiled successfully in 1519 ms.\n",
"\n",
"Input model: /content/tflite_v2_models/ssd_mobilenet_v2_320x320_coco17_quant.tflite\n",
"Input size: 6.43MiB\n",
"Output model: /content/tflite_v2_models/ssd_mobilenet_v2_320x320_coco17_quant_edgetpu.tflite\n",
"Output size: 6.75MiB\n",
"On-chip memory used for caching model parameters: 6.52MiB\n",
"On-chip memory remaining for caching model parameters: 1.20MiB\n",
"Off-chip memory used for streaming uncached model parameters: 0.00B\n",
"Number of Edge TPU subgraphs: 1\n",
"Total number of operations: 106\n",
"Operation log: /content/tflite_v2_models/ssd_mobilenet_v2_320x320_coco17_quant_edgetpu.log\n",
"\n",
"Model successfully compiled but not all operations are supported by the Edge TPU. A percentage of the model will instead run on the CPU, which is slower. If possible, consider updating your model to use only operations supported by the Edge TPU. For details, visit g.co/coral/model-reqs.\n",
"Number of operations that will run on Edge TPU: 99\n",
"Number of operations that will run on CPU: 7\n",
"\n",
"Operator Count Status\n",
"\n",
"CONCATENATION 2 Mapped to Edge TPU\n",
"DEPTHWISE_CONV_2D 17 Mapped to Edge TPU\n",
"DEQUANTIZE 2 Operation is working on an unsupported data type\n",
"QUANTIZE 1 Mapped to Edge TPU\n",
"QUANTIZE 4 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"LOGISTIC 1 Mapped to Edge TPU\n",
"CONV_2D 55 Mapped to Edge TPU\n",
"ADD 10 Mapped to Edge TPU\n",
"RESHAPE 13 Mapped to Edge TPU\n",
"CUSTOM 1 Operation is working on an unsupported data type\n",
"\n",
"\n",
"----- ssd_mobilenet_v2_fpnlite_320x320_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 14:53:12-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.203.128, 2404:6800:4008:c03::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.203.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 20515344 (20M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_mobilenet_v2_fp 100%[===================>] 19.56M 40.7MB/s in 0.5s \n",
"\n",
"2021-03-21 14:53:13 (40.7 MB/s) - ‘/content/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz’ saved [20515344/20515344]\n",
"\n",
"2021-03-21 14:53:14.508595: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:53:16.672904: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 14:53:16.677955: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 14:53:16.678636: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 14:53:16.678680: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:53:16.681539: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 14:53:16.681642: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 14:53:16.683540: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 14:53:16.683966: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 14:53:16.684118: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 14:53:16.684700: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 14:53:16.684933: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 14:53:16.684967: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 14:53:16.685212: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 14:53:16.685382: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:53:16.685407: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:53:24.443139: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:53:24.443233: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:53:24.477332: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 14:53:26.756119: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:53:26.756170: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fc6701ff750>, because it is not built.\n",
"W0321 14:53:27.574063 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fc6701ff750>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc6701a8ed0>, because it is not built.\n",
"W0321 14:53:27.746343 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc6701a8ed0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d450110>, because it is not built.\n",
"W0321 14:53:27.746614 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d450110>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3cc390>, because it is not built.\n",
"W0321 14:53:27.746803 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3cc390>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc63d5fecd0>, because it is not built.\n",
"W0321 14:53:27.746965 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc63d5fecd0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d8e6510>, because it is not built.\n",
"W0321 14:53:27.747115 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d8e6510>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3cced0>, because it is not built.\n",
"W0321 14:53:27.747268 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3cced0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc63d3fae90>, because it is not built.\n",
"W0321 14:53:27.747415 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc63d3fae90>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3fa910>, because it is not built.\n",
"W0321 14:53:27.747558 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3fa910>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3fa310>, because it is not built.\n",
"W0321 14:53:27.747730 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3fa310>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc63d555390>, because it is not built.\n",
"W0321 14:53:27.747874 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7fc63d555390>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63db44b10>, because it is not built.\n",
"W0321 14:53:27.748032 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63db44b10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d391ed0>, because it is not built.\n",
"W0321 14:53:27.748177 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d391ed0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc6701ac310>, because it is not built.\n",
"W0321 14:53:27.748332 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc6701ac310>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d6a63d0>, because it is not built.\n",
"W0321 14:53:27.748471 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d6a63d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d385fd0>, because it is not built.\n",
"W0321 14:53:27.748611 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d385fd0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d352150>, because it is not built.\n",
"W0321 14:53:27.748796 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d352150>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d352290>, because it is not built.\n",
"W0321 14:53:27.748940 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d352290>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d6bfc10>, because it is not built.\n",
"W0321 14:53:27.749089 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d6bfc10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d352ad0>, because it is not built.\n",
"W0321 14:53:27.749231 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d352ad0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d352d90>, because it is not built.\n",
"W0321 14:53:27.749417 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d352d90>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc6701ac350>, because it is not built.\n",
"W0321 14:53:27.749596 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc6701ac350>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d326450>, because it is not built.\n",
"W0321 14:53:27.749811 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d326450>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d326650>, because it is not built.\n",
"W0321 14:53:27.750015 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d326650>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d326c50>, because it is not built.\n",
"W0321 14:53:27.750212 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d326c50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3261d0>, because it is not built.\n",
"W0321 14:53:27.750382 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3261d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d326750>, because it is not built.\n",
"W0321 14:53:27.750557 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d326750>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d326ed0>, because it is not built.\n",
"W0321 14:53:27.750742 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d326ed0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3a5310>, because it is not built.\n",
"W0321 14:53:27.750945 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3a5310>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3095d0>, because it is not built.\n",
"W0321 14:53:27.751138 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3095d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d2fdb10>, because it is not built.\n",
"W0321 14:53:27.751295 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d2fdb10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d2fd590>, because it is not built.\n",
"W0321 14:53:27.751439 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d2fd590>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d2fdd50>, because it is not built.\n",
"W0321 14:53:27.751581 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d2fdd50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d2fd850>, because it is not built.\n",
"W0321 14:53:27.751746 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d2fd850>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d2fdb90>, because it is not built.\n",
"W0321 14:53:27.751924 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d2fdb90>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d2fd710>, because it is not built.\n",
"W0321 14:53:27.752061 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d2fd710>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3bb910>, because it is not built.\n",
"W0321 14:53:27.752222 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3bb910>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc6701ac390>, because it is not built.\n",
"W0321 14:53:27.752362 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc6701ac390>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d450150>, because it is not built.\n",
"W0321 14:53:27.752501 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d450150>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d38dd90>, because it is not built.\n",
"W0321 14:53:27.752661 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d38dd90>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d38db50>, because it is not built.\n",
"W0321 14:53:27.752824 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d38db50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d38dd50>, because it is not built.\n",
"W0321 14:53:27.752974 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d38dd50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3ed7d0>, because it is not built.\n",
"W0321 14:53:27.753124 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3ed7d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3ed290>, because it is not built.\n",
"W0321 14:53:27.753266 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc63d3ed290>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3ed450>, because it is not built.\n",
"W0321 14:53:27.776399 140492178335616 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7fc63d3ed450>, because it is not built.\n",
"2021-03-21 14:53:36.179453: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 14:53:38.326043: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:53:38.326114: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 14:53:53.530833 140492178335616 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 534). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 14:53:57.965786 140492178335616 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 14:53:58.368563 140492178335616 builder_impl.py:775] Assets written to: /content/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Model compiled successfully in 1287 ms.\n",
"\n",
"Input model: /content/tflite_v2_models/ssd_mobilenet_v2_fpnlite_320x320_coco17_quant.tflite\n",
"Input size: 3.70MiB\n",
"Output model: /content/tflite_v2_models/ssd_mobilenet_v2_fpnlite_320x320_coco17_quant_edgetpu.tflite\n",
"Output size: 4.21MiB\n",
"On-chip memory used for caching model parameters: 3.42MiB\n",
"On-chip memory remaining for caching model parameters: 4.31MiB\n",
"Off-chip memory used for streaming uncached model parameters: 0.00B\n",
"Number of Edge TPU subgraphs: 1\n",
"Total number of operations: 162\n",
"Operation log: /content/tflite_v2_models/ssd_mobilenet_v2_fpnlite_320x320_coco17_quant_edgetpu.log\n",
"\n",
"Model successfully compiled but not all operations are supported by the Edge TPU. A percentage of the model will instead run on the CPU, which is slower. If possible, consider updating your model to use only operations supported by the Edge TPU. For details, visit g.co/coral/model-reqs.\n",
"Number of operations that will run on Edge TPU: 112\n",
"Number of operations that will run on CPU: 50\n",
"\n",
"Operator Count Status\n",
"\n",
"RESHAPE 6 Mapped to Edge TPU\n",
"RESHAPE 2 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"RESHAPE 4 More than one subgraph is not supported\n",
"CONCATENATION 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"CONCATENATION 1 More than one subgraph is not supported\n",
"PACK 4 Tensor has unsupported rank (up to 3 innermost dimensions mapped)\n",
"LOGISTIC 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"ADD 2 More than one subgraph is not supported\n",
"ADD 10 Mapped to Edge TPU\n",
"CUSTOM 1 Operation is working on an unsupported data type\n",
"DEQUANTIZE 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"DEQUANTIZE 1 Operation is working on an unsupported data type\n",
"QUANTIZE 4 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"QUANTIZE 1 Mapped to Edge TPU\n",
"DEPTHWISE_CONV_2D 37 Mapped to Edge TPU\n",
"DEPTHWISE_CONV_2D 14 More than one subgraph is not supported\n",
"CONV_2D 14 More than one subgraph is not supported\n",
"CONV_2D 58 Mapped to Edge TPU\n",
"\n",
"\n",
"----- ssd_mobilenet_v2_fpnlite_640x640_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 14:54:51-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 108.177.97.128, 2404:6800:4008:c00::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|108.177.97.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 20518283 (20M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_mobilenet_v2_fp 100%[===================>] 19.57M 43.5MB/s in 0.4s \n",
"\n",
"2021-03-21 14:54:53 (43.5 MB/s) - ‘/content/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz’ saved [20518283/20518283]\n",
"\n",
"2021-03-21 14:54:55.200512: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:54:57.369929: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 14:54:57.375340: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 14:54:57.376046: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 14:54:57.376087: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:54:57.378987: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 14:54:57.379078: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 14:54:57.381006: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 14:54:57.381393: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 14:54:57.381510: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 14:54:57.382220: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 14:54:57.382422: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 14:54:57.382457: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 14:54:57.382748: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 14:54:57.382992: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:54:57.383041: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:55:05.135092: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:55:05.135143: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:55:05.173328: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 14:55:07.484199: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:55:07.484255: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f6a4059f5d0>, because it is not built.\n",
"W0321 14:55:08.369896 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f6a4059f5d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a40525c90>, because it is not built.\n",
"W0321 14:55:08.542722 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a40525c90>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a4034f3d0>, because it is not built.\n",
"W0321 14:55:08.543035 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a4034f3d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a40292f50>, because it is not built.\n",
"W0321 14:55:08.543193 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a40292f50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a0bd11d10>, because it is not built.\n",
"W0321 14:55:08.543371 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a0bd11d10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b91ad10>, because it is not built.\n",
"W0321 14:55:08.543536 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b91ad10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0bdda790>, because it is not built.\n",
"W0321 14:55:08.543683 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0bdda790>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a0bbe93d0>, because it is not built.\n",
"W0321 14:55:08.543855 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a0bbe93d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0be26c10>, because it is not built.\n",
"W0321 14:55:08.544047 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0be26c10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b8a2290>, because it is not built.\n",
"W0321 14:55:08.544189 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b8a2290>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a40431910>, because it is not built.\n",
"W0321 14:55:08.544336 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.convolutional.SeparableConv2D object at 0x7f6a40431910>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b992850>, because it is not built.\n",
"W0321 14:55:08.544515 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b992850>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0baf1610>, because it is not built.\n",
"W0321 14:55:08.544667 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0baf1610>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a405290d0>, because it is not built.\n",
"W0321 14:55:08.544857 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a405290d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b9d6990>, because it is not built.\n",
"W0321 14:55:08.545043 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b9d6990>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a4033b9d0>, because it is not built.\n",
"W0321 14:55:08.545238 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a4033b9d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0bc49610>, because it is not built.\n",
"W0321 14:55:08.545447 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0bc49610>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b9d6310>, because it is not built.\n",
"W0321 14:55:08.545634 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b9d6310>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b9d6b50>, because it is not built.\n",
"W0321 14:55:08.545925 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b9d6b50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b9d6d10>, because it is not built.\n",
"W0321 14:55:08.546126 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b9d6d10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b999890>, because it is not built.\n",
"W0321 14:55:08.546337 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b999890>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a40529110>, because it is not built.\n",
"W0321 14:55:08.546528 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a40529110>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b94e190>, because it is not built.\n",
"W0321 14:55:08.546718 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b94e190>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bd54450>, because it is not built.\n",
"W0321 14:55:08.546913 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bd54450>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b94ef90>, because it is not built.\n",
"W0321 14:55:08.547163 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b94ef90>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bcc2c10>, because it is not built.\n",
"W0321 14:55:08.547390 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bcc2c10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b94ebd0>, because it is not built.\n",
"W0321 14:55:08.547551 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b94ebd0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b94e050>, because it is not built.\n",
"W0321 14:55:08.547714 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b94e050>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0ba88690>, because it is not built.\n",
"W0321 14:55:08.547873 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0ba88690>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b939e50>, because it is not built.\n",
"W0321 14:55:08.548032 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b939e50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b992410>, because it is not built.\n",
"W0321 14:55:08.548187 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b992410>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b9dd410>, because it is not built.\n",
"W0321 14:55:08.548343 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b9dd410>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b899750>, because it is not built.\n",
"W0321 14:55:08.548498 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b899750>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bc20910>, because it is not built.\n",
"W0321 14:55:08.548637 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bc20910>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b899d90>, because it is not built.\n",
"W0321 14:55:08.548873 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b899d90>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bd13bd0>, because it is not built.\n",
"W0321 14:55:08.549060 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0bd13bd0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0bb087d0>, because it is not built.\n",
"W0321 14:55:08.549314 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0bb087d0>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a40529150>, because it is not built.\n",
"W0321 14:55:08.549498 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a40529150>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b88af50>, because it is not built.\n",
"W0321 14:55:08.549665 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b88af50>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b824310>, because it is not built.\n",
"W0321 14:55:08.550029 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b824310>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a40296b10>, because it is not built.\n",
"W0321 14:55:08.550278 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a40296b10>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b824910>, because it is not built.\n",
"W0321 14:55:08.550463 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b824910>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b8b5110>, because it is not built.\n",
"W0321 14:55:08.550609 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b8b5110>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b8b5550>, because it is not built.\n",
"W0321 14:55:08.550772 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7f6a0b8b5550>, because it is not built.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b8b5c10>, because it is not built.\n",
"W0321 14:55:08.556795 140096208312192 save_impl.py:81] Skipping full serialization of Keras layer <tensorflow.python.keras.layers.core.Lambda object at 0x7f6a0b8b5c10>, because it is not built.\n",
"2021-03-21 14:55:17.021697: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 14:55:19.179718: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:55:19.179797: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 14:55:34.525506 140096208312192 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 534). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 14:55:39.026639 140096208312192 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 14:55:39.417181 140096208312192 builder_impl.py:775] Assets written to: /content/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Model compiled successfully in 1862 ms.\n",
"\n",
"Input model: /content/tflite_v2_models/ssd_mobilenet_v2_fpnlite_640x640_coco17_quant.tflite\n",
"Input size: 4.28MiB\n",
"Output model: /content/tflite_v2_models/ssd_mobilenet_v2_fpnlite_640x640_coco17_quant_edgetpu.tflite\n",
"Output size: 5.05MiB\n",
"On-chip memory used for caching model parameters: 3.42MiB\n",
"On-chip memory remaining for caching model parameters: 3.57MiB\n",
"Off-chip memory used for streaming uncached model parameters: 0.00B\n",
"Number of Edge TPU subgraphs: 1\n",
"Total number of operations: 162\n",
"Operation log: /content/tflite_v2_models/ssd_mobilenet_v2_fpnlite_640x640_coco17_quant_edgetpu.log\n",
"\n",
"Model successfully compiled but not all operations are supported by the Edge TPU. A percentage of the model will instead run on the CPU, which is slower. If possible, consider updating your model to use only operations supported by the Edge TPU. For details, visit g.co/coral/model-reqs.\n",
"Number of operations that will run on Edge TPU: 111\n",
"Number of operations that will run on CPU: 51\n",
"\n",
"Operator Count Status\n",
"\n",
"ADD 10 Mapped to Edge TPU\n",
"ADD 2 More than one subgraph is not supported\n",
"CUSTOM 1 Operation is working on an unsupported data type\n",
"DEQUANTIZE 1 Operation is working on an unsupported data type\n",
"DEQUANTIZE 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"QUANTIZE 1 Mapped to Edge TPU\n",
"QUANTIZE 4 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"CONV_2D 58 Mapped to Edge TPU\n",
"CONV_2D 14 More than one subgraph is not supported\n",
"DEPTHWISE_CONV_2D 37 Mapped to Edge TPU\n",
"DEPTHWISE_CONV_2D 14 More than one subgraph is not supported\n",
"LOGISTIC 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"RESHAPE 5 Mapped to Edge TPU\n",
"RESHAPE 5 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"RESHAPE 2 More than one subgraph is not supported\n",
"CONCATENATION 1 Operation is otherwise supported, but not mapped due to some unspecified limitation\n",
"CONCATENATION 1 More than one subgraph is not supported\n",
"PACK 4 Tensor has unsupported rank (up to 3 innermost dimensions mapped)\n",
"\n",
"\n",
"----- ssd_resnet50_v1_fpn_640x640_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 14:58:15-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.203.128, 2404:6800:4008:c00::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.203.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 244817203 (233M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_resnet50_v1_fpn 100%[===================>] 233.48M 79.4MB/s in 2.9s \n",
"\n",
"2021-03-21 14:58:18 (79.4 MB/s) - ‘/content/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz’ saved [244817203/244817203]\n",
"\n",
"2021-03-21 14:58:22.896644: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:58:25.077025: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 14:58:25.082231: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 14:58:25.083041: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 14:58:25.083085: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 14:58:25.086581: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 14:58:25.086654: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 14:58:25.088746: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 14:58:25.089161: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 14:58:25.089275: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 14:58:25.089979: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 14:58:25.090189: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 14:58:25.090224: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 14:58:25.090470: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 14:58:25.090648: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:58:25.090683: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:58:33.951869: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:58:33.951933: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 14:58:33.982970: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 14:58:36.227849: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:58:36.227924: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f92d18a7250>, because it is not built.\n",
"W0321 14:58:37.140752 140270491105152 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f92d18a7250>, because it is not built.\n",
"2021-03-21 14:58:46.073721: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 14:58:48.052782: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 14:58:48.052851: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 14:59:04.430443 140270491105152 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 878). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 14:59:09.810531 140270491105152 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 14:59:10.477668 140270491105152 builder_impl.py:775] Assets written to: /content/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Internal compiler error. Aborting! \n",
"\n",
"\n",
"----- ssd_resnet50_v1_fpn_1024x1024_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 15:11:36-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 108.177.97.128, 2404:6800:4008:c03::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|108.177.97.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 244755628 (233M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_resnet50_v1_fpn 100%[===================>] 233.42M 45.6MB/s in 5.1s \n",
"\n",
"2021-03-21 15:11:42 (45.6 MB/s) - ‘/content/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.tar.gz’ saved [244755628/244755628]\n",
"\n",
"2021-03-21 15:11:47.096928: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 15:11:49.316915: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 15:11:49.322276: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 15:11:49.322993: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 15:11:49.323033: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 15:11:49.327123: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 15:11:49.327201: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 15:11:49.330130: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 15:11:49.330584: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 15:11:49.330726: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 15:11:49.331637: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 15:11:49.331917: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 15:11:49.331959: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 15:11:49.332293: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 15:11:49.332485: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:11:49.332515: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 15:11:58.167392: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:11:58.167441: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 15:11:58.197763: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 15:12:00.464123: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:12:00.464179: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7ff9a0a2b5d0>, because it is not built.\n",
"W0321 15:12:01.388571 140712052266880 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7ff9a0a2b5d0>, because it is not built.\n",
"2021-03-21 15:12:10.318024: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 15:12:12.296114: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:12:12.296199: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 15:12:28.666875 140712052266880 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 878). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 15:12:34.119299 140712052266880 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 15:12:34.766357 140712052266880 builder_impl.py:775] Assets written to: /content/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Internal compiler error. Aborting! \n",
"\n",
"\n",
"----- ssd_resnet101_v1_fpn_640x640_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 15:43:09-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.203.128, 2404:6800:4008:c03::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.203.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 386527459 (369M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_resnet101_v1_fp 100%[===================>] 368.62M 82.1MB/s in 4.5s \n",
"\n",
"2021-03-21 15:43:14 (82.1 MB/s) - ‘/content/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.tar.gz’ saved [386527459/386527459]\n",
"\n",
"2021-03-21 15:43:20.484681: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 15:43:22.743972: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 15:43:22.749289: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 15:43:22.749987: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 15:43:22.750031: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 15:43:22.754510: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 15:43:22.754593: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 15:43:22.757164: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 15:43:22.757883: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 15:43:22.758011: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 15:43:22.758777: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 15:43:22.759043: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 15:43:22.759080: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 15:43:22.759335: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 15:43:22.759522: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:43:22.759555: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 15:43:35.319266: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:43:35.319318: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 15:43:35.357840: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 15:43:38.104424: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:43:38.104493: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f1630cd3410>, because it is not built.\n",
"W0321 15:43:39.260831 139735119538048 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f1630cd3410>, because it is not built.\n",
"2021-03-21 15:43:56.326269: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 15:43:58.880547: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 15:43:58.880604: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 15:44:28.303315 139735119538048 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 1218). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 15:44:37.093323 139735119538048 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 15:44:38.306188 139735119538048 builder_impl.py:775] Assets written to: /content/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Internal compiler error. Aborting! \n",
"\n",
"\n",
"----- ssd_resnet101_v1_fpn_1024x1024_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 16:01:20-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.203.128, 2404:6800:4008:c01::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.203.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 386640082 (369M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_resnet101_v1_fp 100%[===================>] 368.73M 73.0MB/s in 5.0s \n",
"\n",
"2021-03-21 16:01:26 (73.0 MB/s) - ‘/content/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.tar.gz’ saved [386640082/386640082]\n",
"\n",
"2021-03-21 16:01:32.256236: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 16:01:34.538516: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 16:01:34.544255: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 16:01:34.545072: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 16:01:34.545113: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 16:01:34.548094: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 16:01:34.548171: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 16:01:34.550380: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 16:01:34.550769: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 16:01:34.550962: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 16:01:34.551688: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 16:01:34.551947: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 16:01:34.551981: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 16:01:34.552225: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 16:01:34.552505: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:01:34.552531: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 16:01:46.757527: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:01:46.757580: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 16:01:46.794021: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 16:01:49.531861: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:01:49.531922: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f6e6070d090>, because it is not built.\n",
"W0321 16:01:50.734616 140113957996416 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7f6e6070d090>, because it is not built.\n",
"2021-03-21 16:02:07.710367: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 16:02:10.303988: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:02:10.304066: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 16:02:39.888787 140113957996416 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 1218). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 16:02:48.702218 140113957996416 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 16:02:49.806411 140113957996416 builder_impl.py:775] Assets written to: /content/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Internal compiler error. Aborting! \n",
"\n",
"\n",
"----- ssd_resnet152_v1_fpn_640x640_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 16:41:47-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.203.128, 2404:6800:4008:c00::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.203.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 503650646 (480M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_resnet152_v1_fp 100%[===================>] 480.32M 57.0MB/s in 8.4s \n",
"\n",
"2021-03-21 16:41:56 (57.1 MB/s) - ‘/content/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.tar.gz’ saved [503650646/503650646]\n",
"\n",
"2021-03-21 16:42:04.882484: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 16:42:07.098566: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 16:42:07.104231: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 16:42:07.104970: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 16:42:07.105014: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 16:42:07.108314: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 16:42:07.108393: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 16:42:07.110620: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 16:42:07.111031: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 16:42:07.111177: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 16:42:07.112107: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 16:42:07.112354: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 16:42:07.112382: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 16:42:07.112702: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 16:42:07.112934: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:42:07.112968: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 16:42:24.028584: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:42:24.028658: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 16:42:24.075220: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 16:42:27.385216: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:42:27.385273: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fa1e0140450>, because it is not built.\n",
"W0321 16:42:29.007467 140335060641664 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fa1e0140450>, because it is not built.\n",
"2021-03-21 16:42:54.140084: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 16:42:57.394091: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 16:42:57.394150: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 16:43:40.017423 140335060641664 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 1558). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 16:43:52.846947 140335060641664 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 16:43:54.419669 140335060641664 builder_impl.py:775] Assets written to: /content/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Internal compiler error. Aborting! \n",
"\n",
"\n",
"----- ssd_resnet152_v1_fpn_1024x1024_coco17 -----\n",
"\n",
"\n",
"--2021-03-21 17:08:32-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.203.128, 2404:6800:4008:c01::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.203.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 504180168 (481M) [application/x-tar]\n",
"Saving to: ‘/content/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.tar.gz’\n",
"\n",
"ssd_resnet152_v1_fp 100%[===================>] 480.82M 71.1MB/s in 7.3s \n",
"\n",
"2021-03-21 17:08:41 (66.2 MB/s) - ‘/content/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.tar.gz’ saved [504180168/504180168]\n",
"\n",
"2021-03-21 17:08:49.187006: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 17:08:51.448193: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 17:08:51.459279: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 17:08:51.459940: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 17:08:51.459983: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 17:08:51.462969: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 17:08:51.463040: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 17:08:51.465108: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 17:08:51.465451: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 17:08:51.466543: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 17:08:51.467214: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 17:08:51.467428: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 17:08:51.467463: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 17:08:51.467788: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 17:08:51.467972: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 17:08:51.468006: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 17:09:08.064380: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 17:09:08.064458: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"2021-03-21 17:09:08.113845: I tensorflow/core/platform/profile_utils/cpu_utils.cc:114] CPU Frequency: 2299995000 Hz\n",
"2021-03-21 17:09:11.266063: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 17:09:11.266122: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fcfd58ef990>, because it is not built.\n",
"W0321 17:09:12.802162 140532355409792 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fcfd58ef990>, because it is not built.\n",
"2021-03-21 17:09:36.868384: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"2021-03-21 17:09:39.868043: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 17:09:39.868112: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"W0321 17:10:20.404126 140532355409792 save.py:243] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxPredictor_layer_call_fn while saving (showing 5 of 1558). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 17:10:32.467896 140532355409792 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8/tflite/saved_model/assets\n",
"I0321 17:10:34.011225 140532355409792 builder_impl.py:775] Assets written to: /content/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"\n",
"Internal compiler error. Aborting! \n",
"\n",
"\n",
"----- centernet_mobilenetv2fpn_512x512_coco17_od -----\n",
"\n",
"\n",
"--2021-03-21 18:00:46-- http://download.tensorflow.org/models/object_detection/tf2/20210210/centernet_mobilenetv2fpn_512x512_coco17_kpts.tar.gz\n",
"Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.203.128, 2404:6800:4008:c00::80\n",
"Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.203.128|:80... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 44635028 (43M) [application/x-tar]\n",
"Saving to: ‘/content/centernet_mobilenetv2fpn_512x512_coco17_kpts.tar.gz’\n",
"\n",
"centernet_mobilenet 100%[===================>] 42.57M 75.8MB/s in 0.6s \n",
"\n",
"2021-03-21 18:00:47 (75.8 MB/s) - ‘/content/centernet_mobilenetv2fpn_512x512_coco17_kpts.tar.gz’ saved [44635028/44635028]\n",
"\n",
"2021-03-21 18:00:48.953040: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.\n",
"W0321 18:00:51.109288 140547757721472 mobilenet_v2.py:299] `input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.\n",
"2021-03-21 18:00:51.110684: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2021-03-21 18:00:51.116685: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2021-03-21 18:00:51.117378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \n",
"pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n",
"coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n",
"2021-03-21 18:00:51.117459: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-03-21 18:00:51.127347: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
"2021-03-21 18:00:51.127437: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
"2021-03-21 18:00:51.129562: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2021-03-21 18:00:51.129964: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2021-03-21 18:00:51.130125: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.11'; dlerror: libcusolver.so.11: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/lib64-nvidia\n",
"2021-03-21 18:00:51.130851: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
"2021-03-21 18:00:51.131089: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
"2021-03-21 18:00:51.131138: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1766] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
"Skipping registering GPU devices...\n",
"2021-03-21 18:00:51.131435: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2021-03-21 18:00:51.131656: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2021-03-21 18:00:51.131706: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] \n",
"Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5\n",
"9412608/9406464 [==============================] - 0s 0us/step\n",
"WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/impl/api.py:464: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use `tf.cast` instead.\n",
"W0321 18:00:57.410170 140547757721472 deprecation.py:336] From /usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/impl/api.py:464: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use `tf.cast` instead.\n",
"WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.center_net_meta_arch.CenterNetMetaArch object at 0x7fd32e03be50>, because it is not built.\n",
"W0321 18:00:58.759344 140547757721472 save_impl.py:81] Skipping full serialization of Keras layer <object_detection.meta_architectures.center_net_meta_arch.CenterNetMetaArch object at 0x7fd32e03be50>, because it is not built.\n",
"2021-03-21 18:01:12.407506: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n",
"W0321 18:01:30.726729 140547757721472 save.py:243] Found untraced functions such as Conv1_layer_call_fn, bn_Conv1_layer_call_fn, Conv1_relu_layer_call_fn, expanded_conv_depthwise_BN_layer_call_fn, expanded_conv_depthwise_relu_layer_call_fn while saving (showing 5 of 290). These functions will not be directly callable after loading.\n",
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:500: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n",
"WARNING:tensorflow:FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"W0321 18:01:35.646472 140547757721472 save.py:1240] FOR KERAS USERS: The object that you are saving contains one or more Keras models or layers. If you are loading the SavedModel with `tf.keras.models.load_model`, continue reading (otherwise, you may ignore the following instructions). Please change your code to save with `tf.keras.models.save_model` or `model.save`, and confirm that the file \"keras.metadata\" exists in the export directory. In the future, Keras will only load the SavedModels that have this file. In other words, `tf.saved_model.save` will no longer write SavedModels that can be recovered as Keras models (this will apply in TF 2.5).\n",
"\n",
"FOR DEVS: If you are overwriting _tracking_metadata in your class, this property has been used to save metadata in the SavedModel. The metadta field will be deprecated soon, so please move the metadata to a different file.\n",
"INFO:tensorflow:Assets written to: /content/centernet_mobilenetv2_fpn_kpts/tflite/saved_model/assets\n",
"I0321 18:01:35.964854 140547757721472 builder_impl.py:775] Assets written to: /content/centernet_mobilenetv2_fpn_kpts/tflite/saved_model/assets\n",
"Edge TPU Compiler version 15.0.340273435\n",
"Invalid model: /content/tflite_v2_models/centernet_mobilenetv2fpn_512x512_coco17_od_quant.tflite\n",
"Model not quantized\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "SopYnQRdBzP3"
},
"source": [
"!rm -rf /content/centernet_mobilenetv2_fpn_od\n",
"!rm -rf /content/centernet_mobilenetv2fpn_512x512_coco17_od.tar.gz*"
],
"execution_count": 18,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "-00rT8pOV_mq"
},
"source": [
"# File compression amd copy google drive."
]
},
{
"cell_type": "code",
"metadata": {
"id": "BVIDYE0QWGZl",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "7e5c4ebd-3255-4ec2-9595-19ac283fd66f"
},
"source": [
"!tar czf /content/tflite_v2_model.tar.gz /content/tflite_v2_models/"
],
"execution_count": 19,
"outputs": [
{
"output_type": "stream",
"text": [
"tar: Removing leading `/' from member names\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "RwhPcF5d2i-B"
},
"source": [
"!cp /content/tflite_v2_model.tar.gz /content/drive/My\\ Drive"
],
"execution_count": 20,
"outputs": []
}
]
}