aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/contrib
diff options
context:
space:
mode:
authorGravatar Michael Case <mikecase@google.com>2018-06-26 10:56:16 -0700
committerGravatar Michael Case <mikecase@google.com>2018-06-26 10:56:16 -0700
commit7f1056bcc9af72f6ed68939423362e390ce6ad8b (patch)
treecc434c644a508ac442f79d4463f72c929a017444 /tensorflow/contrib
parent343b373e3386f11a16a5216574492ca56bfd7050 (diff)
parentf2813bf6e4f7f415f012307a03fd5b9fb5822d28 (diff)
Merge commit for internal changes
Diffstat (limited to 'tensorflow/contrib')
-rw-r--r--tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb311
-rw-r--r--tensorflow/contrib/cmake/tf_c.cmake13
-rwxr-xr-xtensorflow/contrib/cmake/tf_python.cmake12
-rw-r--r--tensorflow/contrib/data/kernels/prefetching_kernels.cc10
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py30
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/blocks.py39
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/blocks_test.py48
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/cifar_input.py35
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/config.py12
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/main.py169
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/revnet.py39
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/revnet_test.py14
-rw-r--r--tensorflow/contrib/estimator/python/estimator/dnn.py17
-rw-r--r--tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py19
-rw-r--r--tensorflow/contrib/estimator/python/estimator/linear.py17
-rw-r--r--tensorflow/contrib/lite/java/demo/app/build.gradle3
-rw-r--r--tensorflow/contrib/lite/python/lite.py45
-rw-r--r--tensorflow/contrib/lite/python/lite_test.py276
-rw-r--r--tensorflow/contrib/lite/python/tflite_convert.py7
-rw-r--r--tensorflow/contrib/lite/toco/g3doc/python_api.md46
-rw-r--r--tensorflow/contrib/lite/toco/import_tensorflow.cc2
-rw-r--r--tensorflow/contrib/summary/summary_ops_test.py13
22 files changed, 887 insertions, 290 deletions
diff --git a/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb b/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb
index 324b23c24b..44532cb078 100644
--- a/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb
+++ b/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb
@@ -190,7 +190,6 @@
" self.upper_cell = tf.contrib.rnn.LSTMBlockCell(128)\n",
" self.relu_layer = tf.layers.Dense(3, activation=tf.nn.relu)\n",
"\n",
- "\n",
" def _rnn_layer(self, chars, cell, batch_size, training):\n",
" \"\"\"A single RNN layer.\n",
"\n",
@@ -203,13 +202,12 @@
" Returns:\n",
" A Tensor of shape (max_sequence_length, batch_size, output_size).\n",
" \"\"\"\n",
- " hidden_outputs = []\n",
- " autograph.utils.set_element_type(hidden_outputs, tf.float32)\n",
+ " hidden_outputs = tf.TensorArray(tf.float32, 0, True)\n",
" state, output = cell.zero_state(batch_size, tf.float32)\n",
" for ch in chars:\n",
" cell_output, (state, output) = cell.call(ch, (state, output))\n",
" hidden_outputs.append(cell_output)\n",
- " hidden_outputs = hidden_outputs.stack()\n",
+ " hidden_outputs = autograph.stack(hidden_outputs)\n",
" if training:\n",
" hidden_outputs = tf.nn.dropout(hidden_outputs, 0.5)\n",
" return hidden_outputs\n",
@@ -223,7 +221,7 @@
"\n",
"\n",
" def call(self, inputs, training=False):\n",
- " \"\"\"The RNN model code. Uses Eager and \n",
+ " \"\"\"The RNN model code. Uses Eager.\n",
"\n",
" The model consists of two RNN layers (made by lower_cell and upper_cell),\n",
" followed by a fully connected layer with ReLU activation.\n",
@@ -243,7 +241,8 @@
" seq = self._rnn_layer(seq, self.upper_cell, batch_size, training)\n",
"\n",
" # Grab just the end-of-sequence from each output.\n",
- " indices = tf.stack([length - 1, range(batch_size)], axis=1)\n",
+ " indices = (length - 1, range(batch_size))\n",
+ " indices = tf.stack(indices, 1)\n",
" sequence_ends = tf.gather_nd(seq, indices)\n",
" return self.relu_layer(sequence_ends)\n",
"\n",
@@ -381,7 +380,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 107,
"metadata": {
"colab": {
"autoexec": {
@@ -392,9 +391,9 @@
},
"colab_type": "code",
"executionInfo": {
- "elapsed": 10604,
+ "elapsed": 5454,
"status": "ok",
- "timestamp": 1524095272039,
+ "timestamp": 1529952160455,
"user": {
"displayName": "",
"photoUrl": "",
@@ -403,7 +402,7 @@
"user_tz": 240
},
"id": "2pg1AfbxBJQq",
- "outputId": "9c924b4f-06e1-4538-976c-a3e1ddac5660",
+ "outputId": "4aef3052-f7c7-4bb1-a0a2-73fef2e96efb",
"slideshow": {
"slide_type": "-"
}
@@ -413,7 +412,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "Eval loss at step 100: 0.0674834\n"
+ "Eval loss at step 100: 0.0705221\n"
]
}
],
@@ -423,8 +422,8 @@
" 'learning_rate': 0.01,\n",
"}\n",
"\n",
- "train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv\"\n",
- "test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv\"\n",
+ "train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv\"\n",
+ "test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv\"\n",
"data_dir = \"tmp/rnn/data\"\n",
"\n",
"regressor = tf.estimator.Estimator(\n",
@@ -457,7 +456,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 108,
"metadata": {
"colab": {
"autoexec": {
@@ -468,9 +467,9 @@
},
"colab_type": "code",
"executionInfo": {
- "elapsed": 7990,
+ "elapsed": 3432,
"status": "ok",
- "timestamp": 1524095280105,
+ "timestamp": 1529952163923,
"user": {
"displayName": "",
"photoUrl": "",
@@ -479,7 +478,7 @@
"user_tz": 240
},
"id": "dxHex2tUN_10",
- "outputId": "2b889e5a-b9ed-4645-bf03-d98f26c72101",
+ "outputId": "1ff438f2-b045-4f4e-86a0-4dae7503f6b2",
"slideshow": {
"slide_type": "slide"
}
@@ -491,12 +490,12 @@
"\u003clink rel=stylesheet type=text/css href='/nbextensions/google.colab/tabbar.css'\u003e\u003c/link\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3f36aa6cd0\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222a110\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -507,12 +506,12 @@
"\u003cscript src='/nbextensions/google.colab/tabbar_main.min.js'\u003e\u003c/script\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3eca67f7d0\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222a8d0\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -520,15 +519,15 @@
{
"data": {
"text/html": [
- "\u003cdiv id=\"id1\"\u003e\u003c/div\u003e"
+ "\u003cdiv id=\"id3\"\u003e\u003c/div\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3eca67f8d0\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222a050\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -536,16 +535,16 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa22-4362-11e8-91ec-c8d3ffb5fbe0\"] = colab_lib.createTabBar({\"contentBorder\": [\"0px\"], \"elementId\": \"id1\", \"borderColor\": [\"#a7a7a7\"], \"contentHeight\": [\"initial\"], \"tabNames\": [\"RNN Colorbot\"], \"location\": \"top\", \"initialSelection\": 0});\n",
- "//# sourceURL=js_71b9087b6d"
+ "window[\"8a03307e-78a7-11e8-99f9-c8d3ffb5fbe0\"] = colab_lib.createTabBar({\"contentBorder\": [\"0px\"], \"elementId\": \"id3\", \"contentHeight\": [\"initial\"], \"tabNames\": [\"RNN Colorbot\"], \"location\": \"top\", \"initialSelection\": 0, \"borderColor\": [\"#a7a7a7\"]});\n",
+ "//# sourceURL=js_dc5d7f2784"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f950\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a190\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -553,16 +552,16 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa23-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_e390445f33"
+ "window[\"8a03307f-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_be7950150b"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f990\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222ac90\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -570,17 +569,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa24-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_241dd76d85"
+ "window[\"8a033080-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_d0c3bd4eaa"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fc50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222aad0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -588,17 +587,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa25-4362-11e8-91ec-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_60c64e3d50"
+ "window[\"8a033081-78a7-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id3_content_0\");\n",
+ "//# sourceURL=js_f10f6eba86"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fd90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222aed0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -606,17 +605,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa26-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"e8ddfa25-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_14ea437cbd"
+ "window[\"8a033082-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8a033081-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_ff29697179"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fe10\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222abd0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -624,17 +623,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa27-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_09294c2226"
+ "window[\"8a033083-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_ff85295dc7"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fcd0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222ab90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -642,17 +641,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965514-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"e8ddfa24-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_e5e8266997"
+ "window[\"8b18d8dc-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8a033080-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_ed7aabfedb"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fe10\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a110\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -660,17 +659,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965515-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_07a097f0ee"
+ "window[\"8b18d8dd-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_c86f8feaf4"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fc90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222acd0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -678,17 +677,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965516-4362-11e8-91ec-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_790d669ca8"
+ "window[\"8b18d8de-78a7-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id3_content_0\");\n",
+ "//# sourceURL=js_4d0fde6662"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f8d0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222ae50\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -696,17 +695,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965517-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec965516-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_d30df771f0"
+ "window[\"8b18d8df-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8de-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_3f66d52720"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fd90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a210\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -714,32 +713,32 @@
{
"data": {
"application/javascript": [
- "window[\"ec965518-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_8a43a2da4b"
+ "window[\"8b18d8e0-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_375f5ae6d7"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fc50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a310\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQwAAAENCAYAAAD60Fs2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAACMBJREFUeJzt3F+I1XX+x/G32zjiFERUpgaFd2JBzOg5joX4h0SiMgmM\n/uhVGIlgFBlERGB3hUEkhkRdtDfRP1ACL6KpLBqcguxCjEAkmGamQcSohFHzsxe7O6zssvsydtff\n+ns8rs758j3f8z7fiyef7/k3o7XWCiDwh4s9APC/QzCAmGAAMcEAYoIBxAQDiAkGF8XTTz9d3W63\n7rvvvhoZGakVK1Zc7JEICMYlbvXq1TU8PHyxxzjPV199VcPDw/XZZ5/V22+/XVVVM2bMuMhTkRAM\n/qt+++23+uGHH+r666+vWbNmXexxuECCcQl76qmnanx8vLZs2VIDAwP1+uuv1zfffFP3339/dTqd\nWr9+fY2MjEzvv2nTpnr55ZfrgQceqIGBgXr44Yfr5MmTVVV1+vTp2r59ey1durQ6nU5t2LChTpw4\nUVVVk5OTtWXLllq6dGmtXbu23nnnnelj7tq1q7Zt21bbt2+vJUuW1HvvvVfPPvtsHTp0qAYGBmrX\nrl1/N/fRo0dr06ZN1el06u67766hoaGqqhodHa1OpzO93zPPPFO33nrr9P3t27fXm2+++e89iZyv\ncUlbtWpVGx4ebq21NjEx0brdbjtw4EBrrbUvvviidbvdduLEidZaaxs3bmxr1qxp33//fZuammob\nN25sO3fubK219tZbb7VHH320TU1NtXPnzrXDhw+3X375pbXW2kMPPdR27NjRTp8+3Y4cOdIGBwen\nn/OVV15pN910U/voo49aa61NTU21999/vz344IPTMx48eLCtWLGitdbamTNn2po1a9qePXvamTNn\n2vDwcOvv72/Hjh2bfj2HDx9urbW2du3advvtt7ejR4+21lpbuXJlO3LkyH/qVNJas8L4f6D95edC\n+/btq5UrV9by5curqmrZsmV1880316effjq977333ls33HBD9fb21h133FFHjhypqqqenp46efJk\nHTt2rGbMmFGLFi2qyy+/vCYmJurrr7+uJ598smbOnFkLFy6sDRs21N69e6eP2d/fX6tXr66qqt7e\n3n8666FDh+rUqVP1yCOPVE9PTw0ODtaqVavqgw8+qKqqJUuW1MjISB0/fryqqtauXVtffvlljY6O\n1q+//loLFy78N501/pGeiz0A/z1jY2O1f//++vjjj6vqzyE5e/ZsLVu2bHqfa665Zvr27Nmz69Sp\nU1VVdc8999TExEQ98cQT9fPPP9e6devq8ccfr8nJybryyitr9uzZ04+bP39+HT58ePr+3Llz4xkn\nJydr3rx5522bP39+TU5OVlVVp9OpoaGhuu6666rb7Va32629e/dWb29vLV68+ALOBr+HYFzi/vbT\nh3nz5tX69etrx44dF3ycnp6e2rp1a23durXGxsZq8+bNtWDBgrrtttvqp59+qlOnTlVfX19VVY2P\nj9ecOXP+4Qz/ypw5c2p8fPy8bWNjY7VgwYKqqup2u/Xiiy/WvHnzqtPp1MDAQD333HPV29tb3W73\ngl8XF8YlySXu2muvrdHR0aqqWrduXQ0NDdXnn39e586dq6mpqRoZGakff/zxXx7n4MGD9d1339W5\nc+eqr6+venp66rLLLqu5c+dWf39/vfTSS3X69On69ttv6913361169b9rnlvueWW6uvrq9dee63O\nnj1bBw8erE8++aTuvPPOqqq68cYba9asWbVv377qdDp1xRVX1NVXX10ffvjheW+I8p8hGJe4zZs3\n1+7du6vb7db+/ftr9+7dtWfPnlq2bFmtWrWq3njjjen3OP7ZSuD48eO1bdu2Wrx4cd111121dOnS\n6Sjs3LmzRkdHa/ny5bVt27Z67LHHzrvMuR
AzZ86sV199tQ4cOFCDg4P1/PPP1wsvvDC9wqj68yrj\nqquumr7U+WsoFi1a9Luek9yM1vyBDpCxwgBiggHEBAOICQYQ+z/7PYzjf/QRGVxM12z68u+2WWEA\nMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHE\nBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhAT\nDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEww\ngJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEA\nYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOI\nCQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAm\nGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhg\nADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIB\nxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQ\nEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBM\nMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHB\nAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQD\niAkGEBMMIDajtdYu9hDA/wYrDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEA\nYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4j9CY2LTAbbRbWuAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQwAAAENCAYAAAD60Fs2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAABTFJREFUeJzt3C+LV30eh/HP6EZvbP4ZJmkXDA6oQdZRMIhYLIKCMGVA\nyyaLT2ERLMqEDfoUFA2y3WpRrOKoSUSECePcYUEWdsN1OzfOyr5e8ZwT3unie34cfgvb29vbAxDs\n2e0BwK9DMIBMMIBMMIBMMIBMMIBMMPipXrx4MWfOnNntGfwgweCnW1hY2O0J/CDBYEe2trZ2ewI/\nkWDwh509e3bW19fn0qVLc/z48dnY2Jhbt27NyZMn59y5c/Pw4cPvz25ubs7t27dneXl5Ll68OC9f\nvtzF5ezUX3Z7AL+mJ0+ezPr6+uzfv3+uXr0658+fn7t3787GxsbcuHFjjhw5MqdPn5579+7N27dv\n5/nz5/P169dZXV3d7ensgBMGP+T69etz8ODBef369Xz69GnW1tZm7969s7S0NFeuXJnHjx/PzMzT\np09nbW1tfvvttzl48OBcu3Ztl5ezE04Y/JBDhw7NzMy7d+/mw4cPs7y8PDMz29vb8+3btzlx4sTM\nzHz8+PH7szMzi4uLP38sfxrBYEcOHz48S0tL8+zZs/96/8CBA7OxsTFHjx6dmX8Fhl+XVxJ25Nix\nY7Nv375ZX1+fzc3N2dramjdv3nz/cfPChQvz4MGD+fz587x//34ePXq0y4vZCcHgD/v37yj27Nkz\n9+/fn1evXs3KysqcOnVq7ty5M1++fJmZmZs3b87i4uKsrKzM6urqXL58ebdm8ydY8Ac6QOWEAWSC\nAWSCAWSCAWT/s99h/P3GX3d7Avxf+9s//vkf15wwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgGxhe3t7e7dHAL8GJwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwg\nEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwg+x1QoZHG4XIe4gAAAABJRU5ErkJggg==\n",
"text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f3ecc00bf10\u003e"
+ "\u003cmatplotlib.figure.Figure at 0x7fcd0d02dc90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -748,17 +747,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965519-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec965515-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_893ad561f4"
+ "window[\"8b18d8e1-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8dd-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_34b0509660"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55c90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e850\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -766,17 +765,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551a-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_2d99e0ac17"
+ "window[\"8b18d8e2-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_518a0f26fe"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fe50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6ec90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -784,17 +783,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551b-4362-11e8-91ec-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_5c19462e32"
+ "window[\"8b18d8e3-78a7-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id3_content_0\");\n",
+ "//# sourceURL=js_17eb3ff612"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55dd0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6eb50\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -802,17 +801,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551c-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec96551b-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_b9c8b7567b"
+ "window[\"8b18d8e4-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8e3-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_99da807c8e"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55a50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6eb90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -820,17 +819,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551d-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_fd05186348"
+ "window[\"8b18d8e5-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_dee01cb4b6"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55810\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e610\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -838,16 +837,16 @@
{
"data": {
"text/html": [
- "\u003cdiv class=id_888646481 style=\"margin-right:10px; display:flex;align-items:center;\"\u003e\u003cspan style=\"margin-right: 3px;\"\u003e\u003c/span\u003e\u003c/div\u003e"
+ "\u003cdiv class=id_853612217 style=\"margin-right:10px; display:flex;align-items:center;\"\u003e\u003cspan style=\"margin-right: 3px;\"\u003e\u003c/span\u003e\u003c/div\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3f32414810\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222aa10\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -856,17 +855,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551e-4362-11e8-91ec-c8d3ffb5fbe0\"] = jQuery(\".id_888646481 span\");\n",
- "//# sourceURL=js_efef96e882"
+ "window[\"8b18d8e6-78a7-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_853612217 span\");\n",
+ "//# sourceURL=js_8c378be329"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55710\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e990\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -875,17 +874,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551f-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"ec96551e-4362-11e8-91ec-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
- "//# sourceURL=js_6eca889864"
+ "window[\"8b18d8e7-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"8b18d8e6-78a7-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_f0b946600c"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f990\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e310\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -894,17 +893,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea972-4362-11e8-91ec-c8d3ffb5fbe0\"] = jQuery(\".id_888646481 input\");\n",
- "//# sourceURL=js_f02070cc60"
+ "window[\"8b18d8e9-78a7-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_853612217 input\");\n",
+ "//# sourceURL=js_9e21b1373a"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b553d0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6ea90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -913,17 +912,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea973-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"ed8ea972-4362-11e8-91ec-c8d3ffb5fbe0\"].remove();\n",
- "//# sourceURL=js_ed9faba660"
+ "window[\"8b18d8ea-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"8b18d8e9-78a7-11e8-99f9-c8d3ffb5fbe0\"].remove();\n",
+ "//# sourceURL=js_a7764968c6"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31a95450\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e5d0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -932,17 +931,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea974-4362-11e8-91ec-c8d3ffb5fbe0\"] = jQuery(\".id_888646481 span\");\n",
- "//# sourceURL=js_f3458d7074"
+ "window[\"8b18d8eb-78a7-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_853612217 span\");\n",
+ "//# sourceURL=js_74279d3ff0"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31a95250\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e890\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -951,17 +950,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea975-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"ed8ea974-4362-11e8-91ec-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
- "//# sourceURL=js_3ffd97bd6f"
+ "window[\"8b18d8ec-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"8b18d8eb-78a7-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_82b6c34cdb"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31a953d0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e8d0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -970,17 +969,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea976-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec96551a-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_7f73e8bcca"
+ "window[\"8b18d8ed-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8e2-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_ff6144734a"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55710\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e8d0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -1043,28 +1042,6 @@
"kind": "local"
},
"name": "RNN Colorbot using Keras and Estimators",
- "provenance": [
- {
- "file_id": "1CtzefX39ffFibX_BqE6cRbT0UW_DdVKl",
- "timestamp": 1523579810961
- },
- {
- "file_id": "1DcfimonWU11tmyivKBGVrbpAl3BIOaRG",
- "timestamp": 1523016192637
- },
- {
- "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
- "timestamp": 1522238054357
- },
- {
- "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
- "timestamp": 1521743157199
- },
- {
- "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
- "timestamp": 1520522344607
- }
- ],
"version": "0.3.2",
"views": {}
},
diff --git a/tensorflow/contrib/cmake/tf_c.cmake b/tensorflow/contrib/cmake/tf_c.cmake
index 2e0a2fcef4..7a30eb94f5 100644
--- a/tensorflow/contrib/cmake/tf_c.cmake
+++ b/tensorflow/contrib/cmake/tf_c.cmake
@@ -36,16 +36,3 @@ add_dependencies(
tf_cc_while_loop
tf_core_lib
tf_protos_cc)
-
-if(tensorflow_BUILD_PYTHON_BINDINGS)
- add_library(tf_c_python_api OBJECT
- "${tensorflow_source_dir}/tensorflow/c/python_api.cc"
- "${tensorflow_source_dir}/tensorflow/c/python_api.h"
- )
- add_dependencies(
- tf_c_python_api
- tf_c
- tf_core_lib
- tf_core_framework
- tf_protos_cc)
-endif()
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index 786ea05c74..e3b59001bc 100755
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -456,6 +456,18 @@ add_custom_command(
COMMENT "Running SWIG to generate Python wrappers"
VERBATIM )
+add_library(tf_c_python_api OBJECT
+ "${tensorflow_source_dir}/tensorflow/c/python_api.cc"
+ "${tensorflow_source_dir}/tensorflow/c/python_api.h"
+)
+add_dependencies(
+ tf_c_python_api
+ tf_c
+ tf_core_lib
+ tf_core_framework
+ tf_protos_cc
+ tf_python_protos_cc)
+
set (pywrap_tensorflow_internal_src
"${tensorflow_source_dir}/tensorflow/core/profiler/internal/print_model_analysis.h"
"${tensorflow_source_dir}/tensorflow/core/profiler/internal/print_model_analysis.cc"
diff --git a/tensorflow/contrib/data/kernels/prefetching_kernels.cc b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
index a2bfce0362..0fc3773475 100644
--- a/tensorflow/contrib/data/kernels/prefetching_kernels.cc
+++ b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
@@ -269,18 +269,20 @@ class FunctionBufferResourceHandleOp : public OpKernel {
std::vector<Tensor> func_args;
func_args.push_back(*string_arg);
+ const string& source_device = ctx->device()->name();
+
// Obtain and canonicalize target_device.
const Tensor* target_arg;
OP_REQUIRES_OK(ctx, ctx->input("target_device", &target_arg));
- const string& target_device =
- DeviceNameUtils::CanonicalizeDeviceName(target_arg->scalar<string>()());
+ string target_device;
+ OP_REQUIRES_OK(ctx, DeviceNameUtils::CanonicalizeDeviceName(
+ target_arg->scalar<string>()(), source_device,
+ &target_device));
FunctionLibraryRuntime* lib = ctx->function_library();
OP_REQUIRES(ctx, lib != nullptr,
errors::Internal("No function library is provided."));
- const string& source_device = ctx->device()->name();
-
mutex_lock l(mu_);
if (!initialized_) {
OP_REQUIRES_OK(ctx, cinfo_.Init(ctx->resource_manager(), def()));
diff --git a/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py b/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
index b08132cd72..9c7040de9e 100644
--- a/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
@@ -235,6 +235,36 @@ class PrefetchToDeviceTest(test.TestCase):
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
+ def testPrefetchToSameDevice(self):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.prefetch_to_device(
+ "/job:localhost/replica:0/task:0/device:CPU:0"))
+
+    # NOTE(mrry): This device block creates the "host" dataset and iterator on
+    # /cpu:0, and exercises prefetching back to that same device. In typical
+    # use this would not be necessary, because the GPU device would not support
+    # any of the dataset-related ops.
+ with ops.device("/cpu:0"):
+ iterator = device_dataset.make_one_shot_iterator()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ next_element = iterator.get_next()
+ self.assertEqual(dtypes.int64, next_element.dtype)
+ self.assertEqual([], next_element.shape)
+
+ with self.test_session() as sess:
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
def testPrefetchDictToDevice(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
diff --git a/tensorflow/contrib/eager/python/examples/revnet/blocks.py b/tensorflow/contrib/eager/python/examples/revnet/blocks.py
index af41f64286..74c1825a49 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/blocks.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/blocks.py
@@ -24,6 +24,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import six
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import ops
@@ -93,9 +94,18 @@ class RevBlock(tf.keras.Model):
for i in reversed(range(len(self.blocks))):
block = self.blocks[i]
- y_inv = x if i == 0 else block.backward(y, training=training)
+ if i == 0:
+ y_inv = x
+ else:
+ # Don't update running stats when reconstructing activations
+ vars_and_vals = block.get_moving_stats()
+ y_inv = block.backward(y, training=training)
+ block.restore_moving_stats(vars_and_vals)
+
+ # Update running stats when computing gradients during training
dy, grads, vars_ = block.backward_grads_and_vars(
y_inv, dy, training=training)
+
grads_all += grads
vars_all += vars_
@@ -159,17 +169,18 @@ class _Residual(tf.keras.Model):
"""Apply residual block to inputs."""
x1, x2 = tf.split(x, num_or_size_splits=2, axis=self.axis)
- f_x2 = self.f.call(x2, training=training)
+ f_x2 = self.f(x2, training=training)
# TODO(lxuechen): Replace with simpler downsampling
x1_down = ops.downsample(
x1, self.filters // 2, self.strides, axis=self.axis)
x2_down = ops.downsample(
x2, self.filters // 2, self.strides, axis=self.axis)
y1 = f_x2 + x1_down
- g_y1 = self.g.call(y1, training=training) # self.g(y1) gives pylint error
+ g_y1 = self.g(y1, training=training)
y2 = g_y1 + x2_down
- if not concat: # Concat option needed for correct backward grads
+ if not concat: # For correct backward grads
return y1, y2
+
return tf.concat([y1, y2], axis=self.axis)
def backward(self, y, training=True):
@@ -178,9 +189,9 @@ class _Residual(tf.keras.Model):
assert self.strides == (1, 1)
y1, y2 = tf.split(y, num_or_size_splits=2, axis=self.axis)
- g_y1 = self.g.call(y1, training=training)
+ g_y1 = self.g(y1, training=training)
x2 = y2 - g_y1
- f_x2 = self.f.call(x2, training=training)
+ f_x2 = self.f(x2, training=training)
x1 = y1 - f_x2
return tf.concat([x1, x2], axis=self.axis)
@@ -216,6 +227,22 @@ class _Residual(tf.keras.Model):
return tf.concat([dx1, dx2], axis=self.axis), grads, vars_
+ def get_moving_stats(self):
+ vars_and_vals = {}
+
+ def _is_moving_var(v): # pylint: disable=invalid-name
+ n = v.name
+ return n.endswith("moving_mean:0") or n.endswith("moving_variance:0")
+
+ for v in filter(_is_moving_var, self.f.variables + self.g.variables):
+ vars_and_vals[v] = v.read_value()
+
+ return vars_and_vals
+
+ def restore_moving_stats(self, vars_and_vals):
+ for var_, val in six.iteritems(vars_and_vals):
+ var_.assign(val)
+
def _BottleneckResidualInner(filters,
strides,
diff --git a/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py b/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py
index f4436fd925..a28ca6e3e0 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py
@@ -240,13 +240,12 @@ class _ResidualTest(tf.test.TestCase):
x = tf.random_normal(shape=data_shape)
residual = blocks._Residual(
filters=16, strides=(1, 1), input_shape=input_shape)
+
y_tr, y_ev = residual(x, training=True), residual(x, training=False)
- x_ = residual.backward(y_tr, training=True)
- # The numerical loss is alarming; reconstructed inputs could differ from
- # the original inputs often by more than 1e-3
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
x_ = residual.backward(y_ev, training=False)
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
+ self.assertAllClose(x, x_, rtol=1e-1, atol=1e-1)
+ x_ = residual.backward(y_tr, training=True) # This updates moving avg
+ self.assertAllClose(x, x_, rtol=1e-1, atol=1e-1)
def test_backward_channels_last(self):
"""Test `backward` function with `channels_last` data format."""
@@ -259,12 +258,12 @@ class _ResidualTest(tf.test.TestCase):
strides=(1, 1),
input_shape=input_shape,
data_format="channels_last")
+
y_tr, y_ev = residual(x, training=True), residual(x, training=False)
- x_ = residual.backward(y_tr, training=True)
- # Egregious numerical error
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
x_ = residual.backward(y_ev, training=False)
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
+ self.assertAllClose(x, x_, rtol=1e-1, atol=1e-1)
+ x_ = residual.backward(y_tr, training=True) # This updates moving avg
+ self.assertAllClose(x, x_, rtol=1e-1, atol=1e-1)
def test_backward_grads_and_vars_channels_first(self):
"""Test `backward_grads` function with `channels_first` data format."""
@@ -278,6 +277,8 @@ class _ResidualTest(tf.test.TestCase):
dy = tf.random_normal(shape=data_shape)
residual = blocks._Residual(
filters=16, strides=(1, 1), input_shape=input_shape)
+
+ vars_and_vals = residual.get_moving_stats()
dx_tr, grads_tr, vars_tr = residual.backward_grads_and_vars(
x, dy=dy, training=True)
dx_ev, grads_ev, vars_ev = residual.backward_grads_and_vars(
@@ -289,10 +290,23 @@ class _ResidualTest(tf.test.TestCase):
self.assertTrue(isinstance(vars_ev, list))
for grad_tr, var_tr, grad_ev, var_ev in zip(grads_tr, vars_tr, grads_ev,
vars_ev):
- if grad_tr is not None: # Batch norm moving mean, var gives None grad
- self.assertEqual(grad_tr.shape, grad_ev.shape)
- self.assertEqual(var_tr.shape, var_ev.shape)
- self.assertEqual(grad_tr.shape, var_tr.shape)
+ self.assertEqual(grad_tr.shape, grad_ev.shape)
+ self.assertEqual(var_tr.shape, var_ev.shape)
+ self.assertEqual(grad_tr.shape, var_tr.shape)
+
+ # Compare against the true gradient computed by the tape
+ residual.restore_moving_stats(vars_and_vals)
+ with tf.GradientTape(persistent=True) as tape:
+ tape.watch(x)
+ y = residual(x, training=True)
+ grads = tape.gradient(
+ y, [x] + residual.trainable_variables, output_gradients=[dy])
+ dx_tr_true, grads_tr_true = grads[0], grads[1:]
+
+ del tape
+
+ self.assertAllClose(dx_tr, dx_tr_true, rtol=1e-1, atol=1e-1)
+ self.assertAllClose(grads_tr, grads_tr_true, rtol=1e-1, atol=1e-1)
def test_backward_grads_and_vars_channels_last(self):
"""Test `backward_grads` function with `channels_last` data format."""
@@ -306,6 +320,7 @@ class _ResidualTest(tf.test.TestCase):
strides=(1, 1),
input_shape=input_shape,
data_format="channels_last")
+
dx_tr, grads_tr, vars_tr = residual.backward_grads_and_vars(
x, dy=dy, training=True)
dx_ev, grads_ev, vars_ev = residual.backward_grads_and_vars(
@@ -317,10 +332,9 @@ class _ResidualTest(tf.test.TestCase):
self.assertTrue(isinstance(vars_ev, list))
for grad_tr, var_tr, grad_ev, var_ev in zip(grads_tr, vars_tr, grads_ev,
vars_ev):
- if grad_tr is not None: # Batch norm moving mean, var gives None grad
- self.assertEqual(grad_tr.shape, grad_ev.shape)
- self.assertEqual(var_tr.shape, var_ev.shape)
- self.assertEqual(grad_tr.shape, var_tr.shape)
+ self.assertEqual(grad_tr.shape, grad_ev.shape)
+ self.assertEqual(var_tr.shape, var_ev.shape)
+ self.assertEqual(grad_tr.shape, var_tr.shape)
class _ResidualInnerTest(tf.test.TestCase):
diff --git a/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py b/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
index 3bc69da5ad..e1d8b3a055 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
@@ -26,8 +26,6 @@ import tensorflow as tf
IMAGE_HEIGHT = 32
IMAGE_WIDTH = 32
NUM_CHANNEL = 3
-NUM_TRAIN_IMG = 50000
-NUM_TEST_IMG = 10000
def get_ds_from_tfrecords(data_dir,
@@ -37,8 +35,8 @@ def get_ds_from_tfrecords(data_dir,
epochs=None,
shuffle=True,
data_format="channels_first",
- num_parallel_calls=4,
- prefetch=True,
+ num_parallel_calls=8,
+ prefetch=0,
div255=True,
dtype=tf.float32):
"""Returns a tf.train.Dataset object from reading tfrecords.
@@ -48,11 +46,12 @@ def get_ds_from_tfrecords(data_dir,
split: "train", "validation", or "test"
data_aug: Apply data augmentation if True
batch_size: Batch size of dataset object
- epochs: Number of epochs to repeat the dataset
+ epochs: Number of epochs to repeat the dataset; default `None` means
+ repeating indefinitely
shuffle: Shuffle the dataset if True
data_format: `channels_first` or `channels_last`
num_parallel_calls: Number of threads for dataset preprocess
- prefetch: Apply prefetch for the dataset if True
+ prefetch: Buffer size for prefetch
div255: Divide the images by 255 if True
dtype: Data type of images
Returns:
@@ -62,7 +61,7 @@ def get_ds_from_tfrecords(data_dir,
ValueError: Unknown split
"""
- if split not in ["train", "validation", "test"]:
+ if split not in ["train", "validation", "test", "train_all"]:
raise ValueError("Unknown split {}".format(split))
def _parser(serialized_example):
@@ -74,7 +73,11 @@ def get_ds_from_tfrecords(data_dir,
"label": tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features["image"], tf.uint8)
- image = tf.reshape(image, [IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNEL])
+ # Initially reshaping to [H, W, C] does not work
+ image = tf.reshape(image, [NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
+ # This is needed for `tf.image.resize_image_with_crop_or_pad`
+ image = tf.transpose(image, [1, 2, 0])
+
image = tf.cast(image, dtype)
label = tf.cast(features["label"], tf.int32)
@@ -93,13 +96,21 @@ def get_ds_from_tfrecords(data_dir,
return image, label
filename = os.path.join(data_dir, split + ".tfrecords")
- dataset = tf.data.TFRecordDataset(filename).repeat(epochs)
+ dataset = tf.data.TFRecordDataset(filename)
+ dataset = dataset.repeat(epochs)
dataset = dataset.map(_parser, num_parallel_calls=num_parallel_calls)
+ dataset = dataset.prefetch(prefetch)
- if prefetch:
- dataset = dataset.prefetch(batch_size)
if shuffle:
- dataset = dataset.shuffle(NUM_TRAIN_IMG)
+ # Find the right size according to the split
+ size = {
+ "train": 40000,
+ "validation": 10000,
+ "test": 10000,
+ "train_all": 50000
+ }[split]
+ dataset = dataset.shuffle(size)
+
dataset = dataset.batch(batch_size)
return dataset
diff --git a/tensorflow/contrib/eager/python/examples/revnet/config.py b/tensorflow/contrib/eager/python/examples/revnet/config.py
index 263a65dc76..30b0edbf43 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/config.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/config.py
@@ -61,12 +61,13 @@ def get_hparams_cifar_38():
config.add_hparam("max_train_iter", 80000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
- config.add_hparam("prefetch", True)
- config.add_hparam("log_every", 50)
- config.add_hparam("save_every", 50)
+ config.add_hparam("log_every", 500)
+ config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
- config.add_hparam("eval_batch_size", 500)
+ config.add_hparam("eval_batch_size", 1000)
config.add_hparam("div255", True)
+  # TODO(lxuechen): This is imprecise; when training with a held-out
+  # validation set, only 40k images remain in the training data
config.add_hparam("iters_per_epoch", 50000 // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
@@ -104,11 +105,10 @@ def get_hparams_imagenet_56():
config.add_hparam("max_train_iter", 600000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
- config.add_hparam("prefetch", True)
config.add_hparam("log_every", 50)
config.add_hparam("save_every", 50)
config.add_hparam("dtype", tf.float32)
- config.add_hparam("eval_batch_size", 500)
+ config.add_hparam("eval_batch_size", 1000)
config.add_hparam("div255", True)
# TODO(lxuechen): Update this according to ImageNet data
config.add_hparam("iters_per_epoch", 50000 // config.batch_size)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/main.py b/tensorflow/contrib/eager/python/examples/revnet/main.py
index 9ef11f8e9b..1065592509 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/main.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/main.py
@@ -19,9 +19,11 @@ from __future__ import division
from __future__ import print_function
import os
+import sys
from absl import flags
import tensorflow as tf
+from tqdm import tqdm
from tensorflow.contrib.eager.python.examples.revnet import cifar_input
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
@@ -38,28 +40,54 @@ def main(_):
tf.enable_eager_execution()
config = config_.get_hparams_cifar_38()
- model = revnet.RevNet(config=config)
-
- ds_train = cifar_input.get_ds_from_tfrecords(
- data_dir=FLAGS.data_dir,
- split="train",
- data_aug=True,
- batch_size=config.batch_size,
- epochs=config.epochs,
- shuffle=config.shuffle,
- data_format=config.data_format,
- dtype=config.dtype,
- prefetch=config.prefetch)
- ds_validation = cifar_input.get_ds_from_tfrecords(
+ if FLAGS.validate:
+ # 40k Training set
+ ds_train = cifar_input.get_ds_from_tfrecords(
+ data_dir=FLAGS.data_dir,
+ split="train",
+ data_aug=True,
+ batch_size=config.batch_size,
+ epochs=config.epochs,
+ shuffle=config.shuffle,
+ data_format=config.data_format,
+ dtype=config.dtype,
+ prefetch=config.batch_size)
+    # 10k Validation set
+ ds_validation = cifar_input.get_ds_from_tfrecords(
+ data_dir=FLAGS.data_dir,
+ split="validation",
+ data_aug=False,
+ batch_size=config.eval_batch_size,
+ epochs=1,
+ shuffle=False,
+ data_format=config.data_format,
+ dtype=config.dtype,
+ prefetch=config.eval_batch_size)
+ else:
+ # 50k Training set
+ ds_train = cifar_input.get_ds_from_tfrecords(
+ data_dir=FLAGS.data_dir,
+ split="train_all",
+ data_aug=True,
+ batch_size=config.batch_size,
+ epochs=config.epochs,
+ shuffle=config.shuffle,
+ data_format=config.data_format,
+ dtype=config.dtype,
+ prefetch=config.batch_size)
+
+ # Always compute loss and accuracy on whole training and test set
+ ds_train_one_shot = cifar_input.get_ds_from_tfrecords(
data_dir=FLAGS.data_dir,
- split="validation",
+ split="train_all",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
+ shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
- prefetch=config.prefetch)
+ prefetch=config.eval_batch_size)
ds_test = cifar_input.get_ds_from_tfrecords(
data_dir=FLAGS.data_dir,
@@ -67,69 +95,116 @@ def main(_):
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
+ shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
- prefetch=config.prefetch)
+ prefetch=config.eval_batch_size)
+ model = revnet.RevNet(config=config)
global_step = tfe.Variable(1, trainable=False)
-
- def learning_rate(): # TODO(lxuechen): Remove once cl/201089859 is in place
- return tf.train.piecewise_constant(global_step, config.lr_decay_steps,
- config.lr_list)
-
- optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
- checkpoint = tf.train.Checkpoint(
+ learning_rate = tf.train.piecewise_constant(
+ global_step, config.lr_decay_steps, config.lr_list)
+ optimizer = tf.train.MomentumOptimizer(
+ learning_rate, momentum=config.momentum)
+ checkpointer = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=global_step)
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
- checkpoint.restore(latest_path)
+ checkpointer.restore(latest_path)
+ print("Restored latest checkpoint at path:\"{}\" "
+ "with global_step: {}".format(latest_path, global_step.numpy()))
+ sys.stdout.flush()
+
+ warmup(model, config)
for x, y in ds_train:
loss = train_one_iter(model, x, y, optimizer, global_step=global_step)
- if global_step % config.log_every == 0:
- it_validation = ds_validation.make_one_shot_iterator()
+ if global_step.numpy() % config.log_every == 0:
+ it_train = ds_train_one_shot.make_one_shot_iterator()
+ acc_train, loss_train = evaluate(model, it_train)
it_test = ds_test.make_one_shot_iterator()
- acc_validation = evaluate(model, it_validation)
- acc_test = evaluate(model, it_test)
- print("Iter {}, "
- "train loss {}, "
- "validation accuracy {}, "
- "test accuracy {}".format(global_step.numpy(), loss, acc_validation,
- acc_test))
+ acc_test, loss_test = evaluate(model, it_test)
+ if FLAGS.validate:
+ it_validation = ds_validation.make_one_shot_iterator()
+ acc_validation, loss_validation = evaluate(model, it_validation)
+ print("Iter {}, "
+ "training set accuracy {:.4f}, loss {:.4f}; "
+              "validation set accuracy {:.4f}, loss {:.4f}; "
+ "test accuracy {:.4f}, loss {:.4f}".format(
+ global_step.numpy(), acc_train, loss_train, acc_validation,
+ loss_validation, acc_test, loss_test))
+ else:
+ print("Iter {}, "
+ "training set accuracy {:.4f}, loss {:.4f}; "
+ "test accuracy {:.4f}, loss {:.4f}".format(
+ global_step.numpy(), acc_train, loss_train, acc_test,
+ loss_test))
+ sys.stdout.flush()
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
- tf.contrib.summary.scalar("Validation accuracy", acc_validation)
- tf.contrib.summary.scalar("Test accuracy", acc_test)
tf.contrib.summary.scalar("Training loss", loss)
+ tf.contrib.summary.scalar("Test accuracy", acc_test)
+ if FLAGS.validate:
+ tf.contrib.summary.scalar("Validation accuracy", acc_validation)
if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
- checkpoint.save(file_prefix=FLAGS.train_dir + "ckpt")
+ saved_path = checkpointer.save(
+ file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
+ print("Saved checkpoint at path: \"{}\" "
+ "with global_step: {}".format(saved_path, global_step.numpy()))
+ sys.stdout.flush()
+
+def warmup(model, config, steps=1):
+ mock_input = tf.random_normal((config.batch_size,) + config.input_shape)
+ for _ in range(steps):
+ model(mock_input, training=False)
-def train_one_iter(model, inputs, labels, optimizer, global_step=None):
+
+def train_one_iter(model,
+ inputs,
+ labels,
+ optimizer,
+ global_step=None,
+ verbose=False):
"""Train for one iteration."""
- grads, vars_, loss = model.compute_gradients(inputs, labels, training=True)
- optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
+ if FLAGS.manual_grad:
+ if verbose:
+ print("Using manual gradients")
+ grads, vars_, loss = model.compute_gradients(inputs, labels)
+ optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
+ else: # For correctness validation
+ if verbose:
+ print("Not using manual gradients")
+ with tf.GradientTape() as tape:
+ logits, _ = model(inputs, training=True)
+ loss = model.compute_loss(logits=logits, labels=labels)
+ grads = tape.gradient(loss, model.trainable_variables)
+ optimizer.apply_gradients(
+ zip(grads, model.trainable_variables), global_step=global_step)
return loss.numpy()
def evaluate(model, iterator):
"""Compute accuracy with the given dataset iterator."""
+ mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
- for x, y in iterator:
+ for x, y in tqdm(iterator):
logits, _ = model(x, training=False)
+ loss = model.compute_loss(logits=logits, labels=y)
accuracy(
labels=tf.cast(y, tf.int64),
predictions=tf.argmax(logits, axis=1, output_type=tf.int64))
+ mean_loss(loss)
- return accuracy.result().numpy()
+ return accuracy.result().numpy(), mean_loss.result().numpy()
if __name__ == "__main__":
@@ -138,10 +213,18 @@ if __name__ == "__main__":
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_string(
- "data_dir", default=None, help="Directory to load tfrecords.")
+ "data_dir", default=None, help="Directory to load tfrecords")
flags.DEFINE_boolean(
"restore",
- default=True,
+ default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
+ flags.DEFINE_boolean(
+ "validate",
+ default=False,
+ help="[Optional] Use the validation set or not for hyperparameter search")
+ flags.DEFINE_boolean(
+ "manual_grad",
+ default=False,
+ help="[Optional] Use manual gradient graph to save memory")
FLAGS = flags.FLAGS
tf.app.run(main)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/revnet.py b/tensorflow/contrib/eager/python/examples/revnet/revnet.py
index b3b8c262b1..0228bff6fa 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/revnet.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/revnet.py
@@ -27,6 +27,7 @@ from __future__ import print_function
import functools
import operator
+import six
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks
@@ -47,6 +48,7 @@ class RevNet(tf.keras.Model):
self._init_block = self._construct_init_block()
self._block_list = self._construct_intermediate_blocks()
self._final_block = self._construct_final_block()
+ self._moving_stats_vars = None
def _construct_init_block(self):
init_block = tf.keras.Sequential(
@@ -153,7 +155,6 @@ class RevNet(tf.keras.Model):
def call(self, inputs, training=True):
"""Forward pass."""
- # Only store hidden states during training
if training:
saved_hidden = [inputs]
@@ -181,17 +182,22 @@ class RevNet(tf.keras.Model):
def compute_gradients(self, inputs, labels, training=True):
"""Manually computes gradients.
+ This method also SILENTLY updates the running averages of batch
+ normalization when `training` is set to True.
+
Args:
inputs: Image tensor, either NHWC or NCHW, conforming to `data_format`
labels: One-hot labels for classification
- training: for batch normalization
+ training: Use the mini-batch stats in batch norm if set to True
Returns:
- list of tuple each being (grad, var) for optimizer use
+ list of tuples each being (grad, var) for optimizer to use
"""
- # Forward pass record hidden states before downsampling
+ # Run forward pass to record hidden states; avoid updating running averages
+ vars_and_vals = self.get_moving_stats()
_, saved_hidden = self.call(inputs, training=training)
+ self.restore_moving_stats(vars_and_vals)
grads_all = []
vars_all = []
@@ -201,6 +207,7 @@ class RevNet(tf.keras.Model):
with tf.GradientTape() as tape:
x = tf.identity(x) # TODO(lxuechen): Remove after b/110264016 is fixed
tape.watch(x)
+ # Running stats updated below
logits = self._final_block(x, training=training)
loss = self.compute_loss(logits, labels)
@@ -226,16 +233,38 @@ class RevNet(tf.keras.Model):
with tf.GradientTape() as tape:
x = tf.identity(x) # TODO(lxuechen): Remove after b/110264016 is fixed
+ # Running stats updated below
y = self._init_block(x, training=training)
grads_all += tape.gradient(
y, self._init_block.trainable_variables, output_gradients=[dy])
vars_all += self._init_block.trainable_variables
+ # Apply weight decay
grads_all = self._apply_weight_decay(grads_all, vars_all)
return grads_all, vars_all, loss
def _apply_weight_decay(self, grads, vars_):
"""Update gradients to reflect weight decay."""
- return [g + self.config.weight_decay * v for g, v in zip(grads, vars_)]
+ # Don't decay bias
+ return [
+ g + self.config.weight_decay * v if v.name.endswith("kernel:0") else g
+ for g, v in zip(grads, vars_)
+ ]
+
+ def get_moving_stats(self):
+ vars_and_vals = {}
+
+ def _is_moving_var(v):
+ n = v.name
+ return n.endswith("moving_mean:0") or n.endswith("moving_variance:0")
+
+ for v in filter(_is_moving_var, self.variables):
+ vars_and_vals[v] = v.read_value()
+
+ return vars_and_vals
+
+ def restore_moving_stats(self, vars_and_vals):
+ for var_, val in six.iteritems(vars_and_vals):
+ var_.assign(val)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py b/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
index cb3bac13f9..a5f240436a 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
@@ -36,10 +36,11 @@ def train_one_iter(model, inputs, labels, optimizer, global_step=None):
return loss
-class RevnetTest(tf.test.TestCase):
+class RevNetTest(tf.test.TestCase):
def setUp(self):
- super(RevnetTest, self).setUp()
+ super(RevNetTest, self).setUp()
+ tf.set_random_seed(1)
config = config_.get_hparams_imagenet_56()
shape = (config.batch_size,) + config.input_shape
self.model = revnet.RevNet(config=config)
@@ -56,7 +57,7 @@ class RevnetTest(tf.test.TestCase):
del self.x
del self.t
del self.config
- super(RevnetTest, self).tearDown()
+ super(RevNetTest, self).tearDown()
def test_call(self):
"""Test `call` function."""
@@ -67,7 +68,8 @@ class RevnetTest(tf.test.TestCase):
def test_compute_gradients(self):
"""Test `compute_gradients` function."""
- grads, vars_, _ = self.model.compute_gradients(inputs=self.x, labels=self.t)
+ grads, vars_, _ = self.model.compute_gradients(
+ inputs=self.x, labels=self.t, training=True)
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
@@ -84,7 +86,7 @@ class RevnetTest(tf.test.TestCase):
def test_compute_gradients_defun(self):
"""Test `compute_gradients` function with defun."""
compute_gradients = tfe.defun(self.model.compute_gradients)
- grads, vars_, _ = compute_gradients(self.x, self.t)
+ grads, vars_, _ = compute_gradients(self.x, self.t, training=True)
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
@@ -144,7 +146,7 @@ class MockIterator(object):
return self._tensors
-class RevnetBenchmark(tf.test.Benchmark):
+class RevNetBenchmark(tf.test.Benchmark):
"""Eager and graph benchmarks for RevNet."""
def _train_batch_sizes(self):
diff --git a/tensorflow/contrib/estimator/python/estimator/dnn.py b/tensorflow/contrib/estimator/python/estimator/dnn.py
index f1c60a912c..4bb90cf81b 100644
--- a/tensorflow/contrib/estimator/python/estimator/dnn.py
+++ b/tensorflow/contrib/estimator/python/estimator/dnn.py
@@ -53,6 +53,18 @@ class DNNEstimator(estimator.Estimator):
l1_regularization_strength=0.001
))
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = DNNEstimator(
+ head=tf.contrib.estimator.multi_label_head(n_classes=3),
+ feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
+ hidden_units=[1024, 512, 256],
+ optimizer=lambda: tf.AdamOptimizer(
+ learning_rate=tf.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96))
+
# Or estimator with warm-starting from a previous checkpoint.
estimator = DNNEstimator(
head=tf.contrib.estimator.multi_label_head(n_classes=3),
@@ -115,8 +127,9 @@ class DNNEstimator(estimator.Estimator):
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to Adagrad optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
+ callable. Defaults to Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
diff --git a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
index ccaf1128bf..894a295498 100644
--- a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
+++ b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
@@ -53,12 +53,19 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
- # To apply L1 and L2 regularization, you can set optimizers as follows:
+ # To apply L1 and L2 regularization, you can set dnn_optimizer to:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
- # It is same for FtrlOptimizer.
+ # To apply learning rate decay, you can set dnn_optimizer to a callable:
+ lambda: tf.AdamOptimizer(
+ learning_rate=tf.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96)
+ # It is the same for linear_optimizer.
# Input builders
def input_fn_train: # returns x, y
@@ -116,12 +123,16 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the linear part of the model. Defaults to FTRL optimizer.
+ the linear part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL
+ optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the deep part of the model. Defaults to Adagrad optimizer.
+ the deep part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad
+ optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
diff --git a/tensorflow/contrib/estimator/python/estimator/linear.py b/tensorflow/contrib/estimator/python/estimator/linear.py
index 3bf4abe83d..b960b16f1b 100644
--- a/tensorflow/contrib/estimator/python/estimator/linear.py
+++ b/tensorflow/contrib/estimator/python/estimator/linear.py
@@ -39,6 +39,18 @@ class LinearEstimator(estimator.Estimator):
feature_columns=[categorical_column_a,
categorical_feature_a_x_categorical_feature_b])
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = LinearEstimator(
+ head=tf.contrib.estimator.multi_label_head(n_classes=3),
+ feature_columns=[categorical_column_a,
+ categorical_feature_a_x_categorical_feature_b],
+ optimizer=lambda: tf.train.FtrlOptimizer(
+ learning_rate=tf.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96))
+
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearEstimator(
head=tf.contrib.estimator.multi_label_head(n_classes=3),
@@ -99,8 +111,9 @@ class LinearEstimator(estimator.Estimator):
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to FTRL optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
+ callable. Defaults to FTRL optimizer.
config: `RunConfig` object to configure the runtime settings.
partitioner: Optional. Partitioner for input layer.
"""
diff --git a/tensorflow/contrib/lite/java/demo/app/build.gradle b/tensorflow/contrib/lite/java/demo/app/build.gradle
index 44ea2dcd90..192162cfce 100644
--- a/tensorflow/contrib/lite/java/demo/app/build.gradle
+++ b/tensorflow/contrib/lite/java/demo/app/build.gradle
@@ -5,7 +5,8 @@ android {
buildToolsVersion "26.0.1"
defaultConfig {
applicationId "android.example.com.tflitecamerademo"
- minSdkVersion 15
+ // Required by Camera2 API.
+ minSdkVersion 21
targetSdkVersion 26
versionCode 1
versionName "1.0"
diff --git a/tensorflow/contrib/lite/python/lite.py b/tensorflow/contrib/lite/python/lite.py
index 69a2f638af..a4229f91f5 100644
--- a/tensorflow/contrib/lite/python/lite.py
+++ b/tensorflow/contrib/lite/python/lite.py
@@ -50,6 +50,7 @@ from tensorflow.contrib.lite.python.interpreter import Interpreter # pylint: di
from tensorflow.contrib.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2 as _graph_pb2
+from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.framework.importer import import_graph_def
@@ -269,6 +270,48 @@ class TocoConverter(object):
return cls(
graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
+ @classmethod
+ def from_keras_model_file(cls,
+ model_file,
+ input_arrays=None,
+ input_shapes=None,
+ output_arrays=None):
+ """Creates a TocoConverter class from a tf.keras model file.
+
+ Args:
+ model_file: Full filepath of HDF5 file containing the tf.keras model.
+      input_arrays: List of input tensors to freeze graph with. Uses input
+        arrays from the Keras model when none are provided. (default None)
+ input_shapes: Dict of strings representing input tensor names to list of
+ integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
+ Automatically determined when input shapes is None (e.g., {"foo" :
+ None}). (default None)
+      output_arrays: List of output tensors to freeze graph with. Uses output
+        arrays from the Keras model when none are provided. (default None)
+
+ Returns:
+ TocoConverter class.
+ """
+ _keras.backend.clear_session()
+ _keras.backend.set_learning_phase(False)
+ keras_model = _keras.models.load_model(model_file)
+ sess = _keras.backend.get_session()
+
+ # Get input and output tensors.
+ if input_arrays:
+ input_tensors = get_tensors_from_tensor_names(sess.graph, input_arrays)
+ else:
+ input_tensors = keras_model.inputs
+
+ if output_arrays:
+ output_tensors = get_tensors_from_tensor_names(sess.graph, output_arrays)
+ else:
+ output_tensors = keras_model.outputs
+ set_tensor_shapes(input_tensors, input_shapes)
+
+ graph_def = _freeze_graph(sess, output_tensors)
+ return cls(graph_def, input_tensors, output_tensors)
+
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
@@ -366,7 +409,7 @@ def _is_frozen_graph(sess):
Bool.
"""
for op in sess.graph.get_operations():
- if op.type.startswith("Variable"):
+ if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
diff --git a/tensorflow/contrib/lite/python/lite_test.py b/tensorflow/contrib/lite/python/lite_test.py
index a9475de474..ca2af5aaed 100644
--- a/tensorflow/contrib/lite/python/lite_test.py
+++ b/tensorflow/contrib/lite/python/lite_test.py
@@ -19,11 +19,13 @@ from __future__ import division
from __future__ import print_function
import os
+import tempfile
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
+from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -618,5 +620,279 @@ class FromSavedModelTest(test_util.TensorFlowTestCase):
self.assertTrue(tflite_model)
+class FromKerasFile(test_util.TensorFlowTestCase):
+
+ def setUp(self):
+ keras.backend.clear_session()
+
+ def _getSequentialModel(self):
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(2, input_shape=(3,)))
+ model.add(keras.layers.RepeatVector(3))
+ model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.categorical_accuracy],
+ sample_weight_mode='temporal')
+ x = np.random.random((1, 3))
+ y = np.random.random((1, 3, 3))
+ model.train_on_batch(x, y)
+ model.predict(x)
+
+    fd, keras_file = tempfile.mkstemp('.h5')
+    try:
+      keras.models.save_model(model, keras_file)
+    finally:
+      os.close(fd)
+ return keras_file
+
+ def testSequentialModel(self):
+ """Test a Sequential tf.keras model with default inputs."""
+ keras_file = self._getSequentialModel()
+
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('dense_input', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(1, len(output_details))
+ self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+ def testSequentialModelInputArray(self):
+ """Test a Sequential tf.keras model testing input arrays argument."""
+ keras_file = self._getSequentialModel()
+
+ # Invalid input array raises error.
+ with self.assertRaises(ValueError) as error:
+ lite.TocoConverter.from_keras_model_file(
+ keras_file, input_arrays=['invalid-input'])
+ self.assertEqual("Invalid tensors 'invalid-input' were found.",
+ str(error.exception))
+
+ # Valid input array.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, input_arrays=['dense_input'])
+ tflite_model = converter.convert()
+ os.remove(keras_file)
+ self.assertTrue(tflite_model)
+
+ def testSequentialModelInputShape(self):
+ """Test a Sequential tf.keras model testing input shapes argument."""
+ keras_file = self._getSequentialModel()
+
+ # Passing in shape of invalid input array has no impact as long as all input
+ # arrays have a shape.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, input_shapes={'invalid-input': [2, 3]})
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ # Passing in shape of valid input array.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, input_shapes={'dense_input': [2, 3]})
+ tflite_model = converter.convert()
+ os.remove(keras_file)
+ self.assertTrue(tflite_model)
+
+ # Check input shape from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('dense_input', input_details[0]['name'])
+ self.assertTrue(([2, 3] == input_details[0]['shape']).all())
+
+ def testSequentialModelOutputArray(self):
+ """Test a Sequential tf.keras model testing output arrays argument."""
+ keras_file = self._getSequentialModel()
+
+ # Invalid output array raises error.
+ with self.assertRaises(ValueError) as error:
+ lite.TocoConverter.from_keras_model_file(
+ keras_file, output_arrays=['invalid-output'])
+ self.assertEqual("Invalid tensors 'invalid-output' were found.",
+ str(error.exception))
+
+ # Valid output array.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, output_arrays=['time_distributed/Reshape_1'])
+ tflite_model = converter.convert()
+ os.remove(keras_file)
+ self.assertTrue(tflite_model)
+
+ def testFunctionalModel(self):
+ """Test a Functional tf.keras model with default inputs."""
+ inputs = keras.layers.Input(shape=(3,), name='input')
+ x = keras.layers.Dense(2)(inputs)
+ output = keras.layers.Dense(3)(x)
+
+ model = keras.models.Model(inputs, output)
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.categorical_accuracy])
+ x = np.random.random((1, 3))
+ y = np.random.random((1, 3))
+ model.train_on_batch(x, y)
+
+ model.predict(x)
+ fd, keras_file = tempfile.mkstemp('.h5')
+ keras.models.save_model(model, keras_file)
+
+ # Convert to TFLite model.
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.close(fd)
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('input', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(1, len(output_details))
+ self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 3] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+ def testFunctionalModelMultipleInputs(self):
+ """Test a Functional tf.keras model with multiple inputs and outputs."""
+ a = keras.layers.Input(shape=(3,), name='input_a')
+ b = keras.layers.Input(shape=(3,), name='input_b')
+ dense = keras.layers.Dense(4, name='dense')
+ c = dense(a)
+ d = dense(b)
+ e = keras.layers.Dropout(0.5, name='dropout')(c)
+
+ model = keras.models.Model([a, b], [d, e])
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.mae],
+ loss_weights=[1., 0.5])
+
+ input_a_np = np.random.random((10, 3))
+ input_b_np = np.random.random((10, 3))
+ output_d_np = np.random.random((10, 4))
+ output_e_np = np.random.random((10, 4))
+ model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
+
+ model.predict([input_a_np, input_b_np], batch_size=5)
+ fd, keras_file = tempfile.mkstemp('.h5')
+ keras.models.save_model(model, keras_file)
+
+ # Convert to TFLite model.
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.close(fd)
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(2, len(input_details))
+ self.assertEqual('input_a', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ self.assertEqual('input_b', input_details[1]['name'])
+ self.assertEqual(np.float32, input_details[1]['dtype'])
+ self.assertTrue(([1, 3] == input_details[1]['shape']).all())
+ self.assertEqual((0., 0.), input_details[1]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(2, len(output_details))
+ self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 4] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+ self.assertEqual('dropout/Identity', output_details[1]['name'])
+ self.assertEqual(np.float32, output_details[1]['dtype'])
+ self.assertTrue(([1, 4] == output_details[1]['shape']).all())
+ self.assertEqual((0., 0.), output_details[1]['quantization'])
+
+ def testFunctionalSequentialModel(self):
+ """Test a Functional tf.keras model containing a Sequential model."""
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(2, input_shape=(3,)))
+ model.add(keras.layers.RepeatVector(3))
+ model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
+ model = keras.models.Model(model.input, model.output)
+
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.categorical_accuracy],
+ sample_weight_mode='temporal')
+ x = np.random.random((1, 3))
+ y = np.random.random((1, 3, 3))
+ model.train_on_batch(x, y)
+ model.predict(x)
+
+ model.predict(x)
+ fd, keras_file = tempfile.mkstemp('.h5')
+ keras.models.save_model(model, keras_file)
+
+ # Convert to TFLite model.
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.close(fd)
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('dense_input', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(1, len(output_details))
+ self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/lite/python/tflite_convert.py b/tensorflow/contrib/lite/python/tflite_convert.py
index d18a29834b..249b940f92 100644
--- a/tensorflow/contrib/lite/python/tflite_convert.py
+++ b/tensorflow/contrib/lite/python/tflite_convert.py
@@ -74,6 +74,9 @@ def _get_toco_converter(flags):
converter_kwargs["saved_model_dir"] = flags.saved_model_dir
converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set)
converter_kwargs["signature_key"] = flags.saved_model_signature_key
+ elif flags.keras_model_file:
+ converter_fn = lite.TocoConverter.from_keras_model_file
+ converter_kwargs["model_file"] = flags.keras_model_file
return converter_fn(**converter_kwargs)
@@ -227,6 +230,10 @@ def run_main(_):
"--saved_model_dir",
type=str,
help="Full filepath of directory containing the SavedModel.")
+ input_file_group.add_argument(
+ "--keras_model_file",
+ type=str,
+ help="Full filepath of HDF5 file containing tf.Keras model.")
# Model format flags.
parser.add_argument(
diff --git a/tensorflow/contrib/lite/toco/g3doc/python_api.md b/tensorflow/contrib/lite/toco/g3doc/python_api.md
index afa6fd6957..b04d166f89 100644
--- a/tensorflow/contrib/lite/toco/g3doc/python_api.md
+++ b/tensorflow/contrib/lite/toco/g3doc/python_api.md
@@ -15,6 +15,7 @@ Table of contents:
* [Exporting a GraphDef from tf.Session](#basic-graphdef-sess)
* [Exporting a GraphDef from file](#basic-graphdef-file)
* [Exporting a SavedModel](#basic-savedmodel)
+ * [Exporting a tf.keras File](#basic-keras-file)
* [Complex examples](#complex)
* [Exporting a quantized GraphDef](#complex-quant)
* [TensorFlow Lite Python interpreter](#interpreter)
@@ -114,6 +115,51 @@ For more complex SavedModels, the optional parameters that can be passed into
`output_arrays`, `tag_set` and `signature_key`. Details of each parameter are
available by running `help(tf.contrib.lite.TocoConverter)`.
+### Exporting a tf.keras File <a name="basic-keras-file"></a>
+
+The following example shows how to convert a tf.keras model into a TensorFlow
+Lite FlatBuffer.
+
+```python
+import tensorflow as tf
+
+converter = tf.contrib.lite.TocoConverter.from_keras_model_file("keras_model.h5")
+tflite_model = converter.convert()
+open("converted_model.tflite", "wb").write(tflite_model)
+```
+
+The tf.keras file must contain both the model and the weights. A comprehensive
+example including model construction can be seen below.
+
+```python
+import numpy as np
+import tensorflow as tf
+
+# Generate tf.keras model.
+model = tf.keras.models.Sequential()
+model.add(tf.keras.layers.Dense(2, input_shape=(3,)))
+model.add(tf.keras.layers.RepeatVector(3))
+model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(3)))
+model.compile(loss=tf.keras.losses.MSE,
+ optimizer=tf.keras.optimizers.RMSprop(lr=0.0001),
+ metrics=[tf.keras.metrics.categorical_accuracy],
+ sample_weight_mode='temporal')
+
+x = np.random.random((1, 3))
+y = np.random.random((1, 3, 3))
+model.train_on_batch(x, y)
+model.predict(x)
+
+# Save tf.keras model in HDF5 format.
+keras_file = "keras_model.h5"
+tf.keras.models.save_model(model, keras_file)
+
+# Convert to TensorFlow Lite model.
+converter = tf.contrib.lite.TocoConverter.from_keras_model_file(keras_file)
+tflite_model = converter.convert()
+open("converted_model.tflite", "wb").write(tflite_model)
+```
+
## Complex examples <a name="complex"></a>
For models where the default value of the attributes is not sufficient, the
diff --git a/tensorflow/contrib/lite/toco/import_tensorflow.cc b/tensorflow/contrib/lite/toco/import_tensorflow.cc
index da7e5add7e..485e853e25 100644
--- a/tensorflow/contrib/lite/toco/import_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/import_tensorflow.cc
@@ -378,7 +378,7 @@ tensorflow::Status ImportBoolArray(const TensorProto& input_tensor,
for (int i = 0; i < input_flat_size; i++) {
output_bool_data[i] = input_tensor.bool_val(0);
}
- } else if (input_tensor.int_val_size() == input_flat_size) {
+ } else if (input_tensor.bool_val_size() == input_flat_size) {
for (int i = 0; i < input_tensor.bool_val_size(); i++) {
output_bool_data[i] = input_tensor.bool_val(i);
}
diff --git a/tensorflow/contrib/summary/summary_ops_test.py b/tensorflow/contrib/summary/summary_ops_test.py
index f1ef218e74..3e41e3d0b4 100644
--- a/tensorflow/contrib/summary/summary_ops_test.py
+++ b/tensorflow/contrib/summary/summary_ops_test.py
@@ -81,6 +81,19 @@ class EagerFileTest(test_util.TensorFlowTestCase):
# test here that we're calling them correctly.
self.assertTrue(gfile.Exists(logdir))
+ @test_util.assert_no_new_pyobjects_executing_eagerly
+ def testEagerMemory(self):
+ training_util.get_or_create_global_step()
+ logdir = self.get_temp_dir()
+ with summary_ops.create_file_writer(
+ logdir, max_queue=0,
+ name='t0').as_default(), summary_ops.always_record_summaries():
+ summary_ops.generic('tensor', 1, '')
+ summary_ops.scalar('scalar', 2.0)
+ summary_ops.histogram('histogram', [1.0])
+ summary_ops.image('image', [[[[1.0]]]])
+ summary_ops.audio('audio', [[1.0]], 1.0, 1)
+
def testDefunSummarys(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()