python: rename package tflm_runtime to runtime (#2030) Rename the Python package `tflm_runtime` to simply `runtime` in preparation for adding it under the new namespace package `tflite_micro`. Its full name will then be `tflite_micro.runtime`. We have kept the Bazel target `tflm_runtime` as an alias for `runtime` in order to stage this change; the Python module itself is renamed outright, so imports must switch to the new name. More details in http://b/286456378 BUG=part of #1484
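For downstream code, Bazel deps can keep pointing at `//tensorflow/lite/micro/python/interpreter/src:tflm_runtime` while the alias exists, but Python imports have to switch to the new module name. A minimal before/after sketch, assuming a float32 model and a placeholder model path (the import path and the `Interpreter` methods are the ones touched in the diff below):

```
# Before this change (the tflm_runtime module no longer exists after the rename):
#   from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime
#   interpreter = tflm_runtime.Interpreter.from_file("/path/to/model.tflite")

# After this change:
import numpy as np

from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime

# "/path/to/model.tflite" is a placeholder; substitute a real .tflite model.
interpreter = runtime.Interpreter.from_file("/path/to/model.tflite")

# Build a dummy input matching the model's first input tensor (dtype assumed float32).
input_shape = interpreter.get_input_details(0).get("shape")
input_data = np.zeros(input_shape, dtype=np.float32)

interpreter.set_input(input_data, 0)
interpreter.invoke()
output = interpreter.get_output(0)
```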
diff --git a/tensorflow/lite/micro/examples/hello_world/BUILD b/tensorflow/lite/micro/examples/hello_world/BUILD index b22a63e..50cb43e 100644 --- a/tensorflow/lite/micro/examples/hello_world/BUILD +++ b/tensorflow/lite/micro/examples/hello_world/BUILD
@@ -54,7 +54,7 @@ "@absl_py//absl/logging", requirement("numpy"), requirement("tensorflow-cpu"), - "//tensorflow/lite/micro/python/interpreter/src:tflm_runtime", + "//tensorflow/lite/micro/python/interpreter/src:runtime", ], )
diff --git a/tensorflow/lite/micro/examples/hello_world/evaluate.py b/tensorflow/lite/micro/examples/hello_world/evaluate.py index 1498bb6..8c5784b 100644 --- a/tensorflow/lite/micro/examples/hello_world/evaluate.py +++ b/tensorflow/lite/micro/examples/hello_world/evaluate.py
@@ -19,7 +19,7 @@ import numpy as np import matplotlib.pyplot as plt from tensorflow.python.platform import resource_loader -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime _USE_TFLITE_INTERPRETER = flags.DEFINE_bool( 'use_tflite', @@ -73,7 +73,7 @@ # returns the prediction of the interpreter. def get_tflm_prediction(model_path, x_values): # Create the tflm interpreter - tflm_interpreter = tflm_runtime.Interpreter.from_file(model_path) + tflm_interpreter = runtime.Interpreter.from_file(model_path) input_shape = np.array(tflm_interpreter.get_input_details(0).get('shape'))
diff --git a/tensorflow/lite/micro/examples/hello_world/evaluate_test.py b/tensorflow/lite/micro/examples/hello_world/evaluate_test.py index be9ffab..cf650ea 100644 --- a/tensorflow/lite/micro/examples/hello_world/evaluate_test.py +++ b/tensorflow/lite/micro/examples/hello_world/evaluate_test.py
@@ -18,8 +18,7 @@ from tensorflow.python.framework import test_util from tensorflow.python.platform import resource_loader from tensorflow.python.platform import test -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import \ - tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime from tflite_micro.tensorflow.lite.micro.examples.hello_world import evaluate PREFIX_PATH = resource_loader.get_path_to_datafile('') @@ -29,7 +28,7 @@ model_path = os.path.join(PREFIX_PATH, 'models/hello_world_float.tflite') input_shape = (1, 1) output_shape = (1, 1) - tflm_interpreter = tflm_runtime.Interpreter.from_file(model_path) + tflm_interpreter = runtime.Interpreter.from_file(model_path) def test_compare_with_tflite(self): x_values = evaluate.generate_random_float_input() @@ -47,7 +46,7 @@ model_path = os.path.join(PREFIX_PATH, 'models/hello_world_int8.tflite') input_shape = (1, 1) output_shape = (1, 1) - tflm_interpreter = tflm_runtime.Interpreter.from_file(model_path) + tflm_interpreter = runtime.Interpreter.from_file(model_path) def test_compare_with_tflite(self): x_values = evaluate.generate_random_int8_input()
diff --git a/tensorflow/lite/micro/examples/hello_world/quantization/BUILD b/tensorflow/lite/micro/examples/hello_world/quantization/BUILD index 1df5f87..c062fa8 100644 --- a/tensorflow/lite/micro/examples/hello_world/quantization/BUILD +++ b/tensorflow/lite/micro/examples/hello_world/quantization/BUILD
@@ -12,6 +12,6 @@ "@absl_py//absl/logging", requirement("numpy"), requirement("tensorflow-cpu"), - "//tensorflow/lite/micro/python/interpreter/src:tflm_runtime", + "//tensorflow/lite/micro/python/interpreter/src:runtime", ], )
diff --git a/tensorflow/lite/micro/examples/mnist_lstm/BUILD b/tensorflow/lite/micro/examples/mnist_lstm/BUILD index 9e1a4e6..f2ef3d1 100644 --- a/tensorflow/lite/micro/examples/mnist_lstm/BUILD +++ b/tensorflow/lite/micro/examples/mnist_lstm/BUILD
@@ -15,7 +15,7 @@ srcs = ["evaluate.py"], srcs_version = "PY3", deps = [ - "//tensorflow/lite/micro/python/interpreter/src:tflm_runtime", + "//tensorflow/lite/micro/python/interpreter/src:runtime", "@absl_py//absl:app", ], )
diff --git a/tensorflow/lite/micro/examples/mnist_lstm/evaluate.py b/tensorflow/lite/micro/examples/mnist_lstm/evaluate.py index f2fdbf3..47c23c4 100644 --- a/tensorflow/lite/micro/examples/mnist_lstm/evaluate.py +++ b/tensorflow/lite/micro/examples/mnist_lstm/evaluate.py
@@ -28,7 +28,7 @@ import numpy as np from PIL import Image -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime FLAGS = flags.FLAGS @@ -113,7 +113,7 @@ """Use TFLM interpreter to predict a MNIST image Args: - interpreter (tflm_runtime.Interpreter): the TFLM python interpreter + interpreter (runtime.Interpreter): the TFLM python interpreter data (np.array): data to be predicted Returns: @@ -141,7 +141,7 @@ """Use TFLM interpreter to predict a MNIST image Args: - interpreter (tflm_runtime.Interpreter): the TFLM python interpreter + interpreter (runtime.Interpreter): the TFLM python interpreter image_path (str): path for the image that need to be tested Returns: @@ -158,7 +158,7 @@ if not os.path.exists(FLAGS.img_path): raise ValueError("Image file does not exist. Please check the image path.") - tflm_interpreter = tflm_runtime.Interpreter.from_file(FLAGS.model_path) + tflm_interpreter = runtime.Interpreter.from_file(FLAGS.model_path) category_probabilities = predict_image(tflm_interpreter, FLAGS.img_path) predicted_category = np.argmax(category_probabilities) logging.info("Model predicts the image as %i with probability %.2f",
diff --git a/tensorflow/lite/micro/examples/mnist_lstm/evaluate_test.py b/tensorflow/lite/micro/examples/mnist_lstm/evaluate_test.py index 1092a78..2d13160 100644 --- a/tensorflow/lite/micro/examples/mnist_lstm/evaluate_test.py +++ b/tensorflow/lite/micro/examples/mnist_lstm/evaluate_test.py
@@ -20,7 +20,7 @@ from tensorflow.python.framework import test_util from tensorflow.python.platform import resource_loader from tensorflow.python.platform import test -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime from tflite_micro.tensorflow.lite.micro.examples.mnist_lstm import evaluate from tflite_micro.tensorflow.lite.micro.tools import requantize_flatbuffer @@ -33,7 +33,7 @@ self.model_path = os.path.join(PREFIX_PATH, "trained_lstm.tflite") self.input_shape = (1, 28, 28) self.output_shape = (1, 10) - self.tflm_interpreter = tflm_runtime.Interpreter.from_file(self.model_path) + self.tflm_interpreter = runtime.Interpreter.from_file(self.model_path) np.random.seed(42) #Seed the random number generator def testInputErrHandling(self): @@ -95,7 +95,7 @@ "trained_lstm_int8.tflite") self.input_shape = (1, 28, 28) self.output_shape = (1, 10) - self.tflm_interpreter_quant = tflm_runtime.Interpreter.from_file( + self.tflm_interpreter_quant = runtime.Interpreter.from_file( self.int8_model_path) np.random.seed(42) #Seed the random number generator @@ -106,8 +106,7 @@ # Create a float model for results comparison float_model_path = os.path.join(PREFIX_PATH, "trained_lstm.tflite") - tflm_interpreter_float = tflm_runtime.Interpreter.from_file( - float_model_path) + tflm_interpreter_float = runtime.Interpreter.from_file(float_model_path) num_test = 10 for _ in range(num_test): @@ -163,7 +162,7 @@ self.int16_model = self.requantizer.model_bytearray() self.input_shape = (1, 28, 28) self.output_shape = (1, 10) - self.tflm_interpreter_quant = tflm_runtime.Interpreter.from_bytes( + self.tflm_interpreter_quant = runtime.Interpreter.from_bytes( self.int16_model) np.random.seed(42) #Seed the random number generator @@ -174,8 +173,7 @@ # Create a float model for results comparison float_model_path = os.path.join(PREFIX_PATH, "trained_lstm.tflite") - tflm_interpreter_float = tflm_runtime.Interpreter.from_file( - float_model_path) + tflm_interpreter_float = runtime.Interpreter.from_file(float_model_path) num_test = 10 for _ in range(num_test):
diff --git a/tensorflow/lite/micro/examples/recipes/BUILD b/tensorflow/lite/micro/examples/recipes/BUILD index be80445..c3f0bec 100644 --- a/tensorflow/lite/micro/examples/recipes/BUILD +++ b/tensorflow/lite/micro/examples/recipes/BUILD
@@ -26,6 +26,8 @@ ], deps = [ ":resource_variables_lib", + # TODO(b/286456378): update tflm_runtime to runtime when we are ready to + # remove the alias. "//tensorflow/lite/micro/python/interpreter/src:tflm_runtime", ], )
diff --git a/tensorflow/lite/micro/examples/recipes/resource_variables_test.py b/tensorflow/lite/micro/examples/recipes/resource_variables_test.py index 1bc0522..c2ec582 100644 --- a/tensorflow/lite/micro/examples/recipes/resource_variables_test.py +++ b/tensorflow/lite/micro/examples/recipes/resource_variables_test.py
@@ -17,7 +17,7 @@ from tensorflow.python.framework import test_util from tensorflow.python.platform import test from tflite_micro.tensorflow.lite.micro.examples.recipes import resource_variables_lib -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime class ResourceVariablesTest(test_util.TensorFlowTestCase): @@ -27,7 +27,7 @@ # (variable value), to be accumulated by 5.0 each invoke. def test_resource_variables_model(self): model_keras = resource_variables_lib.get_model_from_keras() - tflm_interpreter = tflm_runtime.Interpreter.from_bytes(model_keras) + tflm_interpreter = runtime.Interpreter.from_bytes(model_keras) tflm_interpreter.set_input([[True]], 0) tflm_interpreter.set_input([np.full((100,), 15.0, dtype=np.float32)], 1)
diff --git a/tensorflow/lite/micro/python/interpreter/README.md b/tensorflow/lite/micro/python/interpreter/README.md index b2fd605..8a4e0cc 100644 --- a/tensorflow/lite/micro/python/interpreter/README.md +++ b/tensorflow/lite/micro/python/interpreter/README.md
@@ -13,7 +13,7 @@ #### Build The only package that needs to be included in the `BUILD` file is -`//tensorflow/lite/micro/python/interpreter/src:tflm_runtime`. It contains all +`//tensorflow/lite/micro/python/interpreter/src:runtime`. It contains all the correct dependencies to build the Python interpreter. ### PyPi @@ -34,13 +34,13 @@ ``` # For the Bazel workflow -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime # If model is a bytearray -tflm_interpreter = tflm_runtime.Interpreter.from_bytes(model_data) +tflm_interpreter = runtime.Interpreter.from_bytes(model_data) # If model is a file -tflm_interpreter = tflm_runtime.Interpreter.from_file(model_filepath) +tflm_interpreter = runtime.Interpreter.from_file(model_filepath) # Run inference on TFLM using an ndarray `data_x` tflm_interpreter.set_input(data_x, 0) @@ -62,7 +62,7 @@ [pybind11_bazel extension](https://github.com/pybind/pybind11_bazel). The most updated Python APIs can be found in -`tensorflow/lite/micro/python/interpreter/src/tflm_runtime.py`. +`tensorflow/lite/micro/python/interpreter/src/runtime.py`. ## Custom Ops @@ -116,7 +116,7 @@ For example, ``` -interpreter = tflm_runtime.Interpreter.from_file( +interpreter = runtime.Interpreter.from_file( model_path=model_path, custom_op_registerers=['SomeCustomRegisterer']) ``` @@ -152,7 +152,7 @@ 10016 bytes is the actual memory arena size. -During instantiation via the class methods `tflm_runtime.Interpreter.from_file` -or `tflm_runtime.Interpreter.from_bytes`, if `arena_size` is not explicitly +During instantiation via the class methods `runtime.Interpreter.from_file` +or `runtime.Interpreter.from_bytes`, if `arena_size` is not explicitly specified, the interpreter will default to a heuristic which is 10x the model size. This can be adjusted manually if desired.
diff --git a/tensorflow/lite/micro/python/interpreter/src/BUILD b/tensorflow/lite/micro/python/interpreter/src/BUILD index 6b2ecb9..6b698c3 100644 --- a/tensorflow/lite/micro/python/interpreter/src/BUILD +++ b/tensorflow/lite/micro/python/interpreter/src/BUILD
@@ -63,10 +63,23 @@ ], ) -py_library( +# tflm_runtime is deprecated; please use runtime instead. +# TODO(b/286456378): remove the alias once all usage is changed to the runtime +# target. +alias( name = "tflm_runtime", - srcs = ["tflm_runtime.py"], - data = [":interpreter_wrapper_pybind.so"], + actual = ":runtime", + visibility = ["//visibility:public"], +) + +py_library( + name = "runtime", + srcs = [ + "runtime.py", + ], + data = [ + ":interpreter_wrapper_pybind.so", + ], srcs_version = "PY3", visibility = ["//visibility:public"], deps = [
diff --git a/tensorflow/lite/micro/python/interpreter/src/tflm_runtime.py b/tensorflow/lite/micro/python/interpreter/src/runtime.py similarity index 100% rename from tensorflow/lite/micro/python/interpreter/src/tflm_runtime.py rename to tensorflow/lite/micro/python/interpreter/src/runtime.py
diff --git a/tensorflow/lite/micro/python/interpreter/tests/BUILD b/tensorflow/lite/micro/python/interpreter/tests/BUILD index f6b3855..83092b5 100644 --- a/tensorflow/lite/micro/python/interpreter/tests/BUILD +++ b/tensorflow/lite/micro/python/interpreter/tests/BUILD
@@ -15,7 +15,7 @@ deps = [ requirement("numpy"), requirement("tensorflow-cpu"), - "//tensorflow/lite/micro/python/interpreter/src:tflm_runtime", + "//tensorflow/lite/micro/python/interpreter/src:runtime", "//tensorflow/lite/micro/testing:generate_test_models_lib", ], )
diff --git a/tensorflow/lite/micro/python/interpreter/tests/interpreter_test.py b/tensorflow/lite/micro/python/interpreter/tests/interpreter_test.py index 0ac3305..61c40f5 100644 --- a/tensorflow/lite/micro/python/interpreter/tests/interpreter_test.py +++ b/tensorflow/lite/micro/python/interpreter/tests/interpreter_test.py
@@ -30,7 +30,7 @@ from tensorflow.python.framework import test_util from tensorflow.python.platform import test from tflite_micro.tensorflow.lite.micro.testing import generate_test_models -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime class ConvModelTests(test_util.TensorFlowTestCase): @@ -41,11 +41,11 @@ def testInitErrorHandling(self): with self.assertRaisesWithPredicateMatch(ValueError, "Invalid model file path"): - tflm_runtime.Interpreter.from_file("wrong.tflite") + runtime.Interpreter.from_file("wrong.tflite") def testInput(self): model_data = generate_test_models.generate_conv_model(False) - tflm_interpreter = tflm_runtime.Interpreter.from_bytes(model_data) + tflm_interpreter = runtime.Interpreter.from_bytes(model_data) data_x = np.random.randint(-127, 127, self.input_shape, dtype=np.int8) tflm_interpreter.set_input(data_x, 0) @@ -68,7 +68,7 @@ def testInputErrorHandling(self): model_data = generate_test_models.generate_conv_model(True, self.filename) - tflm_interpreter = tflm_runtime.Interpreter.from_bytes(model_data) + tflm_interpreter = runtime.Interpreter.from_bytes(model_data) data_x = np.random.randint(-127, 127, self.input_shape, dtype=np.int8) # Try to access out of bound data @@ -96,7 +96,7 @@ def testOutput(self): model_data = generate_test_models.generate_conv_model(True, self.filename) - tflm_interpreter = tflm_runtime.Interpreter.from_bytes(model_data) + tflm_interpreter = runtime.Interpreter.from_bytes(model_data) # Initial output values are all 0 output = tflm_interpreter.get_output(0) @@ -121,7 +121,7 @@ def testOutputErrorHandling(self): model_data = generate_test_models.generate_conv_model(True, self.filename) - tflm_interpreter = tflm_runtime.Interpreter.from_bytes(model_data) + tflm_interpreter = runtime.Interpreter.from_bytes(model_data) # Try to access out of bound data with self.assertRaisesWithPredicateMatch(IndexError, "Tensor is out of bound"): @@ -134,7 +134,7 @@ model_data = generate_test_models.generate_conv_model(True, self.filename) # TFLM interpreter - tflm_interpreter = tflm_runtime.Interpreter.from_bytes(model_data) + tflm_interpreter = runtime.Interpreter.from_bytes(model_data) # TFLite interpreter tflite_interpreter = tf.lite.Interpreter( @@ -169,8 +169,8 @@ def _helperModelFromFileAndBufferEqual(self): model_data = generate_test_models.generate_conv_model(True, self.filename) - file_interpreter = tflm_runtime.Interpreter.from_file(self.filename) - bytes_interpreter = tflm_runtime.Interpreter.from_bytes(model_data) + file_interpreter = runtime.Interpreter.from_file(self.filename) + bytes_interpreter = runtime.Interpreter.from_bytes(model_data) num_steps = 100 for i in range(0, num_steps): @@ -198,7 +198,7 @@ model_data = generate_test_models.generate_conv_model(False) interpreters = [ - tflm_runtime.Interpreter.from_bytes(model_data) for i in range(10) + runtime.Interpreter.from_bytes(model_data) for i in range(10) ] num_steps = 100 @@ -221,7 +221,7 @@ pass def _helperOutputTensorMemoryLeak(self): - interpreter = tflm_runtime.Interpreter.from_file(self.filename) + interpreter = runtime.Interpreter.from_file(self.filename) int_ref = weakref.finalize(interpreter, self._helperNoop) some_output = interpreter.get_output(0) output_ref = weakref.finalize(some_output, self._helperNoop) @@ -250,22 +250,22 @@ custom_op_registerers = [("wrong", "format")] with self.assertRaisesWithPredicateMatch(ValueError, "must be a list of strings"): - interpreter = tflm_runtime.Interpreter.from_bytes( - model_data, custom_op_registerers) + interpreter = runtime.Interpreter.from_bytes(model_data, + custom_op_registerers) custom_op_registerers = "WrongFormat" with self.assertRaisesWithPredicateMatch(ValueError, "must be a list of strings"): - interpreter = tflm_runtime.Interpreter.from_bytes( - model_data, custom_op_registerers) + interpreter = runtime.Interpreter.from_bytes(model_data, + custom_op_registerers) def testNonExistentCustomOps(self): model_data = generate_test_models.generate_conv_model(False) custom_op_registerers = ["SomeRandomOp"] with self.assertRaisesWithPredicateMatch( RuntimeError, "TFLM could not register custom op via SomeRandomOp"): - interpreter = tflm_runtime.Interpreter.from_bytes( - model_data, custom_op_registerers) + interpreter = runtime.Interpreter.from_bytes(model_data, + custom_op_registerers) if __name__ == "__main__":
diff --git a/tensorflow/lite/micro/tools/BUILD b/tensorflow/lite/micro/tools/BUILD index 6cab49e..b18e019 100644 --- a/tensorflow/lite/micro/tools/BUILD +++ b/tensorflow/lite/micro/tools/BUILD
@@ -59,7 +59,7 @@ ], deps = [ ":requantize_flatbuffer", - "//tensorflow/lite/micro/python/interpreter/src:tflm_runtime", + "//tensorflow/lite/micro/python/interpreter/src:runtime", requirement("numpy"), requirement("tensorflow-cpu"), ],
diff --git a/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py b/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py index 3dae5a8..d6aa238 100644 --- a/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py +++ b/tensorflow/lite/micro/tools/requantize_flatbuffer_test.py
@@ -20,7 +20,7 @@ from tensorflow.python.framework import test_util from tensorflow.python.platform import test from tflite_micro.tensorflow.lite.micro.tools import requantize_flatbuffer -from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime +from tflite_micro.tensorflow.lite.micro.python.interpreter.src import runtime from tflite_micro.tensorflow.lite.tools import flatbuffer_utils @@ -92,9 +92,9 @@ int8_converted_int16_model = convert_8to16_requantizer( keras_model, representative_dataset_gen) - interpreter_tfl_converted = tflm_runtime.Interpreter.from_bytes( + interpreter_tfl_converted = runtime.Interpreter.from_bytes( tfl_converted_int16_model) - interpreter_tool_converted = tflm_runtime.Interpreter.from_bytes( + interpreter_tool_converted = runtime.Interpreter.from_bytes( int8_converted_int16_model) num_steps = 10