about summary refs log tree commit diff homepage
path: root/tensorflow/contrib/lite/kernels/layer_norm_lstm_test.cc
diff options
context:
space:
mode:
Diffstat (limited to 'tensorflow/contrib/lite/kernels/layer_norm_lstm_test.cc')
-rw-r--r-- tensorflow/contrib/lite/kernels/layer_norm_lstm_test.cc | 116
1 file changed, 57 insertions, 59 deletions
diff --git a/tensorflow/contrib/lite/kernels/layer_norm_lstm_test.cc b/tensorflow/contrib/lite/kernels/layer_norm_lstm_test.cc
index 479f6a7d3c..1535f750f9 100644
--- a/tensorflow/contrib/lite/kernels/layer_norm_lstm_test.cc
+++ b/tensorflow/contrib/lite/kernels/layer_norm_lstm_test.cc
@@ -129,87 +129,85 @@ class LayerNormLSTMOpModel : public SingleOpModel {
BuildInterpreter(input_shapes);
}
- void SetInputToInputWeights(std::initializer_list<float> f) {
+ void SetInputToInputWeights(std::vector<float> f) {
PopulateTensor(input_to_input_weights_, f);
}
- void SetInputToForgetWeights(std::initializer_list<float> f) {
+ void SetInputToForgetWeights(std::vector<float> f) {
PopulateTensor(input_to_forget_weights_, f);
}
- void SetInputToCellWeights(std::initializer_list<float> f) {
+ void SetInputToCellWeights(std::vector<float> f) {
PopulateTensor(input_to_cell_weights_, f);
}
- void SetInputToOutputWeights(std::initializer_list<float> f) {
+ void SetInputToOutputWeights(std::vector<float> f) {
PopulateTensor(input_to_output_weights_, f);
}
- void SetRecurrentToInputWeights(std::initializer_list<float> f) {
+ void SetRecurrentToInputWeights(std::vector<float> f) {
PopulateTensor(recurrent_to_input_weights_, f);
}
- void SetRecurrentToForgetWeights(std::initializer_list<float> f) {
+ void SetRecurrentToForgetWeights(std::vector<float> f) {
PopulateTensor(recurrent_to_forget_weights_, f);
}
- void SetRecurrentToCellWeights(std::initializer_list<float> f) {
+ void SetRecurrentToCellWeights(std::vector<float> f) {
PopulateTensor(recurrent_to_cell_weights_, f);
}
- void SetRecurrentToOutputWeights(std::initializer_list<float> f) {
+ void SetRecurrentToOutputWeights(std::vector<float> f) {
PopulateTensor(recurrent_to_output_weights_, f);
}
- void SetCellToInputWeights(std::initializer_list<float> f) {
+ void SetCellToInputWeights(std::vector<float> f) {
PopulateTensor(cell_to_input_weights_, f);
}
- void SetCellToForgetWeights(std::initializer_list<float> f) {
+ void SetCellToForgetWeights(std::vector<float> f) {
PopulateTensor(cell_to_forget_weights_, f);
}
- void SetCellToOutputWeights(std::initializer_list<float> f) {
+ void SetCellToOutputWeights(std::vector<float> f) {
PopulateTensor(cell_to_output_weights_, f);
}
- void SetInputLayerNormWeights(std::initializer_list<float> f) {
+ void SetInputLayerNormWeights(std::vector<float> f) {
PopulateTensor(input_layer_norm_weights_, f);
}
- void SetForgetLayerNormWeights(std::initializer_list<float> f) {
+ void SetForgetLayerNormWeights(std::vector<float> f) {
PopulateTensor(forget_layer_norm_weights_, f);
}
- void SetCellLayerNormWeights(std::initializer_list<float> f) {
+ void SetCellLayerNormWeights(std::vector<float> f) {
PopulateTensor(cell_layer_norm_weights_, f);
}
- void SetOutputLayerNormWeights(std::initializer_list<float> f) {
+ void SetOutputLayerNormWeights(std::vector<float> f) {
PopulateTensor(output_layer_norm_weights_, f);
}
- void SetInputGateBias(std::initializer_list<float> f) {
+ void SetInputGateBias(std::vector<float> f) {
PopulateTensor(input_gate_bias_, f);
}
- void SetForgetGateBias(std::initializer_list<float> f) {
+ void SetForgetGateBias(std::vector<float> f) {
PopulateTensor(forget_gate_bias_, f);
}
- void SetCellBias(std::initializer_list<float> f) {
- PopulateTensor(cell_bias_, f);
- }
+ void SetCellBias(std::vector<float> f) { PopulateTensor(cell_bias_, f); }
- void SetOutputGateBias(std::initializer_list<float> f) {
+ void SetOutputGateBias(std::vector<float> f) {
PopulateTensor(output_gate_bias_, f);
}
- void SetProjectionWeights(std::initializer_list<float> f) {
+ void SetProjectionWeights(std::vector<float> f) {
PopulateTensor(projection_weights_, f);
}
- void SetProjectionBias(std::initializer_list<float> f) {
+ void SetProjectionBias(std::vector<float> f) {
PopulateTensor(projection_bias_, f);
}
@@ -278,67 +276,67 @@ class HybridLayerNormLSTMOpModel : public LayerNormLSTMOpModel {
use_projection_bias, cell_clip, proj_clip,
input_shapes, TensorType_UINT8) {}
- void SetInputToInputWeights(std::initializer_list<float> f) {
+ void SetInputToInputWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(input_to_input_weights_, f);
}
- void SetInputToForgetWeights(std::initializer_list<float> f) {
+ void SetInputToForgetWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(input_to_forget_weights_, f);
}
- void SetInputToCellWeights(std::initializer_list<float> f) {
+ void SetInputToCellWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(input_to_cell_weights_, f);
}
- void SetInputToOutputWeights(std::initializer_list<float> f) {
+ void SetInputToOutputWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(input_to_output_weights_, f);
}
- void SetRecurrentToInputWeights(std::initializer_list<float> f) {
+ void SetRecurrentToInputWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(recurrent_to_input_weights_, f);
}
- void SetRecurrentToForgetWeights(std::initializer_list<float> f) {
+ void SetRecurrentToForgetWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(recurrent_to_forget_weights_, f);
}
- void SetRecurrentToCellWeights(std::initializer_list<float> f) {
+ void SetRecurrentToCellWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(recurrent_to_cell_weights_, f);
}
- void SetRecurrentToOutputWeights(std::initializer_list<float> f) {
+ void SetRecurrentToOutputWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(recurrent_to_output_weights_, f);
}
- void SetCellToInputWeights(std::initializer_list<float> f) {
+ void SetCellToInputWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(cell_to_input_weights_, f);
}
- void SetCellToForgetWeights(std::initializer_list<float> f) {
+ void SetCellToForgetWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(cell_to_forget_weights_, f);
}
- void SetCellToOutputWeights(std::initializer_list<float> f) {
+ void SetCellToOutputWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(cell_to_output_weights_, f);
}
- void SetInputLayerNormWeights(std::initializer_list<float> f) {
+ void SetInputLayerNormWeights(std::vector<float> f) {
PopulateTensor(input_layer_norm_weights_, f);
}
- void SetForgetLayerNormWeights(std::initializer_list<float> f) {
+ void SetForgetLayerNormWeights(std::vector<float> f) {
PopulateTensor(forget_layer_norm_weights_, f);
}
- void SetCellLayerNormWeights(std::initializer_list<float> f) {
+ void SetCellLayerNormWeights(std::vector<float> f) {
PopulateTensor(cell_layer_norm_weights_, f);
}
- void SetOutputLayerNormWeights(std::initializer_list<float> f) {
+ void SetOutputLayerNormWeights(std::vector<float> f) {
PopulateTensor(output_layer_norm_weights_, f);
}
- void SetProjectionWeights(std::initializer_list<float> f) {
+ void SetProjectionWeights(std::vector<float> f) {
SymmetricQuantizeAndPopulate(projection_weights_, f);
}
};
@@ -346,26 +344,26 @@ class HybridLayerNormLSTMOpModel : public LayerNormLSTMOpModel {
class BaseLayerNormLstmTest : public ::testing::Test {
protected:
// Weights of the Layer Norm LSTM model. Some are optional.
- std::initializer_list<float> input_to_input_weights_;
- std::initializer_list<float> input_to_cell_weights_;
- std::initializer_list<float> input_to_forget_weights_;
- std::initializer_list<float> input_to_output_weights_;
- std::initializer_list<float> input_gate_bias_;
- std::initializer_list<float> cell_gate_bias_;
- std::initializer_list<float> forget_gate_bias_;
- std::initializer_list<float> output_gate_bias_;
- std::initializer_list<float> recurrent_to_input_weights_;
- std::initializer_list<float> recurrent_to_cell_weights_;
- std::initializer_list<float> recurrent_to_forget_weights_;
- std::initializer_list<float> recurrent_to_output_weights_;
- std::initializer_list<float> cell_to_input_weights_;
- std::initializer_list<float> cell_to_forget_weights_;
- std::initializer_list<float> cell_to_output_weights_;
- std::initializer_list<float> input_layer_norm_weights_;
- std::initializer_list<float> forget_layer_norm_weights_;
- std::initializer_list<float> cell_layer_norm_weights_;
- std::initializer_list<float> output_layer_norm_weights_;
- std::initializer_list<float> projection_weights_;
+ std::vector<float> input_to_input_weights_;
+ std::vector<float> input_to_cell_weights_;
+ std::vector<float> input_to_forget_weights_;
+ std::vector<float> input_to_output_weights_;
+ std::vector<float> input_gate_bias_;
+ std::vector<float> cell_gate_bias_;
+ std::vector<float> forget_gate_bias_;
+ std::vector<float> output_gate_bias_;
+ std::vector<float> recurrent_to_input_weights_;
+ std::vector<float> recurrent_to_cell_weights_;
+ std::vector<float> recurrent_to_forget_weights_;
+ std::vector<float> recurrent_to_output_weights_;
+ std::vector<float> cell_to_input_weights_;
+ std::vector<float> cell_to_forget_weights_;
+ std::vector<float> cell_to_output_weights_;
+ std::vector<float> input_layer_norm_weights_;
+ std::vector<float> forget_layer_norm_weights_;
+ std::vector<float> cell_layer_norm_weights_;
+ std::vector<float> output_layer_norm_weights_;
+ std::vector<float> projection_weights_;
// Layer Norm LSTM input is stored as num_batch x num_inputs vector.
std::vector<std::vector<float>> layer_norm_lstm_input_;