Searched refs:weightsScale (Results 1 – 3 of 3) sorted by relevance
/frameworks/ml/nn/common/operations/
QuantizedLSTMTest.cpp
    275  float weightsScale = 0.00408021;                                                       in TEST_F() local
    291  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,    in TEST_F()
    293  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,    in TEST_F()
    295  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,    in TEST_F()
    297  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,    in TEST_F()
    303  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,   in TEST_F()
    305  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,   in TEST_F()
    307  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,   in TEST_F()
    309  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,   in TEST_F()
    315  OperandTypeParams(Type::TENSOR_INT32, {outputSize}, weightsScale / 128., 0),           in TEST_F()
    [all …]
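Note on the test fixture above: every quantized weight operand is declared with the same weightsScale (0.00408021), while the TENSOR_INT32 bias operand at line 315 uses weightsScale / 128. A quick standalone check of that relationship, using the value from the hits (plain C++, not the test's OperandTypeParams helper):

    #include <cstdio>

    int main() {
        // Values from the QuantizedLSTMTest.cpp hits: the bias scale is
        // derived from the common weights scale by dividing by 128.
        const float weightsScale = 0.00408021f;
        const double biasScale = weightsScale / 128.;  // ~3.19e-05
        std::printf("weightsScale = %g, biasScale = %g\n", weightsScale, biasScale);
        return 0;
    }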
QuantizedLSTM.cpp
    265  const float weightsScale = inputToInputWeights->scale;    in prepare() local
    266  NN_RET_CHECK(weightsScale != 0);                           in prepare()
    273  NN_RET_CHECK_EQ(weights->scale, weightsScale);             in prepare()
    297  NN_RET_CHECK_EQ(biasScale, weightsScale / 128.0);          in prepare()
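Together these hits give the scale invariants that QuantizedLSTM's prepare() enforces: a non-zero weights scale taken from inputToInputWeights, the same scale on every weight tensor, and a bias scale of exactly weightsScale / 128. A minimal standalone sketch of those checks (plain C++ instead of the NN_RET_CHECK macros; checkQuantizedLstmScales and TensorInfo are hypothetical stand-ins, not NNAPI types):

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for the operand metadata prepare() inspects.
    struct TensorInfo {
        float scale;
    };

    // Mirrors the checks at QuantizedLSTM.cpp:265-297: non-zero weights scale,
    // one common scale across all weight tensors, bias scale == weightsScale / 128.
    bool checkQuantizedLstmScales(const std::vector<TensorInfo>& weightTensors,
                                  const TensorInfo& bias) {
        if (weightTensors.empty()) return false;
        const float weightsScale = weightTensors[0].scale;  // inputToInputWeights->scale
        if (weightsScale == 0) return false;                 // weightsScale != 0
        for (const TensorInfo& w : weightTensors) {
            if (w.scale != weightsScale) return false;       // weights->scale == weightsScale
        }
        // Exact comparison, as in the original check.
        return bias.scale == weightsScale / 128.0f;          // biasScale == weightsScale / 128
    }

    int main() {
        std::vector<TensorInfo> weights(8, TensorInfo{0.00408021f});
        TensorInfo bias{0.00408021f / 128.0f};
        std::printf("scales valid: %s\n",
                    checkQuantizedLstmScales(weights, bias) ? "yes" : "no");
        return 0;
    }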
FullyConnected.cpp
    250  const float weightsScale = context->getInputShape(kWeightsTensor).scale;                     in validate() local
    252  bool meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * weightsScale);    in validate()
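The FullyConnected hit shows the corresponding constraint for a quantized fully connected layer: the output scale must be strictly greater than the product of the input and weights scales, and the identifier suggests this check is only applied to models targeting HAL versions before 1.2. A small hedged sketch of the predicate (hypothetical standalone function, not the driver's validate()):

    #include <cstdio>

    // Mirrors the expression at FullyConnected.cpp:252: prior to HAL 1.2 a
    // quantized FULLY_CONNECTED op was expected to satisfy
    // outputScale > inputScale * weightsScale.
    bool meetsQuantizedScaleConstraintBeforeV1_2(float inputScale, float weightsScale,
                                                 float outputScale) {
        return outputScale > inputScale * weightsScale;
    }

    int main() {
        const float inputScale = 0.5f;
        const float weightsScale = 0.00408021f;  // value reused from the LSTM test above
        std::printf("output scale 0.01  -> %d\n",
                    meetsQuantizedScaleConstraintBeforeV1_2(inputScale, weightsScale, 0.01f));
        std::printf("output scale 0.001 -> %d\n",
                    meetsQuantizedScaleConstraintBeforeV1_2(inputScale, weightsScale, 0.001f));
        return 0;
    }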