Searched refs:getQuantizationMax (Results 1 – 3 of 3) sorted by relevance
495  const hexagon_nn_input& in1_max = model->getQuantizationMax(ins[0]);  in add()
497  const hexagon_nn_input& in2_max = model->getQuantizationMax(ins[1]);  in add()
548  const hexagon_nn_input& in_max = model->getQuantizationMax(ins[0]);  in average_pool_2d()
569  inputs[i + 1 + numInputTensors * 2] = model->getQuantizationMax(ins[i]);  in concatenation()
622  const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);  in conv_2d()
624  const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);  in conv_2d()
626  const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);  in conv_2d()
679  const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);  in depthwise_conv_2d()
681  const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);  in depthwise_conv_2d()
683  const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);  in depthwise_conv_2d()
[all …]
92 const hexagon_nn_input& getQuantizationMax(uint32_t operand);
177  const hexagon_nn_input& Model::getQuantizationMax(uint32_t operand) {  in getQuantizationMax() function in android::hardware::neuralnetworks::V1_0::implementation::hexagon::Model
425  const hexagon_nn_input& new_max = getQuantizationMax(outputs[0]);  in addFusedQuant8Operation()
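
The results show one pattern throughout: each prepare routine fetches the scalar quantization-max tensor of an operand and wires it into the Hexagon NN node next to the data tensor. The sketch below restates that pattern for the ADD case (the hits at source lines 495 and 497); apart from getQuantizationMax, Model, and hexagon_nn_input, every name and the stubbed bodies are assumptions made for illustration, not the driver's actual implementation.

// Illustrative sketch, not driver source. Names and bodies beyond the
// identifiers in the search results above are assumptions.
#include <cstdint>
#include <vector>

// Handle into the Hexagon NN graph: which node produces the tensor and
// which of its outputs to read (field names assumed from hexagon_nn.h).
struct hexagon_nn_input {
    uint32_t src_id;
    uint32_t output_idx;
};

class Model {
  public:
    // Matches the declaration at header line 92: returns the graph input
    // carrying the quantization maximum of the given operand.
    const hexagon_nn_input& getQuantizationMax(uint32_t operand) {
        return quant_max_[operand];  // stub storage, just for the sketch
    }

  private:
    std::vector<hexagon_nn_input> quant_max_ = std::vector<hexagon_nn_input>(8);
};

// Mirrors the add() call sites at lines 495/497: both quantized inputs
// contribute their max tensors before the node is appended to the graph.
bool prepareQuant8Add(Model* model, const std::vector<uint32_t>& ins) {
    const hexagon_nn_input& in1_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input& in2_max = model->getQuantizationMax(ins[1]);
    // A real routine would also gather the min tensors and the data tensors
    // here, then emit a quantized-ADD node into the graph.
    (void)in1_max;
    (void)in2_max;
    return true;
}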