Searched refs:getQuantizationMin (Results 1 – 3 of 3) sorted by relevance
494  const hexagon_nn_input& in1_min = model->getQuantizationMin(ins[0]);  in add()
496  const hexagon_nn_input& in2_min = model->getQuantizationMin(ins[1]);  in add()
547  const hexagon_nn_input& in_min = model->getQuantizationMin(ins[0]);  in average_pool_2d()
568  inputs[i + 1 + numInputTensors * 1] = model->getQuantizationMin(ins[i]);  in concatenation()
621  const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);  in conv_2d()
623  const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);  in conv_2d()
625  const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);  in conv_2d()
678  const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);  in depthwise_conv_2d()
680  const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);  in depthwise_conv_2d()
682  const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);  in depthwise_conv_2d()
[all …]
91 const hexagon_nn_input& getQuantizationMin(uint32_t operand);
165  const hexagon_nn_input& Model::getQuantizationMin(uint32_t operand) {  in getQuantizationMin()  function in android::hardware::neuralnetworks::V1_0::implementation::hexagon::Model
424  const hexagon_nn_input& new_min = getQuantizationMin(outputs[0]);  in addFusedQuant8Operation()
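
Read together, the hits show a single accessor on the HAL's Model wrapper that returns a graph input describing the lower end of a quantized operand's real-valued range, which the per-operation preparation routines (add, average_pool_2d, concatenation, conv_2d, depthwise_conv_2d, ...) append to the Hexagon NN op's inputs alongside the matching maximum. Below is only a minimal, self-contained sketch of that pattern, not the driver's actual implementation: hexagon_nn_input is reduced to an id/index pair, and OperandInfo, addFloatConstant, cachedLimit and the m* members are assumed names.

#include <cstdint>
#include <map>
#include <vector>

// Simplified stand-in for the Hexagon NN graph-input handle.
struct hexagon_nn_input {
    uint32_t src_id;      // node that produces the value
    uint32_t output_idx;  // which output of that node
};

// Illustrative fragment of the Model wrapper; assumes the operand tables were
// populated while the graph was being built.
class Model {
  public:
    const hexagon_nn_input& getTensor(uint32_t operand) { return mTensors[operand]; }

    // Lazily build and cache a constant graph input holding the dequantized
    // lower bound of a quant8 operand (asymmetric: real = (q - zeroPoint) * scale,
    // so q = 0 gives the minimum and q = 255 the maximum).
    const hexagon_nn_input& getQuantizationMin(uint32_t operand) {
        return cachedLimit(mQuantizationMins, operand, /*q=*/0);
    }
    const hexagon_nn_input& getQuantizationMax(uint32_t operand) {
        return cachedLimit(mQuantizationMaxs, operand, /*q=*/255);
    }

  private:
    struct OperandInfo {
        float scale;
        int32_t zeroPoint;
    };

    const hexagon_nn_input& cachedLimit(std::map<uint32_t, hexagon_nn_input>& cache,
                                        uint32_t operand, int32_t q) {
        auto it = cache.find(operand);
        if (it == cache.end()) {
            const OperandInfo& info = mOperands[operand];
            it = cache.emplace(operand,
                               addFloatConstant((q - info.zeroPoint) * info.scale)).first;
        }
        return it->second;
    }

    hexagon_nn_input addFloatConstant(float /*value*/) {
        // The real driver would append a const node carrying the float value;
        // here we only mint a fresh node id so the sketch stays self-contained.
        return {mNextNodeId++, 0};
    }

    std::vector<hexagon_nn_input> mTensors;
    std::vector<OperandInfo> mOperands;
    std::map<uint32_t, hexagon_nn_input> mQuantizationMins, mQuantizationMaxs;
    uint32_t mNextNodeId = 1;
};

// Usage in the style of the add() hits above: the quantized data inputs are
// followed by their min/max range inputs when the op is wired into the graph.
bool prepareAdd(const std::vector<uint32_t>& ins, Model* model,
                std::vector<hexagon_nn_input>* opInputs) {
    *opInputs = {
        model->getTensor(ins[0]),          model->getTensor(ins[1]),
        model->getQuantizationMin(ins[0]), model->getQuantizationMax(ins[0]),
        model->getQuantizationMin(ins[1]), model->getQuantizationMax(ins[1]),
    };
    return true;
}

In this sketch, caching the limits means an operand referenced by several operations (as in the conv_2d and depthwise_conv_2d hits) contributes only one pair of constant range nodes to the graph.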