/frameworks/ml/nn/common/operations/ |
D | Reshape.cpp |
    35  const Shape& outputShape) {  in copyData() argument
    44  T* outputData, const Shape& outputShape) {  in depthToSpaceGeneric() argument
    47  outputData, convertShapeToDims(outputShape));  in depthToSpaceGeneric()
    52  const Shape& outputShape);
    55  const Shape& outputShape);
    58  const Shape& outputShape);
    61  const Shape& outputShape);
    65  T* outputData, const Shape& outputShape) {  in spaceToDepthGeneric() argument
    68  outputData, convertShapeToDims(outputShape));  in spaceToDepthGeneric()
    73  const Shape& outputShape);
    [all …]
|
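The Reshape.cpp hits above cover depthToSpaceGeneric and spaceToDepthGeneric, which delegate to optimized kernels via convertShapeToDims. For orientation only, here is a standalone sketch of an NHWC depth-to-space rearrangement; the function name, the flat-pointer parameters, and the depth-column-row channel ordering are illustrative assumptions, not the NNAPI code itself.

    #include <cstdint>

    // Hypothetical sketch: NHWC depth-to-space with block size `block`.
    // Input is [N, H, W, C]; the output is assumed to be [N, H*block, W*block, C/(block*block)].
    template <typename T>
    void depthToSpaceSketch(const T* in, T* out, uint32_t n, uint32_t h, uint32_t w, uint32_t c,
                            uint32_t block) {
        const uint32_t outC = c / (block * block);
        const uint32_t outH = h * block;
        const uint32_t outW = w * block;
        for (uint32_t b = 0; b < n; ++b) {
            for (uint32_t y = 0; y < h; ++y) {
                for (uint32_t x = 0; x < w; ++x) {
                    for (uint32_t ch = 0; ch < c; ++ch) {
                        // Split the input channel into a (dy, dx) block offset and an output channel.
                        const uint32_t outCh = ch % outC;
                        const uint32_t dx = (ch / outC) % block;
                        const uint32_t dy = (ch / outC) / block;
                        const uint32_t inIdx = ((b * h + y) * w + x) * c + ch;
                        const uint32_t outIdx =
                                ((b * outH + (y * block + dy)) * outW + (x * block + dx)) * outC + outCh;
                        out[outIdx] = in[inIdx];
                    }
                }
            }
        }
    }

spaceToDepthGeneric is the inverse mapping: the same index arithmetic with input and output roles swapped.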
D | Activation.cpp |
    52  bool reluFloat(const T* inputData, const Shape& inputShape, T* outputData, const Shape& outputShape,  in reluFloat() argument
    63  const Shape& outputShape, float reluMin, float reluMax);
    65  _Float16* outputData, const Shape& outputShape, float reluMin,
    70  const Shape& outputShape) {  in relu1Float() argument
    71  return reluFloat(inputData, inputShape, outputData, outputShape, -1.f, 1.f);  in relu1Float()
    74  const Shape& outputShape);
    76  _Float16* outputData, const Shape& outputShape);
    80  const Shape& outputShape) {  in relu6Float() argument
    81  return reluFloat(inputData, inputShape, outputData, outputShape, 0.f, 6.f);  in relu6Float()
    84  const Shape& outputShape);
    [all …]
|
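The Activation.cpp hits show relu1Float and relu6Float forwarding to a single reluFloat template with the bounds (-1, 1) and (0, 6). A minimal sketch of that clamping pattern, with an explicit element count standing in for the Shape arguments (names are illustrative):

    #include <algorithm>
    #include <cstdint>

    // Sketch of a generic bounded ReLU: clamp every element to [reluMin, reluMax].
    template <typename T>
    void reluClampSketch(const T* input, uint32_t numElements, T* output, float reluMin, float reluMax) {
        for (uint32_t i = 0; i < numElements; ++i) {
            const float v = static_cast<float>(input[i]);
            output[i] = static_cast<T>(std::min(std::max(v, reluMin), reluMax));
        }
    }

    // RELU1 and RELU6 are then just fixed bounds, mirroring relu1Float/relu6Float above:
    //   reluClampSketch(in, n, out, -1.f, 1.f);   // RELU1
    //   reluClampSketch(in, n, out,  0.f, 6.f);   // RELU6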
D | MaximumMinimum.cpp |
    38  bool isMinimum, T* outputData, const Shape& outputShape) {  in evalGeneric() argument
    41  IndexedShapeWrapper outputShapeIndexed(outputShape);  in evalGeneric()
    43  std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);  in evalGeneric()
    64  bool isMinimum, T* outputData, const Shape& outputShape) {  in evalQuant8() argument
    67  IndexedShapeWrapper outputShapeIndexed(outputShape);  in evalQuant8()
    69  std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);  in evalQuant8()
    79  T aValue = requantize<T>(aData[aFlatIndex], aShape, outputShape);  in evalQuant8()
    80  T bValue = requantize<T>(bData[bFlatIndex], bShape, outputShape);  in evalQuant8()
    98  bool isMinimum, void* output, const Shape& outputShape) {  in eval() argument
    104 reinterpret_cast<_Float16*>(output), outputShape);  in eval()
    [all …]
|
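In the quantized path, evalQuant8 requantizes both operands onto the output's scale and zero point before comparing them. A hedged sketch of that requantization step (the helper name and the rounding choice are assumptions, not the exact library code):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: map a quantized value from (inScale, inOffset) to (outScale, outOffset).
    // The represented real value is (q - offset) * scale; requantization re-encodes it.
    inline uint8_t requantizeSketch(uint8_t q, float inScale, int32_t inOffset,
                                    float outScale, int32_t outOffset) {
        const float real = (static_cast<int32_t>(q) - inOffset) * inScale;
        const int32_t requant = outOffset + static_cast<int32_t>(std::round(real / outScale));
        return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, requant)));
    }

    // Once both inputs live on the output's quantization grid, MAXIMUM/MINIMUM reduces to
    // std::max / std::min on the requantized values, as in evalQuant8 above.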
D | Pooling.cpp |
    140 float* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    142 auto op_params = param.toTfliteParam(outputShape);  in averagePoolNhwc()
    145 convertShapeToTflshape(outputShape), outputData);  in averagePoolNhwc()
    150 _Float16* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    153 std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));  in averagePoolNhwc()
    157 outputShape);  in averagePoolNhwc()
    163 uint8_t* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    165 auto op_params = param.toTfliteParam(outputShape);  in averagePoolNhwc()
    168 convertShapeToTflshape(outputShape), outputData);  in averagePoolNhwc()
    173 int8_t* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    [all …]
|
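The _Float16 overload of averagePoolNhwc illustrates a pattern that recurs throughout these files (SimpleMath.cpp, FullyConnected.cpp, L2Normalization.cpp, Softmax.cpp, and others): widen the fp16 tensors into float scratch buffers, run the float kernel, then narrow the result back. A generic sketch of that wrapper, with illustrative names:

    #include <cstdint>
    #include <vector>

    // Sketch of the fp16-via-fp32 fallback: allocate float buffers sized by element counts,
    // delegate to a float kernel, and convert the result back to _Float16.
    template <typename FloatKernel>
    bool evalFloat16ViaFloat32Sketch(const _Float16* inputData, uint32_t inputElements,
                                     _Float16* outputData, uint32_t outputElements,
                                     FloatKernel&& floatKernel) {
        std::vector<float> inputFloat32(inputData, inputData + inputElements);   // widen
        std::vector<float> outputFloat32(outputElements);
        if (!floatKernel(inputFloat32.data(), outputFloat32.data())) return false;
        for (uint32_t i = 0; i < outputElements; ++i) {
            outputData[i] = static_cast<_Float16>(outputFloat32[i]);             // narrow
        }
        return true;
    }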
D | SimpleMath.cpp |
    35  const Shape& outputShape) {  in meanFloat16() argument
    40  std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));  in meanFloat16()
    42  outputDataFloat32.data(), outputShape);  in meanFloat16()
    49  bool keepDims, T* outputData, const Shape& outputShape) {  in meanGeneric() argument
    59  U* tempSumBuffer = new (std::nothrow) U[getNumberOfElements(outputShape)];  in meanGeneric()
    68  reinterpret_cast<const int*>(outputShape.dimensions.data()),  in meanGeneric()
    69  getNumberOfDimensions(outputShape), axis, axisSize, keepDims, scratchBuffer,  in meanGeneric()
    79  float* outputData, const Shape& outputShape);
    83  const Shape& outputShape);
    87  const Shape& outputShape);
|
D | FullyConnected.cpp |
    58  float* outputData, const Shape& outputShape) {  in fullyConnectedFloat32() argument
    65  uint32_t batch_size = getSizeOfDimension(outputShape, 0);  in fullyConnectedFloat32()
    73  outputData, convertShapeToDims(outputShape));  in fullyConnectedFloat32()
    80  outputData, convertShapeToDims(outputShape));  in fullyConnectedFloat32()
    88  _Float16* outputData, const Shape& outputShape) {  in fullyConnectedFloat16() argument
    97  std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));  in fullyConnectedFloat16()
    100 outputDataFloat32.data(), outputShape);  in fullyConnectedFloat16()
    109 uint8_t* outputData, const Shape& outputShape) {  in fullyConnectedQuant8() argument
    113 int32_t outputOffset = outputShape.offset;  in fullyConnectedQuant8()
    121 NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, weightsShape, biasShape, outputShape,  in fullyConnectedQuant8()
    [all …]
|
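fullyConnectedQuant8 pulls the output zero point from outputShape.offset, derives a real multiplier via GetQuantizedConvolutionMultipler, and clamps with the fused activation range. A simplified per-output-unit sketch of that arithmetic (names are mine, and the real code applies a fixed-point multiplier rather than float math):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch of one output element of a quantized fully connected layer.
    // realMultiplier ~= inputScale * weightScale / outputScale (cf. GetQuantizedConvolutionMultipler).
    inline uint8_t quantFullyConnectedElementSketch(const uint8_t* input, const uint8_t* weights,
                                                    int32_t bias, uint32_t inputSize,
                                                    int32_t inputOffset, int32_t weightOffset,
                                                    int32_t outputOffset, float realMultiplier,
                                                    int32_t actMin, int32_t actMax) {
        int32_t acc = bias;  // bias is assumed quantized with scale inputScale * weightScale
        for (uint32_t i = 0; i < inputSize; ++i) {
            acc += (static_cast<int32_t>(input[i]) - inputOffset) *
                   (static_cast<int32_t>(weights[i]) - weightOffset);
        }
        int32_t out = outputOffset + static_cast<int32_t>(std::round(acc * realMultiplier));
        out = std::clamp(out, actMin, actMax);  // fused activation, cf. CalculateActivationRangeUint8
        return static_cast<uint8_t>(out);
    }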
D | Pow.cpp |
    36  const Shape& exponentShape, T* outputData, const Shape& outputShape) {  in evalGeneric() argument
    39  IndexedShapeWrapper outputShapeIndexed(outputShape);  in evalGeneric()
    41  std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);  in evalGeneric()
    71  const Shape& exponentShape, void* outputData, const Shape& outputShape) {  in eval() argument
    76  reinterpret_cast<_Float16*>(outputData), outputShape);  in eval()
    81  reinterpret_cast<float*>(outputData), outputShape);  in eval()
|
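Pow.cpp, like MaximumMinimum.cpp and PRelu.cpp, walks the output shape with a curIndex vector so each operand can be broadcast independently. A self-contained sketch of that index walk over plain dimension vectors (the helper names are mine, not the IndexedShapeWrapper API):

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Advance a multi-dimensional index in row-major order; returns false once iteration is done.
    inline bool nextIndexSketch(const std::vector<uint32_t>& dims, std::vector<uint32_t>* index) {
        for (int i = static_cast<int>(dims.size()) - 1; i >= 0; --i) {
            if (++(*index)[i] < dims[i]) return true;
            (*index)[i] = 0;
        }
        return false;
    }

    // Map an output index to a flat offset in a (possibly broadcast) operand: dimensions of
    // size 1 always contribute offset 0, which is what makes NumPy-style broadcasting work.
    inline uint32_t broadcastFlatIndexSketch(const std::vector<uint32_t>& operandDims,
                                             const std::vector<uint32_t>& outIndex) {
        uint32_t flat = 0;
        const size_t rankDiff = outIndex.size() - operandDims.size();
        for (size_t i = 0; i < operandDims.size(); ++i) {
            const uint32_t idx = operandDims[i] == 1 ? 0 : outIndex[i + rankDiff];
            flat = flat * operandDims[i] + idx;
        }
        return flat;
    }

    // Element-wise pow with broadcasting, in the spirit of evalGeneric above.
    inline void powBroadcastSketch(const float* base, const std::vector<uint32_t>& baseDims,
                                   const float* exponent, const std::vector<uint32_t>& expDims,
                                   float* out, const std::vector<uint32_t>& outDims) {
        std::vector<uint32_t> curIndex(outDims.size(), 0);
        uint32_t outFlat = 0;
        do {
            out[outFlat++] = std::pow(base[broadcastFlatIndexSketch(baseDims, curIndex)],
                                      exponent[broadcastFlatIndexSketch(expDims, curIndex)]);
        } while (nextIndexSketch(outDims, &curIndex));
    }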
D | Quantize.cpp |
    43  bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {  in quantizeToQuant8() argument
    45  uint32_t size = getNumberOfElements(outputShape);  in quantizeToQuant8()
    48  0.0f, std::min<float>(255.0f, outputShape.offset + std::round(inputData[i] /  in quantizeToQuant8()
    49  outputShape.scale))));  in quantizeToQuant8()
    55  bool quantizeToQuant8Signed(const T* inputData, int8_t* outputData, const Shape& outputShape) {  in quantizeToQuant8Signed() argument
    57  uint32_t size = getNumberOfElements(outputShape);  in quantizeToQuant8Signed()
    61  std::min<float>(127.0f, outputShape.offset +  in quantizeToQuant8Signed()
    62  std::round(inputData[i] / outputShape.scale))));  in quantizeToQuant8Signed()
|
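The quantizeToQuant8 hits spell out the affine quantization formula: divide the float value by outputShape.scale, round, add outputShape.offset, and clamp to the 8-bit range. Restated over plain parameters:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch of affine quantization, mirroring the clamp bounds above:
    // [0, 255] for TENSOR_QUANT8_ASYMM, [-128, 127] for TENSOR_QUANT8_ASYMM_SIGNED.
    inline uint8_t quantizeToQuant8Sketch(float value, float scale, int32_t zeroPoint) {
        const float q = zeroPoint + std::round(value / scale);
        return static_cast<uint8_t>(std::max(0.0f, std::min(255.0f, q)));
    }

    inline int8_t quantizeToQuant8SignedSketch(float value, float scale, int32_t zeroPoint) {
        const float q = zeroPoint + std::round(value / scale);
        return static_cast<int8_t>(std::max(-128.0f, std::min(127.0f, q)));
    }

    // Example: scale = 0.5, zeroPoint = 128 maps 3.2f to clamp(128 + round(6.4)) = 134.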
D | Cast.cpp |
    48  const Shape& outputShape) {  in copyToTensor() argument
    56  switch (outputShape.type) {  in copyToTensor()
    76  const Shape& outputShape) {  in eval() argument
    84  outputShape); \  in eval()
    94  if (inputShape.type == outputShape.type) {  in eval()
    95  return copyData(inputData, inputShape, outputData, outputShape);  in eval()
|
D | Conv2D.cpp |
    137 uint32_t outHeight = getSizeOfDimension(outputShape, 1); \
    138 uint32_t outWidth = getSizeOfDimension(outputShape, 2); \
    145 im2colDim.sizes[3] = (int)getSizeOfDimension(outputShape, 0); \
    146 im2colDim.sizes[2] = (int)getSizeOfDimension(outputShape, 1); \
    147 im2colDim.sizes[1] = (int)getSizeOfDimension(outputShape, 2); \
    198 float* outputData, const Shape& outputShape) {  in convNhwc() argument
    218 convertShapeToDims(outputShape), need_im2colData ? im2colData : nullptr, im2colDim);  in convNhwc()
    227 uint8_t* outputData, const Shape& outputShape) {  in convNhwc() argument
    234 int32_t outputOffset = outputShape.offset;  in convNhwc()
    242 NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,  in convNhwc()
    [all …]
|
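The Conv2D.cpp macro hits size the im2col scratch buffer from the output shape: sizes[3..1] take the output's batch, height, and width. A hedged sketch of how such an im2col buffer is typically dimensioned, with the innermost dimension holding one flattened filter window (the struct and helper are illustrative stand-ins, not the tflite Dims type):

    #include <cstdint>

    // Illustrative stand-in for a 4-D dims descriptor.
    struct Dims4 {
        int sizes[4];
    };

    // Sketch: the im2col buffer for an NHWC convolution has one entry per output (batch, y, x)
    // position and filterHeight * filterWidth * inputDepth values per entry (one unrolled window).
    inline Dims4 makeIm2colDimsSketch(int outBatches, int outHeight, int outWidth,
                                      int filterHeight, int filterWidth, int inputDepth) {
        Dims4 d;
        d.sizes[3] = outBatches;                               // cf. getSizeOfDimension(outputShape, 0)
        d.sizes[2] = outHeight;                                // cf. getSizeOfDimension(outputShape, 1)
        d.sizes[1] = outWidth;                                 // cf. getSizeOfDimension(outputShape, 2)
        d.sizes[0] = filterHeight * filterWidth * inputDepth;  // one flattened filter window
        return d;
    }

With the input unrolled this way, the convolution itself becomes a single matrix multiplication against the flattened filters, which is why the buffer is worth its memory cost on the fast path.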
D | GroupedConv2D.cpp |
    41  uint32_t outputHeight = getSizeOfDimension(outputShape, 1); \
    42  uint32_t outputWidth = getSizeOfDimension(outputShape, 2); \
    43  uint32_t outputDepth = getSizeOfDimension(outputShape, 3); \
    51  const Shape& outputShape) {  in groupedConvFloat32() argument
    109 const Shape& outputShape) {  in groupedConvQuant8() argument
    115 int32_t outputOffset = outputShape.offset;  in groupedConvQuant8()
    120 NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,  in groupedConvQuant8()
    127 CalculateActivationRange<T>(activation, outputShape, &output_activation_min,  in groupedConvQuant8()
    188 const Shape& outputShape);
    197 const Shape& outputShape);
    [all …]
|
D | L2Normalization.cpp |
    48  float* outputData, const Shape& outputShape) {  in l2normFloat32Impl() argument
    76  uint8_t* outputData, const Shape& outputShape) {  in l2normQuant8Impl() argument
    108 int8_t* outputData, const Shape& outputShape) {  in l2normQuant8SignedImpl() argument
    139 const Shape& outputShape) {  in l2normFloat32() argument
    147 convertShapeToTflshape(outputShape), outputData);  in l2normFloat32()
    150 return l2normFloat32Impl(inputData, inputShape, axis, outputData, outputShape);  in l2normFloat32()
    155 _Float16* outputData, const Shape& outputShape) {  in l2normFloat16() argument
    159 std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));  in l2normFloat16()
    161 l2normFloat32(inputDataFloat32.data(), inputShape, axis, outputDataFloat32.data(), outputShape);  in l2normFloat16()
    168 uint8_t* outputData, const Shape& outputShape) {  in l2normQuant8() argument
    [all …]
|
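l2normFloat32Impl normalizes along a chosen axis; in the common last-axis case each vector is divided by the square root of its sum of squares. A minimal sketch of that inner computation (axis handling and the epsilon guard are simplified assumptions):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: L2-normalize contiguous vectors of length axisSize along the innermost axis.
    // outerSize is the product of all other dimensions.
    inline void l2NormLastAxisSketch(const float* input, uint32_t outerSize, uint32_t axisSize,
                                     float* output) {
        for (uint32_t outer = 0; outer < outerSize; ++outer) {
            const float* in = input + outer * axisSize;
            float* out = output + outer * axisSize;
            float sumSquares = 0.0f;
            for (uint32_t i = 0; i < axisSize; ++i) sumSquares += in[i] * in[i];
            const float invNorm = 1.0f / std::sqrt(std::max(sumSquares, 1e-6f));  // assumed epsilon
            for (uint32_t i = 0; i < axisSize; ++i) out[i] = in[i] * invNorm;
        }
    }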
D | Concatenation.cpp |
    50  const Shape& outputShape) {  in concatenation() argument
    61  getNumberOfDimensions(outputShape) - axis - 1, inputDataPtrs.data(),  in concatenation()
    62  inputDimsPtr.data(), num_inputs, outputData, convertShapeToDims(outputShape));  in concatenation()
    70  uint8_t* outputData, const Shape& outputShape) {  in concatenation() argument
    86  getNumberOfDimensions(outputShape) - axis - 1, inputDataPtrs.data(),  in concatenation()
    88  convertShapeToDims(outputShape), outputShape.offset, outputShape.scale);  in concatenation()
    129 Shape outputShape(context->getOutputShape(kOutputTensor));  in concatenation() local
    130 outputShape.offset += 128;  in concatenation()
    132 output_uint8.data(), outputShape));  in concatenation()
|
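The last three Concatenation.cpp hits show the TENSOR_QUANT8_ASYMM_SIGNED path: the signed data is rebased into unsigned space, the output Shape's offset is bumped by 128, and the existing uint8 kernel is reused. A sketch of that rebasing trick, with buffer handling simplified:

    #include <cstdint>
    #include <vector>

    // Sketch: convert int8 asymmetric data to uint8 by shifting the zero point up by 128.
    // Because the values and the offset move together, the represented real values are unchanged,
    // so a uint8 concatenation kernel can be reused and its result shifted back down afterwards.
    inline std::vector<uint8_t> toUnsignedQuantSketch(const int8_t* data, uint32_t numElements,
                                                      int32_t* offsetInOut) {
        std::vector<uint8_t> result(numElements);
        for (uint32_t i = 0; i < numElements; ++i) {
            result[i] = static_cast<uint8_t>(static_cast<int32_t>(data[i]) + 128);
        }
        *offsetInOut += 128;  // mirrors `outputShape.offset += 128` above
        return result;
    }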
D | Softmax.cpp |
    52  int32_t axis, float* outputData, const Shape& outputShape) {  in softmaxSlowFloat32() argument
    84  float* outputData, const Shape& outputShape) {  in softmaxFloat32() argument
    92  convertShapeToTflshape(outputShape), outputData);  in softmaxFloat32()
    95  return softmaxSlowFloat32(inputData, inputShape, beta, axis, outputData, outputShape);  in softmaxFloat32()
    100 int32_t axis, _Float16* outputData, const Shape& outputShape) {  in softmaxFloat16() argument
    104 std::vector<float> outputData_float32(getNumberOfElements(outputShape));  in softmaxFloat16()
    107 outputShape);  in softmaxFloat16()
    116 T* outputData, const Shape& outputShape) {  in softmaxQuant8Impl() argument
    202 T* outputData, const Shape& outputShape) {  in softmaxQuant8() argument
    206 if ((inputShape.type == OperandType::TENSOR_QUANT8_ASYMM && outputShape.offset != 0) ||  in softmaxQuant8()
    [all …]
|
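softmaxSlowFloat32 computes a softmax with a beta scaling factor along the chosen axis. A compact sketch for the innermost-axis case (the max-subtraction for numerical stability is the standard trick; the exact loop structure here is an assumption):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: softmax with a beta multiplier over contiguous vectors of length axisSize.
    // Subtracting the per-vector max before exponentiating avoids overflow without changing the result.
    inline void softmaxLastAxisSketch(const float* input, uint32_t outerSize, uint32_t axisSize,
                                      float beta, float* output) {
        for (uint32_t outer = 0; outer < outerSize; ++outer) {
            const float* in = input + outer * axisSize;
            float* out = output + outer * axisSize;
            const float maxValue = *std::max_element(in, in + axisSize);
            float sum = 0.0f;
            for (uint32_t i = 0; i < axisSize; ++i) {
                out[i] = std::exp((in[i] - maxValue) * beta);
                sum += out[i];
            }
            for (uint32_t i = 0; i < axisSize; ++i) out[i] /= sum;
        }
    }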
D | PRelu.cpp |
    48  const Shape& outputShape) {  in eval() argument
    51  IndexedShapeWrapper outputShapeIndexed(outputShape);  in eval()
    52  std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);  in eval()
    71  T* outputData, const Shape& outputShape) {  in evalQuant8() argument
    74  const int32_t output_offset = outputShape.offset;  in evalQuant8()
    76  const double real_multiplier_pos = aShape.scale / outputShape.scale;  in evalQuant8()
    77  const double real_multiplier_neg = input_product_scale / outputShape.scale;  in evalQuant8()
    98  aData, aShape, bData, bShape, outputData, outputShape);  in evalQuant8()
|
D | DepthwiseConv2D.cpp |
    125 uint32_t outHeight = getSizeOfDimension(outputShape, 1); \
    126 uint32_t outWidth = getSizeOfDimension(outputShape, 2); \
    137 const Shape& outputShape) {  in depthwiseConvNhwc() argument
    160 convertShapeToTflshape(outputShape), outputData);  in depthwiseConvNhwc()
    171 _Float16* outputData, const Shape& outputShape) {  in depthwiseConvNhwc() argument
    180 std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));  in depthwiseConvNhwc()
    185 outputShape);  in depthwiseConvNhwc()
    197 const Shape& outputShape) {  in depthwiseConvNhwc() argument
    208 NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,  in depthwiseConvNhwc()
    213 CalculateActivationRangeUint8(activation, outputShape, &output_activation_min,  in depthwiseConvNhwc()
    [all …]
|
D | ResizeImageOps.cpp |
    67  bool halfPixelCenters, T* outputData, const Shape& outputShape) {  in resizeNearestNeighbor() argument
    72  const int outHeight = getSizeOfDimension(outputShape, 1);  in resizeNearestNeighbor()
    73  const int outWidth = getSizeOfDimension(outputShape, 2);  in resizeNearestNeighbor()
    111 const Shape& outputShape) {  in resizeImageOpNhwc() argument
    113 int32_t height = static_cast<int32_t>(getSizeOfDimension(outputShape, 1));  in resizeImageOpNhwc()
    114 int32_t width = static_cast<int32_t>(getSizeOfDimension(outputShape, 2));  in resizeImageOpNhwc()
    125 outDimData, convertShapeToTflshape(outputShape), outputData);  in resizeImageOpNhwc()
    130 outputShape);  in resizeImageOpNhwc()
    138 _Float16* outputData, const Shape& outputShape) {  in resizeImageOpNhwc() argument
    142 std::vector<float> outputData_float32(getNumberOfElements(outputShape));  in resizeImageOpNhwc()
    [all …]
|
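resizeNearestNeighbor reads the target height and width from the output shape and maps every output pixel back to an input pixel; the halfPixelCenters flag changes that coordinate mapping. A hedged sketch of the mapping for one coordinate, covering only the non-align-corners case (alignCorners changes both the scale and the rounding in the real kernels):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: map an output row/column index to an input index for nearest-neighbor resize.
    // scale = inSize / outSize. With half-pixel centers, sampling positions are shifted so that
    // pixel centers (rather than corners) line up between the two grids.
    inline int32_t mapNearestIndexSketch(int32_t outIndex, float scale, int32_t inSize,
                                         bool halfPixelCenters) {
        const float offset = halfPixelCenters ? 0.5f : 0.0f;
        const int32_t nearest = static_cast<int32_t>(std::floor((outIndex + offset) * scale));
        return std::min(std::max(nearest, 0), inSize - 1);  // clamp into the valid input range
    }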
D | TransposeConv2D.cpp |
    117 uint32_t outputHeight = getSizeOfDimension(outputShape, 1); \
    118 uint32_t outputWidth = getSizeOfDimension(outputShape, 2); \
    119 uint32_t outputDepth = getSizeOfDimension(outputShape, 3); \
    128 const Shape& outputShape) {  in transposeConvNhwc() argument
    135 memset(outputData, 0, getNumberOfElements(outputShape) * sizeof(float));  in transposeConvNhwc()
    184 const TransposeConv2dParam& param, T* outputData, const Shape& outputShape) {  in transposeConvNhwc() argument
    190 uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);  in transposeConvNhwc()
    204 int32_t outputOffset = outputShape.offset;  in transposeConvNhwc()
    209 NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,  in transposeConvNhwc()
    216 CalculateActivationRange<T>(activation, outputShape, &outputActivationMin,  in transposeConvNhwc()
    [all …]
|
D | Reduce.cpp |
    55  const Shape outputShape = context->getOutputShape(kOutputTensor);  in compute() local
    64  reinterpret_cast<const int32_t*>(outputShape.dimensions.data()),  in compute()
    65  outputShape.dimensions.size(), context->getInputBuffer<int32_t>(kInputAxes), numAxes,  in compute()
    145 Shape outputShape = inputShape;  in prepare() local
    146 outputShape.dimensions.clear();  in prepare()
    151 outputShape.dimensions.push_back(1);  in prepare()
    154 outputShape.dimensions.push_back(getSizeOfDimension(inputShape, axis));  in prepare()
    159 if (outputShape.dimensions.empty()) {  in prepare()
    160 outputShape.dimensions.push_back(1);  in prepare()
    163 return context->setOutputShape(kOutputTensor, outputShape);  in prepare()
|
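The Reduce.cpp prepare() hits rebuild the output dimensions from the input shape: reduced axes either collapse to 1 (keepDims) or are dropped, and a fully reduced tensor keeps a single dimension of size 1. A sketch of that shape computation over plain dimension vectors (axis normalization and validation omitted):

    #include <cstdint>
    #include <set>
    #include <vector>

    // Sketch: compute the dimensions of a reduction's output. Axes are assumed already
    // normalized into [0, rank).
    inline std::vector<uint32_t> reducedOutputDimsSketch(const std::vector<uint32_t>& inputDims,
                                                         const std::set<int32_t>& axesToReduce,
                                                         bool keepDims) {
        std::vector<uint32_t> outDims;
        for (int32_t axis = 0; axis < static_cast<int32_t>(inputDims.size()); ++axis) {
            if (axesToReduce.count(axis)) {
                if (keepDims) outDims.push_back(1);  // reduced axis collapses to size 1
            } else {
                outDims.push_back(inputDims[axis]);  // untouched axis is carried over
            }
        }
        if (outDims.empty()) outDims.push_back(1);   // scalar result represented as shape {1}
        return outDims;
    }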
D | Slice.cpp |
    54  T* outputData, const Shape& outputShape) {  in evalGeneric() argument
    55  const int outputSize = getNumberOfElements(outputShape);  in evalGeneric()
    56  const IndexedShapeWrapper indexedOutput = IndexedShapeWrapper(outputShape);  in evalGeneric()
    58  std::vector<uint32_t> outputIndex(getNumberOfDimensions(outputShape), 0);  in evalGeneric()
    121 Shape outputShape = context->getOutputShape(kOutputTensor);  in prepare() local
    122 outputShape.dimensions.resize(n_dims);  in prepare()
    132 outputShape.dimensions[i] = sliceSize;  in prepare()
    134 return context->setOutputShape(kOutputTensor, outputShape);  in prepare()
|
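Slice.cpp's prepare() resizes the output to the input rank and fills each dimension with the requested slice size; in the SLICE operation a size of -1 conventionally means "to the end of that dimension". A sketch of that per-dimension computation (bounds validation omitted):

    #include <cstdint>
    #include <vector>

    // Sketch: derive the output dimensions of SLICE from the begin/size operands.
    // size[i] == -1 is treated as "everything from begin[i] to the end of dimension i".
    inline std::vector<uint32_t> sliceOutputDimsSketch(const std::vector<uint32_t>& inputDims,
                                                       const std::vector<int32_t>& begin,
                                                       const std::vector<int32_t>& size) {
        std::vector<uint32_t> outDims(inputDims.size());
        for (size_t i = 0; i < inputDims.size(); ++i) {
            const int32_t sliceSize =
                    (size[i] == -1) ? static_cast<int32_t>(inputDims[i]) - begin[i] : size[i];
            outDims[i] = static_cast<uint32_t>(sliceSize);
        }
        return outDims;
    }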
D | LocalResponseNormalization.cpp |
    53  const Shape& outputShape) {  in localResponseNormFloat32Impl() argument
    82  T beta, int32_t axis, T* outputData, const Shape& outputShape);
    87  const Shape& outputShape) {  in localResponseNorm() argument
    97  convertShapeToTflshape(outputShape), outputData);  in localResponseNorm()
    101 outputData, outputShape);  in localResponseNorm()
    108 _Float16* outputData, const Shape& outputShape) {  in localResponseNorm() argument
    112 std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));  in localResponseNorm()
    115 outputDataFloat32.data(), outputShape);  in localResponseNorm()
|
D | Multinomial.cpp |
    64  Shape* outputShape) {  in Prepare() argument
    76  outputShape->type = OperandType::TENSOR_INT32;  in Prepare()
    77  outputShape->dimensions = {batch_size, sample_count};  in Prepare()
    78  outputShape->offset = inputShape.offset;  in Prepare()
    79  outputShape->scale = inputShape.scale;  in Prepare()
|
D | LSHProjection.cpp |
    46  Shape* outputShape) {  in Prepare() argument
    70  outputShape->dimensions = {SizeOfDimension(hash, 0)};  in Prepare()
    77  outputShape->dimensions = {SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1)};  in Prepare()
    84  outputShape->type = OperandType::TENSOR_INT32;  in Prepare()
    85  outputShape->offset = 0;  in Prepare()
    86  outputShape->scale = 0.f;  in Prepare()
|
/frameworks/ml/nn/common/include/ |
D | Operations.h |
    54  _Float16* outputData, const Shape& outputShape);
    61  const Shape& outputShape);
    68  uint8_t* outputData, const Shape& outputShape);
    77  const Shape& outputShape);
    81  _Float16* outputData, const Shape& outputShape);
    84  const Shape& outputShape);
    87  const Shape& outputShape);
    91  T* outputData, const Shape& outputShape);
    94  T* outputData, const Shape& outputShape);
    98  T* outputData, const Shape& outputShape);
    [all …]
|
/frameworks/ml/nn/common/ |
D | OperationsUtils.cpp |
    50  void CalculateActivationRangeImpl(int32_t activation, const Shape& outputShape, int32_t qmin,  in CalculateActivationRangeImpl() argument
    52  const auto scale = outputShape.scale;  in CalculateActivationRangeImpl()
    53  const auto zero_point = outputShape.offset;  in CalculateActivationRangeImpl()
    259 const Shape& biasShape, const Shape& outputShape,  in GetQuantizedConvolutionMultipler() argument
    269 *multiplier = input_product_scale / outputShape.scale;  in GetQuantizedConvolutionMultipler()
    273 void CalculateActivationRangeUint8(int32_t activation, const Shape& outputShape, int32_t* act_min,  in CalculateActivationRangeUint8() argument
    278 CalculateActivationRangeImpl(activation, outputShape, qmin, qmax, act_min, act_max);  in CalculateActivationRangeUint8()
    281 void CalculateActivationRangeInt8(int32_t activation, const Shape& outputShape, int32_t* act_min,  in CalculateActivationRangeInt8() argument
    286 CalculateActivationRangeImpl(activation, outputShape, qmin, qmax, act_min, act_max);  in CalculateActivationRangeInt8()
    462 bool embeddingLookupPrepare(const Shape& valueShape, const Shape& lookupShape, Shape* outputShape) {  in embeddingLookupPrepare() argument
    [all …]
|
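OperationsUtils.cpp hosts the two helpers the quantized kernels above lean on: GetQuantizedConvolutionMultipler derives the real output multiplier from the input, filter, and output scales, and the CalculateActivationRange* functions turn a fused activation into quantized clamp bounds using the output's scale and zero point. A hedged sketch of both (the enum and function names are illustrative, and the RELU/RELU1/RELU6 clamping follows the usual NNAPI/TFLite convention rather than quoting the exact code):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: real multiplier applied to the int32 accumulator of a quantized conv/FC.
    // The real helper also validates that the product of input scales is compatible with outputScale.
    inline bool getQuantizedConvMultiplierSketch(float inputScale, float filterScale,
                                                 float outputScale, float* multiplier) {
        if (outputScale <= 0.0f) return false;
        *multiplier = (inputScale * filterScale) / outputScale;
        return true;
    }

    // Sketch: convert a fused activation into [actMin, actMax] on the quantized grid, so kernels
    // can clamp the requantized value directly. quantize(x) = zeroPoint + round(x / scale).
    enum class FusedActivationSketch { kNone, kRelu, kRelu1, kRelu6 };

    inline void calculateActivationRangeUint8Sketch(FusedActivationSketch activation, float scale,
                                                    int32_t zeroPoint, int32_t* actMin,
                                                    int32_t* actMax) {
        auto quantize = [&](float x) -> int32_t {
            return zeroPoint + static_cast<int32_t>(std::round(x / scale));
        };
        const int32_t qmin = 0, qmax = 255;  // [-128, 127] in the int8 variant
        switch (activation) {
            case FusedActivationSketch::kRelu:
                *actMin = std::max(qmin, quantize(0.0f));
                *actMax = qmax;
                break;
            case FusedActivationSketch::kRelu1:
                *actMin = std::max(qmin, quantize(-1.0f));
                *actMax = std::min(qmax, quantize(1.0f));
                break;
            case FusedActivationSketch::kRelu6:
                *actMin = std::max(qmin, quantize(0.0f));
                *actMax = std::min(qmax, quantize(6.0f));
                break;
            case FusedActivationSketch::kNone:
            default:
                *actMin = qmin;
                *actMax = qmax;
                break;
        }
    }

Precomputing the clamp bounds this way lets every quantized kernel fuse its activation as a pair of integer comparisons on the already-requantized value.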