/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include <tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h>
#include <tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h>

#include <algorithm>
#include <vector>

#include "CpuOperationUtils.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "Tracing.h"

namespace android {
namespace nn {
namespace depthwise_conv_2d {
constexpr char kOperationName[] = "DEPTHWISE_CONV_2D";

constexpr uint32_t kNumInputsArray[] = {8, 9, 11, 12, 14};
constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kFilterTensor = 1;
constexpr uint32_t kBiasTensor = 2;

constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;

namespace {

using namespace hal;

struct DepthwiseConv2dParam {
    int32_t padding_left, padding_right;
    int32_t padding_top, padding_bottom;
    int32_t stride_width, stride_height;
    int32_t dilation_width_factor = 1, dilation_height_factor = 1;
    int32_t depth_multiplier;
    int32_t activation;
    bool useNchw = false;

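    // Parses the operation inputs into explicit padding, strides, dilation, depth
    // multiplier, activation and layout. Two signatures are accepted: implicit
    // padding (8/9/11 inputs, padding scheme at index 3) and explicit padding
    // (11/12/14 inputs, four padding values at indices 3-6). For the implicit case
    // the explicit padding values are derived via calculateExplicitPadding().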
    bool initialize(const IOperationExecutionContext* context) {
        uint32_t inCount = context->getNumInputs();
        int32_t padding_implicit = 0;
        bool useImplicitPadding = false;
        if ((inCount >= 9 && context->getInputType(8) == OperandType::BOOL) || inCount == 8) {
            padding_implicit = context->getInputValue<int32_t>(3);
            stride_width = context->getInputValue<int32_t>(4);
            stride_height = context->getInputValue<int32_t>(5);
            depth_multiplier = context->getInputValue<int32_t>(6);
            activation = context->getInputValue<int32_t>(7);
            if (inCount >= 9) {
                useNchw = context->getInputValue<bool>(8);
            }
            if (inCount == 11) {
                dilation_width_factor = context->getInputValue<int32_t>(9);
                dilation_height_factor = context->getInputValue<int32_t>(10);
            }
            useImplicitPadding = true;
        } else if (inCount >= 11 && context->getInputType(8) == OperandType::INT32) {
            padding_left = context->getInputValue<int32_t>(3);
            padding_right = context->getInputValue<int32_t>(4);
            padding_top = context->getInputValue<int32_t>(5);
            padding_bottom = context->getInputValue<int32_t>(6);
            stride_width = context->getInputValue<int32_t>(7);
            stride_height = context->getInputValue<int32_t>(8);
            depth_multiplier = context->getInputValue<int32_t>(9);
            activation = context->getInputValue<int32_t>(10);
            if (inCount >= 12) {
                useNchw = context->getInputValue<bool>(11);
            }
            if (inCount == 14) {
                dilation_width_factor = context->getInputValue<int32_t>(12);
                dilation_height_factor = context->getInputValue<int32_t>(13);
            }
        } else {
            NN_RET_CHECK_FAIL() << "Unsupported input spec for operation " << kOperationName;
        }
        if (useImplicitPadding) {
            Shape inputShape = context->getInputShape(kInputTensor);
            Shape filterShape = context->getInputShape(kFilterTensor);
            int32_t input_width = getSizeOfDimension(inputShape, useNchw ? 3 : 2);
            int32_t input_height = getSizeOfDimension(inputShape, useNchw ? 2 : 1);
            int32_t filter_width = getSizeOfDimension(filterShape, 2);
            int32_t filter_height = getSizeOfDimension(filterShape, 1);
            calculateExplicitPadding(input_width, stride_width, dilation_width_factor, filter_width,
                                     padding_implicit, &padding_left, &padding_right);
            calculateExplicitPadding(input_height, stride_height, dilation_height_factor,
                                     filter_height, padding_implicit, &padding_top,
                                     &padding_bottom);
        }
        NN_RET_CHECK_GE(padding_left, 0);
        NN_RET_CHECK_GE(padding_right, 0);
        NN_RET_CHECK_GE(padding_top, 0);
        NN_RET_CHECK_GE(padding_bottom, 0);
        NN_RET_CHECK_GT(stride_width, 0);
        NN_RET_CHECK_GT(stride_height, 0);
        NN_RET_CHECK_GT(dilation_width_factor, 0);
        NN_RET_CHECK_GT(dilation_height_factor, 0);
        NN_RET_CHECK_GT(depth_multiplier, 0);
        NN_RET_CHECK_GE(activation, 0);
        return true;
    }
};

#define ANDROID_NN_DEPTHWISE_CONV_PARAMETERS                      \
    uint32_t height = getSizeOfDimension(inputShape, 1);          \
    uint32_t width = getSizeOfDimension(inputShape, 2);           \
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);   \
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);    \
    uint32_t outHeight = getSizeOfDimension(outputShape, 1);      \
    uint32_t outWidth = getSizeOfDimension(outputShape, 2);       \
                                                                  \
    uint32_t paddingHeight = (uint32_t)paddingTop;                \
    uint32_t paddingWidth = (uint32_t)paddingLeft;

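// Float32 NHWC implementation: computes the activation clamp range and packs the
// padding/stride/dilation parameters into tflite::DepthwiseParams before calling the
// TFLite DepthwiseConv kernel.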
bool depthwiseConvNhwc(const float* inputData, const Shape& inputShape, const float* filterData,
                       const Shape& filterShape, const float* biasData, const Shape& biasShape,
                       int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
                       int32_t paddingBottom, int32_t strideWidth, int32_t strideHeight,
                       int32_t dilationWidthFactor, int32_t dilationHeightFactor,
                       int32_t depthMultiplier, int32_t activation, float* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("depthwiseConvFloat32");

    ANDROID_NN_DEPTHWISE_CONV_PARAMETERS

    float output_activation_min, output_activation_max;
    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);

    tflite::DepthwiseParams params{
            .padding_values = {static_cast<int16>(paddingWidth), static_cast<int16>(paddingHeight),
                               0 /*width_offset*/, 0 /*height_offset*/},
            .stride_width = static_cast<int16>(strideWidth),
            .stride_height = static_cast<int16>(strideHeight),
            .dilation_width_factor = static_cast<int16>(dilationWidthFactor),
            .dilation_height_factor = static_cast<int16>(dilationHeightFactor),
            .depth_multiplier = static_cast<int16>(depthMultiplier),
            .float_activation_min = output_activation_min,
            .float_activation_max = output_activation_max,
    };
    NNTRACE_COMP_SWITCH("optimized_ops::DepthwiseConv");
    tflite::reference_ops::DepthwiseConv(params, convertShapeToTflshape(inputShape), inputData,
                                         convertShapeToTflshape(filterShape), filterData,
                                         convertShapeToTflshape(biasShape), biasData,
                                         convertShapeToTflshape(outputShape), outputData);

    return true;
}

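// Float16 NHWC implementation: widens all operands to float32, reuses the float32
// path above, and narrows the result back to float16.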
bool depthwiseConvNhwc(const _Float16* inputData, const Shape& inputShape,
                       const _Float16* filterData, const Shape& filterShape,
                       const _Float16* biasData, const Shape& biasShape, int32_t paddingLeft,
                       int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
                       int32_t strideWidth, int32_t strideHeight, int32_t dilationWidthFactor,
                       int32_t dilationHeightFactor, int32_t depthMultiplier, int32_t activation,
                       _Float16* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("depthwiseConvFloat16");
    std::vector<float> inputDataFloat32(getNumberOfElements(inputShape));
    convertFloat16ToFloat32(inputData, &inputDataFloat32);
    std::vector<float> filterDataFloat32(getNumberOfElements(filterShape));
    convertFloat16ToFloat32(filterData, &filterDataFloat32);
    std::vector<float> biasDataFloat32(getNumberOfElements(biasShape));
    convertFloat16ToFloat32(biasData, &biasDataFloat32);

    std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));
    depthwiseConvNhwc(inputDataFloat32.data(), inputShape, filterDataFloat32.data(), filterShape,
                      biasDataFloat32.data(), biasShape, paddingLeft, paddingRight, paddingTop,
                      paddingBottom, strideWidth, strideHeight, dilationWidthFactor,
                      dilationHeightFactor, depthMultiplier, activation, outputDataFloat32.data(),
                      outputShape);

    convertFloat32ToFloat16(outputDataFloat32, outputData);
    return true;
}

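// Asymmetric uint8 NHWC implementation: derives the fixed-point output multiplier and
// shift from the input/filter/output scales, computes the quantized activation clamp
// range, and delegates to the TFLite DepthwiseConv kernel with the zero-point offsets.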
bool depthwiseConvNhwc(const uint8_t* inputData, const Shape& inputShape, const uint8_t* filterData,
                       const Shape& filterShape, const int32_t* biasData, const Shape& biasShape,
                       int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
                       int32_t paddingBottom, int32_t strideWidth, int32_t strideHeight,
                       int32_t dilationWidthFactor, int32_t dilationHeightFactor,
                       int32_t depthMultiplier, int32_t activation, uint8_t* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("depthwiseConvQuant8");

    ANDROID_NN_DEPTHWISE_CONV_PARAMETERS

    double real_multiplier = 0.0;
    int32_t output_multiplier = 0;
    int32_t output_shift = 0;
    int32_t output_activation_min = 0;
    int32_t output_activation_max = 0;

    NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,
                                                  &real_multiplier));
    int exponent;
    NN_RET_CHECK(QuantizeMultiplier(real_multiplier, &output_multiplier, &exponent));
    output_shift = -exponent;
    CalculateActivationRangeUint8(activation, outputShape, &output_activation_min,
                                  &output_activation_max);

    tflite::DepthwiseParams params{
            .padding_values = {static_cast<int16>(paddingWidth), static_cast<int16>(paddingHeight),
                               0 /*width_offset*/, 0 /*height_offset*/},
            .stride_width = static_cast<int16>(strideWidth),
            .stride_height = static_cast<int16>(strideHeight),
            .dilation_width_factor = static_cast<int16>(dilationWidthFactor),
            .dilation_height_factor = static_cast<int16>(dilationHeightFactor),
            .depth_multiplier = static_cast<int16>(depthMultiplier),
            .input_offset = -inputShape.offset,
            .weights_offset = -filterShape.offset,
            .output_offset = outputShape.offset,
            .output_multiplier = output_multiplier,
            .output_shift = -output_shift,
            .quantized_activation_min = output_activation_min,
            .quantized_activation_max = output_activation_max,
    };
    NNTRACE_COMP_SWITCH("optimized_ops::DepthwiseConv");
    tflite::reference_ops::DepthwiseConv(params, convertShapeToTflshape(inputShape), inputData,
                                         convertShapeToTflshape(filterShape), filterData,
                                         convertShapeToTflshape(biasShape), biasData,
                                         convertShapeToTflshape(outputShape), outputData);
    return true;
}

// Passing input, filter and output shapes by value, so that we can change the
// offsets without modifying the actual shapes.
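// The signed (int8) variant below converts the operands to uint8 and shifts each zero
// point by 128 so that the uint8 implementation above can be reused; the result is
// converted back to int8 afterwards.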
bool depthwiseConvNhwc(const int8_t* inputData, Shape inputShape, const int8_t* filterData,
                       Shape filterShape, const int32_t* biasData, const Shape& biasShape,
                       int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
                       int32_t paddingBottom, int32_t strideWidth, int32_t strideHeight,
                       int32_t dilationWidthFactor, int32_t dilationHeightFactor,
                       int32_t depthMultiplier, int32_t activation, int8_t* outputData,
                       Shape outputShape) {
    NNTRACE_TRANS("depthwiseConvQuant8");

    std::vector<uint8_t> unsignedInput(getNumberOfElements(inputShape));
    convertInt8ToUInt8(inputData, &unsignedInput);
    inputShape.offset += 128;

    std::vector<uint8_t> unsignedFilter(getNumberOfElements(filterShape));
    convertInt8ToUInt8(filterData, &unsignedFilter);
    filterShape.offset += 128;

    std::vector<uint8_t> unsignedOutput(getNumberOfElements(outputShape));
    outputShape.offset += 128;

    NN_RET_CHECK(depthwiseConvNhwc(unsignedInput.data(), inputShape, unsignedFilter.data(),
                                   filterShape, biasData, biasShape, paddingLeft, paddingRight,
                                   paddingTop, paddingBottom, strideWidth, strideHeight,
                                   dilationWidthFactor, dilationHeightFactor, depthMultiplier,
                                   activation, unsignedOutput.data(), outputShape));

    convertUInt8ToInt8(unsignedOutput, outputData);

    return true;
}

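// Per-channel quantized NHWC reference implementation: each output channel carries its
// own filter scale, so the output multiplier and shift are computed per channel and the
// convolution is evaluated with an explicit nested loop instead of the TFLite kernel.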
template <typename T>
bool depthwiseConvQuant8PerChannelNhwc(
        const T* inputData, const Shape& inputShape, const int8_t* filterData,
        const Shape& filterShape, const float* filterScales, const int32_t* biasData,
        const Shape& biasShape, int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
        int32_t paddingBottom, int32_t strideWidth, int32_t strideHeight,
        int32_t dilationWidthFactor, int32_t dilationHeightFactor,

        int32_t depthMultiplier, int32_t activation, T* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("depthwiseConvQuant8");

    uint32_t paddingHeight = (uint32_t)paddingTop;
    uint32_t paddingWidth = (uint32_t)paddingLeft;

    uint32_t numBatches = getSizeOfDimension(inputShape, 0);
    uint32_t inputHeight = getSizeOfDimension(inputShape, 1);
    uint32_t inputWidth = getSizeOfDimension(inputShape, 2);
    uint32_t inputDepth = getSizeOfDimension(inputShape, 3);
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);
    uint32_t filterDepth = getSizeOfDimension(filterShape, 3);
    uint32_t outputHeight = getSizeOfDimension(outputShape, 1);
    uint32_t outputWidth = getSizeOfDimension(outputShape, 2);
    uint32_t outputDepth = getSizeOfDimension(outputShape, 3);

    int32_t inputOffset = -inputShape.offset;
    int32_t outputOffset = outputShape.offset;

    auto realMultiplier = std::vector<double>(outputDepth, .0f);
    auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
    auto outputShift = std::vector<int32_t>(outputDepth, .0f);

    for (int i = 0; i < outputDepth; ++i) {
        Shape filterChannelShape = filterShape;
        filterChannelShape.scale = filterScales[i];
        Shape biasChannelShape = biasShape;
        biasChannelShape.scale = filterScales[i] * inputShape.scale;
        NN_RET_CHECK(GetQuantizedConvolutionMultipler(
                inputShape, filterChannelShape, biasChannelShape, outputShape, &realMultiplier[i]));
        int exponent;
        NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));
        outputShift[i] = -exponent;
    }

    int32_t output_activation_min = 0, output_activation_max = 0;
    CalculateActivationRange<T>(activation, outputShape, &output_activation_min,
                                &output_activation_max);

    const T* inputBase = inputData;
    T* outPtr = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < outputHeight; h++) {
            for (uint32_t w = 0; w < outputWidth; w++) {
                for (uint32_t ic = 0; ic < inputDepth; ic++) {
                    for (uint32_t m = 0; m < depthMultiplier; m++) {
                        int32_t wInputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                        int32_t hInputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;
                        const int oc = m + ic * depthMultiplier;

                        int32_t sum = 0.0f;
                        for (uint32_t i = 0; i < filterHeight; i++) {
                            for (uint32_t j = 0; j < filterWidth; j++) {
                                int32_t hInput = hInputOrigin +
                                                 dilationHeightFactor * static_cast<int32_t>(i);
                                int32_t wInput = wInputOrigin +
                                                 dilationWidthFactor * static_cast<int32_t>(j);

                                if (hInput >= 0 && hInput < static_cast<int32_t>(inputHeight) &&
                                    wInput >= 0 && wInput < static_cast<int32_t>(inputWidth)) {
                                    uint32_t filterIndex =
                                            i * filterWidth * filterDepth + j * filterDepth + oc;
                                    uint32_t inputIndex = hInput * inputWidth * inputDepth +
                                                          wInput * inputDepth + ic;
                                    sum += (static_cast<int32_t>(filterData[filterIndex])) *
                                           (static_cast<int32_t>(inputBase[inputIndex]) +
                                            inputOffset);
                                }
                            }
                        }

                        sum += biasData[oc];
                        sum = tflite::MultiplyByQuantizedMultiplier(sum, outputMultiplier[oc],
                                                                    -outputShift[oc]);
                        sum += outputOffset;
                        sum = std::max(std::min(sum, output_activation_max), output_activation_min);
                        outPtr[m] = static_cast<T>(sum);
                    }
                    outPtr += depthMultiplier;
                }
            }
        }
        inputBase += inputHeight * inputWidth * inputDepth;
    }

    return true;
}

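// Layout-agnostic wrapper: converts an NCHW input to NHWC if requested (via
// InputWithLayout/OutputWithLayout), runs the NHWC implementation, and commits the
// output back in the caller's layout.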
template <typename T_Input, typename T_Filter, typename T_Bias>
bool depthwiseConv(const T_Input* inputData, const Shape& inputShape, const T_Filter* filterData,
                   const Shape& filterShape, const T_Bias* biasData, const Shape& biasShape,
                   int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
                   int32_t paddingBottom, int32_t strideWidth, int32_t strideHeight,
                   int32_t dilationWidthFactor, int32_t dilationHeightFactor,
                   int32_t depthMultiplier, int32_t activation, bool useNchw, T_Input* outputData,
                   const Shape& outputShape) {
    InputWithLayout<T_Input> input(useNchw);
    OutputWithLayout<T_Input> output(useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(depthwiseConvNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData,
                                   filterShape, biasData, biasShape, paddingLeft, paddingRight,
                                   paddingTop, paddingBottom, strideWidth, strideHeight,
                                   dilationWidthFactor, dilationHeightFactor, depthMultiplier,
                                   activation, output.getNhwcBuffer(), output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

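// Same layout-agnostic wrapper, for the per-channel quantized variant.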
template <typename T>
bool depthwiseConvQuant8PerChannel(const T* inputData, const Shape& inputShape,
                                   const int8_t* filterData, const Shape& filterShape,
                                   const float* filterScales, const int32_t* biasData,
                                   const Shape& biasShape, int32_t paddingLeft,
                                   int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
                                   int32_t strideWidth, int32_t strideHeight,
                                   int32_t dilationWidthFactor, int32_t dilationHeightFactor,
                                   int32_t depthMultiplier, int32_t activation, bool useNchw,
                                   T* outputData, const Shape& outputShape) {
    InputWithLayout<T> input(useNchw);
    OutputWithLayout<T> output(useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(depthwiseConvQuant8PerChannelNhwc(
            input.getNhwcBuffer(), input.getNhwcShape(), filterData, filterShape, filterScales,
            biasData, biasShape, paddingLeft, paddingRight, paddingTop, paddingBottom, strideWidth,
            strideHeight, dilationWidthFactor, dilationHeightFactor, depthMultiplier, activation,
            output.getNhwcBuffer(), output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

#undef ANDROID_NN_DEPTHWISE_CONV_PARAMETERS

} // namespace

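// Checks the operand count and types for every supported signature and selects the
// minimum HAL version required by the requested features (float16 operands, per-channel
// quantized filters, explicit layout, dilation, or a relaxed output scale constraint).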
bool validate(const IOperationValidationContext* context) {
    const uint32_t numInputs = context->getNumInputs();
    NN_RET_CHECK(
            std::binary_search(std::begin(kNumInputsArray), std::end(kNumInputsArray), numInputs));
    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
    auto inputType = context->getInputType(kInputTensor);
    auto filterType = context->getInputType(kFilterTensor);
    std::vector<OperandType> inExpectedTypes;
    if (inputType == OperandType::TENSOR_FLOAT32) {
        inExpectedTypes = {
                OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                OperandType::TENSOR_FLOAT32, OperandType::INT32,
                OperandType::INT32,          OperandType::INT32,
                OperandType::INT32,          OperandType::INT32,
        };
    } else if (inputType == OperandType::TENSOR_FLOAT16) {
        inExpectedTypes = {
                OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                OperandType::TENSOR_FLOAT16, OperandType::INT32,
                OperandType::INT32,          OperandType::INT32,
                OperandType::INT32,          OperandType::INT32,
        };
    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
               inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        NN_RET_CHECK(filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
                     filterType == inputType)
                << "Unsupported filter tensor type for operation " << kOperationName;
        if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
                            3)
                    << "Unsupported filter tensor channel dimension for operation "
                    << kOperationName;
        }
        inExpectedTypes = {
                inputType,          filterType,         OperandType::TENSOR_INT32,
                OperandType::INT32, OperandType::INT32, OperandType::INT32,
                OperandType::INT32, OperandType::INT32,
        };
    } else {
        NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << kOperationName;
    }

    // NeuralNetworks.h specifies that ANEURALNETWORKS_DEPTHWISE_CONV_2D's output must
    // meet "outputScale > inputScale * filterScale" for the operand type
    // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM before API level 29. For other
    // operand types (e.g., ANEURALNETWORKS_TENSOR_FLOAT32), this constraint
    // does not apply, so by default the constraint is met.
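    // For example, with inputScale = 0.5 and filterScale = 0.5, an outputScale of 0.2
    // (not greater than 0.25) is only accepted from HAL version 1.2 onwards.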
    bool meetsQuantizedScaleConstraintBeforeV1_2 = true;
    if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
        const float inputScale = context->getInputShape(kInputTensor).scale;
        const float filterScale = context->getInputShape(kFilterTensor).scale;
        const float outputScale = context->getOutputShape(kOutputTensor).scale;
        meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * filterScale);
    }

    bool withExplicitPadding = false;
    bool withLayout = false;
    bool withDilation = false;
    if (numInputs >= 9) {
        if (context->getInputType(8) == OperandType::INT32 && numInputs >= 11) {
            std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
            inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
                                   explicitScalarTypes.end());
            withExplicitPadding = true;
        }
        int inputOffset = withExplicitPadding ? 3 : 0;
        if (numInputs >= 9 + inputOffset) {
            inExpectedTypes.push_back(OperandType::BOOL);
            withLayout = true;
        }
        NN_RET_CHECK_NE(numInputs, 10 + inputOffset)
                << "Provided only one dilation factor value, two values are required for operation "
                << kOperationName;
        if (numInputs == 11 + inputOffset) {
            inExpectedTypes.push_back(OperandType::INT32);
            inExpectedTypes.push_back(OperandType::INT32);
            withDilation = true;
        }
    }

    if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3));
    } else if (inputType == OperandType::TENSOR_FLOAT16 ||
               filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || withLayout ||
               withDilation || !meetsQuantizedScaleConstraintBeforeV1_2) {
        NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
    } else {
        NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0));
    }
    return validateInputTypes(context, inExpectedTypes) &&
           validateOutputTypes(context, {inputType});
}

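// Validates the operand shapes and computes the output shape: the output depth equals
// the filter depth (input depth * depth_multiplier), and the spatial dimensions follow
// from the padding, stride and dilation parameters via computeOutSize().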
bool prepare(IOperationExecutionContext* context) {
    Shape input = context->getInputShape(kInputTensor);
    Shape filter = context->getInputShape(kFilterTensor);
    Shape bias = context->getInputShape(kBiasTensor);

    if (filter.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        NN_RET_CHECK(input.type == OperandType::TENSOR_QUANT8_ASYMM ||
                     input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED);
    } else {
        NN_RET_CHECK(input.type == filter.type);
    }
    if (input.type == OperandType::TENSOR_QUANT8_ASYMM ||
        input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);
    } else {
        NN_RET_CHECK(input.type == bias.type);
    }
    NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);
    NN_RET_CHECK_EQ(getSizeOfDimension(filter, 0), 1);
    NN_RET_CHECK_EQ(getSizeOfDimension(filter, 3), getSizeOfDimension(bias, 0));

    DepthwiseConv2dParam param;
    NN_RET_CHECK(param.initialize(context));

    uint32_t batches = getSizeOfDimension(input, 0);
    uint32_t height = getSizeOfDimension(input, param.useNchw ? 2 : 1);
    uint32_t width = getSizeOfDimension(input, param.useNchw ? 3 : 2);
    uint32_t channels_in = getSizeOfDimension(input, param.useNchw ? 1 : 3);
    uint32_t channels_out = getSizeOfDimension(filter, 3);
    uint32_t filterHeight = getSizeOfDimension(filter, 1);
    uint32_t filterWidth = getSizeOfDimension(filter, 2);

    NN_OPS_CHECK(param.depth_multiplier * channels_in == channels_out);
    int32_t effectiveFilterWidth = (filterWidth - 1) * param.dilation_width_factor + 1;
    int32_t effectiveFilterHeight = (filterHeight - 1) * param.dilation_height_factor + 1;
    NN_RET_CHECK_GT(effectiveFilterWidth, param.padding_left);
    NN_RET_CHECK_GT(effectiveFilterWidth, param.padding_right);
    NN_RET_CHECK_GT(effectiveFilterHeight, param.padding_top);
    NN_RET_CHECK_GT(effectiveFilterHeight, param.padding_bottom);

    uint32_t outHeight =
            computeOutSize(height, filterHeight, param.stride_height, param.dilation_height_factor,
                           param.padding_top, param.padding_bottom);
    uint32_t outWidth =
            computeOutSize(width, filterWidth, param.stride_width, param.dilation_width_factor,
                           param.padding_left, param.padding_right);

    Shape output = context->getOutputShape(kOutputTensor);
    output.type = input.type;
    if (param.useNchw) {
        output.dimensions = {batches, channels_out, outHeight, outWidth};
    } else {
        output.dimensions = {batches, outHeight, outWidth, channels_out};
    }
    return context->setOutputShape(kOutputTensor, output);
}

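// Dispatches on the input tensor type (and, for quantized inputs, on whether the filter
// is per-channel quantized) to the matching typed implementation; zero-sized outputs
// are a no-op.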
bool execute(IOperationExecutionContext* context) {
    // Bypass execution in the case of zero-sized input.
    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
    DepthwiseConv2dParam param;
    NN_RET_CHECK(param.initialize(context));
    switch (context->getInputType(kInputTensor)) {
        case OperandType::TENSOR_FLOAT32:
            return depthwiseConv(context->getInputBuffer<float>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<float>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<float>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param.padding_left,
                                 param.padding_right, param.padding_top, param.padding_bottom,
                                 param.stride_width, param.stride_height,
                                 param.dilation_width_factor, param.dilation_height_factor,
                                 param.depth_multiplier, param.activation, param.useNchw,
                                 context->getOutputBuffer<float>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_FLOAT16:
            return depthwiseConv(context->getInputBuffer<_Float16>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<_Float16>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<_Float16>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param.padding_left,
                                 param.padding_right, param.padding_top, param.padding_bottom,
                                 param.stride_width, param.stride_height,
                                 param.dilation_width_factor, param.dilation_height_factor,
                                 param.depth_multiplier, param.activation, param.useNchw,
                                 context->getOutputBuffer<_Float16>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_QUANT8_ASYMM:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return depthwiseConvQuant8PerChannel(
                        context->getInputBuffer<uint8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param.padding_left,
                        param.padding_right, param.padding_top, param.padding_bottom,
                        param.stride_width, param.stride_height, param.dilation_width_factor,
                        param.dilation_height_factor, param.depth_multiplier, param.activation,
                        param.useNchw, context->getOutputBuffer<uint8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) == OperandType::TENSOR_QUANT8_ASYMM) {
                return depthwiseConv(context->getInputBuffer<uint8_t>(kInputTensor),
                                     context->getInputShape(kInputTensor),
                                     context->getInputBuffer<uint8_t>(kFilterTensor),
                                     context->getInputShape(kFilterTensor),
                                     context->getInputBuffer<int32_t>(kBiasTensor),
                                     context->getInputShape(kBiasTensor), param.padding_left,
                                     param.padding_right, param.padding_top, param.padding_bottom,
                                     param.stride_width, param.stride_height,
                                     param.dilation_width_factor, param.dilation_height_factor,
                                     param.depth_multiplier, param.activation, param.useNchw,
                                     context->getOutputBuffer<uint8_t>(kOutputTensor),
                                     context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return depthwiseConvQuant8PerChannel(
                        context->getInputBuffer<int8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param.padding_left,
                        param.padding_right, param.padding_top, param.padding_bottom,
                        param.stride_width, param.stride_height, param.dilation_width_factor,
                        param.dilation_height_factor, param.depth_multiplier, param.activation,
                        param.useNchw, context->getOutputBuffer<int8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) ==
                       OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                return depthwiseConv(context->getInputBuffer<int8_t>(kInputTensor),
                                     context->getInputShape(kInputTensor),
                                     context->getInputBuffer<int8_t>(kFilterTensor),
                                     context->getInputShape(kFilterTensor),
                                     context->getInputBuffer<int32_t>(kBiasTensor),
                                     context->getInputShape(kBiasTensor), param.padding_left,
                                     param.padding_right, param.padding_top, param.padding_bottom,
                                     param.stride_width, param.stride_height,
                                     param.dilation_width_factor, param.dilation_height_factor,
                                     param.depth_multiplier, param.activation, param.useNchw,
                                     context->getOutputBuffer<int8_t>(kOutputTensor),
                                     context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}

} // namespace depthwise_conv_2d

NN_REGISTER_OPERATION(DEPTHWISE_CONV_2D, depthwise_conv_2d::kOperationName,
                      depthwise_conv_2d::validate, depthwise_conv_2d::prepare,
                      depthwise_conv_2d::execute, .allowZeroSizedInput = true);

} // namespace nn
} // namespace android
680