#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# TEST 1: PRELU
i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 3}")
a1 = Parameter("alpha", "TENSOR_FLOAT32", "{1, 1, 3}", [0, 1, 2])
o1 = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 3}")
Model().Operation("PRELU", i1, a1).To(o1)
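
# The expected outputs below follow the PRELU definition: out = x for x >= 0,
# and out = alpha * x for x < 0, with alpha broadcast along the last (channel)
# dimension. A minimal, framework-independent reference sketch, for
# illustration only (not part of the generated test):
def _prelu_reference(values, alphas):
    # values: NHWC data flattened row-major into a list; alphas: one value per channel.
    n = len(alphas)
    return [v if v >= 0 else v * alphas[i % n] for i, v in enumerate(values)]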

# output.scale > input.scale && output.scale > input.scale * alpha.scale
quant8_gt = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    a1: ("TENSOR_QUANT8_ASYMM", 0.25, 50),
    o1: ("TENSOR_QUANT8_ASYMM", 0.5, 120)
})
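
# Worked example for the quant8_gt scales above, assuming the standard affine
# mapping real_value = scale * (quantized - zeroPoint). Illustration only; the
# quant8 variations convert the float example data using these parameters.
def _quantize(value, scale, zero_point):
    return int(round(value / scale)) + zero_point

assert _quantize(1.0, 0.25, 128) == 132   # float input 1.0 as a quantized input value
assert _quantize(1.0, 0.5, 120) == 122    # float output 1.0 as a quantized output value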

# output.scale == input.scale
quant8_eq1 = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    a1: ("TENSOR_QUANT8_ASYMM", 0.25, 50),
    o1: ("TENSOR_QUANT8_ASYMM", 0.25, 120)
})

# output.scale == input.scale * alpha.scale
quant8_eq2 = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    a1: ("TENSOR_QUANT8_ASYMM", 0.5, 50),
    o1: ("TENSOR_QUANT8_ASYMM", 0.125, 120)
})

# output.scale < input.scale && output.scale < input.scale * alpha.scale
quant8_lt = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    a1: ("TENSOR_QUANT8_ASYMM", 0.5, 50),
    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 120)
})
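
# Taken together (inferred from the comments above), the four quant8 variations
# cover rescaling ratios on the positive path (input.scale / output.scale) and
# the negative path (input.scale * alpha.scale / output.scale) that fall below,
# at, and above 1:
#   quant8_gt:  0.5,  0.125
#   quant8_eq1: 1.0,  0.25
#   quant8_eq2: 2.0,  1.0
#   quant8_lt:  2.5,  1.25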

# Instantiate an example
Example({
    i1: [ 0,  0,  0,
          1,  1,  1,
         -1, -1, -1,
         -2, -2, -2],
    o1: [ 0,  0,  0,
          1,  1,  1,
          0, -1, -2,
          0, -2, -4]
}).AddVariations("relaxed", quant8_gt, quant8_eq1, quant8_eq2, quant8_lt, "float16")
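
# Illustrative sanity check (outside the generated test): the reference sketch
# above reproduces the expected output from the example input with alpha = [0, 1, 2].
assert _prelu_reference(
    [ 0,  0,  0,  1,  1,  1, -1, -1, -1, -2, -2, -2],
    [0, 1, 2]
) == [ 0,  0,  0,  1,  1,  1,  0, -1, -2,  0, -2, -4]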