/frameworks/ml/nn/runtime/test/specs/V1_3/ |
D | fully_connected_quant8_signed.mod.py |
    19  weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 10}, 0.5f, -1", variable
    26  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
    42  weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128", [-118, -108, -108, -1… variable
    46  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    61  weights = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128") # num_units = 1, input_si… variable
    65  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    70  weights:
    84  weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128", [-126]) variable
    88  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    103 weights = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128") variable
    [all …]
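These V1_3 specs exercise FULLY_CONNECTED with TENSOR_QUANT8_ASYMM_SIGNED operands, where a string like "{1, 5}, 0.2, -128" gives the shape, scale, and zero point. Below is a runnable Python sketch of the arithmetic such a test pins down (dequantize, multiply-accumulate with bias, requantize, optional ReLU); all helper names, shapes, scales, and values are made up for illustration, not copied from the spec files.

```python
def dequant(q, scale, zero_point):
    return scale * (q - zero_point)

def quant_signed(x, scale, zero_point):
    q = round(x / scale) + zero_point
    return max(-128, min(127, q))            # clamp to the int8 range

def fully_connected_quant(inputs, weights, bias, in_q, w_q, out_q, relu=False):
    """inputs: [batch][input_size] int8; weights: [num_units][input_size] int8;
    bias: [num_units] int32 (scale = in_scale * w_scale); *_q = (scale, zero_point)."""
    in_scale, in_zp = in_q
    w_scale, w_zp = w_q
    out_scale, out_zp = out_q
    out = []
    for row in inputs:
        units = []
        for u, w_row in enumerate(weights):
            acc = sum(dequant(x, in_scale, in_zp) * dequant(w, w_scale, w_zp)
                      for x, w in zip(row, w_row))
            acc += bias[u] * in_scale * w_scale
            if relu:
                acc = max(acc, 0.0)
            units.append(quant_signed(acc, out_scale, out_zp))
        out.append(units)
    return out

# Made-up shapes and values in the spirit of the {1, 5} cases above.
print(fully_connected_quant(
    inputs=[[-128, -96, -64, -32, 0]],
    weights=[[10, 20, 30, 40, 50]],
    bias=[4],
    in_q=(0.5, -128), w_q=(0.2, -128), out_q=(1.0, -128), relu=True))
```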
|
D | unidirectional_sequence_rnn.mod.py |
    19  def test(name, input, weights, recurrent_weights, bias, hidden_state, argument
    25  model = Model().Operation("UNIDIRECTIONAL_SEQUENCE_RNN", input, weights,
    31  weights: weights_data,
    181 weights=Input("weights", "TENSOR_FLOAT32",
    207 weights=Input("weights", "TENSOR_FLOAT32",
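The test() helper above builds an UNIDIRECTIONAL_SEQUENCE_RNN model from input, weights, recurrent_weights, bias, and hidden_state operands. The following is a rough, runnable Python sketch of the recurrence that op computes over the time dimension; the helper name, the toy values, and the tanh default are illustrative only (the op's activation is a parameter in the real spec).

```python
import math

def rnn_sequence(inputs, weights, recurrent_weights, bias, hidden, act=math.tanh):
    """inputs: [seq_len][input_size]; weights: [units][input_size];
    recurrent_weights: [units][units]; bias: [units]; hidden: [units]."""
    outputs = []
    for x_t in inputs:
        new_hidden = []
        for u in range(len(weights)):
            acc = bias[u]
            acc += sum(w * x for w, x in zip(weights[u], x_t))
            acc += sum(r * h for r, h in zip(recurrent_weights[u], hidden))
            new_hidden.append(act(acc))
        hidden = new_hidden
        outputs.append(hidden)
    return outputs, hidden

outs, final_state = rnn_sequence(
    inputs=[[0.1, 0.2], [0.3, 0.4]],
    weights=[[0.5, -0.5], [0.25, 0.75]],
    recurrent_weights=[[0.1, 0.0], [0.0, 0.1]],
    bias=[0.0, 0.1],
    hidden=[0.0, 0.0])
print(outs, final_state)
```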
|
/frameworks/base/services/core/java/com/android/server/display/whitebalance/ |
D | AmbientFilter.java |
    208 final float[] weights = getWeights(time, buffer); in filter() local
    210 Slog.v(mTag, "filter: " + buffer + " => " + Arrays.toString(weights)); in filter()
    212 for (int i = 0; i < weights.length; i++) { in filter()
    214 final float weight = weights[i]; in filter()
    231 float[] weights = new float[buffer.size()]; in getWeights() local
    234 for (int i = 1; i < weights.length; i++) { in getWeights()
    237 weights[i - 1] = weight; in getWeights()
    242 weights[weights.length - 1] = lastWeight; in getWeights()
    243 return weights; in getWeights()
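AmbientFilter.getWeights() assigns a weight to each buffered ambient sample and filter() folds them into a weighted average. Here is an illustrative Python sketch of such a time-weighted average; the recency-based weight formula and the sample values are assumptions, not the Java implementation.

```python
def weighted_average(samples):
    """samples: list of (timestamp_ms, value). Weights here simply grow with
    recency; the real filter derives them from how long each sample was valid."""
    if not samples:
        return None
    base = samples[0][0]
    weights = [(t - base) + 1.0 for t, _ in samples]   # newer samples weigh more
    total_weight = sum(weights)
    return sum(w * v for w, (_, v) in zip(weights, samples)) / total_weight

print(weighted_average([(0, 100.0), (500, 120.0), (1000, 300.0)]))
```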
|
/frameworks/ml/nn/common/operations/ |
D | QuantizedLSTM.cpp |
    211 uint8_t* weights) { in assignWeightsSubmatrix() argument
    217 weights[(row + offset_row) * weightsDims[1] + column + offset_column] = submatrixValues[i]; in assignWeightsSubmatrix()
    269 auto checkWeightsShape = [&](const RunTimeOperandInfo* weights, uint32_t columns) -> bool { in prepare() argument
    270 NN_RET_CHECK_EQ(NumDimensions(weights), 2); in prepare()
    271 NN_RET_CHECK_EQ(SizeOfDimension(weights, 0), outputSize); in prepare()
    272 NN_RET_CHECK_EQ(SizeOfDimension(weights, 1), columns); in prepare()
    273 NN_RET_CHECK_EQ(weights->scale, weightsScale); in prepare()
    274 NN_RET_CHECK_EQ(weights->zeroPoint, weightsZeroPoint); in prepare()
    353 uint8_t* weights) { in concatenateWeights() argument
    356 assignWeightsSubmatrix(inputToInputWeights_, 0 * outputSize, outputSize, weightsDims, weights); in concatenateWeights()
    [all …]
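assignWeightsSubmatrix writes one gate's weight matrix into a larger row-major buffer at a row/column offset, and concatenateWeights stacks the per-gate matrices that way. Below is a runnable Python analogue of that row-major submatrix placement; the gate names, the 4x3 layout, and the values are hypothetical.

```python
def assign_submatrix(sub, offset_row, offset_col, dims, dest):
    """Copy a 2-D submatrix into a flat row-major buffer `dest` of shape `dims`
    at the given row/column offsets (mirrors the indexing on line 217 above)."""
    for r, row in enumerate(sub):
        for c, value in enumerate(row):
            dest[(r + offset_row) * dims[1] + (c + offset_col)] = value

# Hypothetical example: stack two 2x3 gate matrices on top of each other
# inside a 4x3 concatenated weights buffer.
dims = (4, 3)
concat = [0] * (dims[0] * dims[1])
input_to_input  = [[1, 2, 3], [4, 5, 6]]
input_to_forget = [[7, 8, 9], [10, 11, 12]]
assign_submatrix(input_to_input,  0, 0, dims, concat)   # rows 0..1
assign_submatrix(input_to_forget, 2, 0, dims, concat)   # rows 2..3
print(concat)
```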
|
D | FullyConnected.cpp |
    183 bool validateShapes(const Shape& input, const Shape& weights, const Shape& bias, in validateShapes() argument
    187 NN_RET_CHECK(weights.type == input.type); in validateShapes()
    198 NN_RET_CHECK_EQ(getNumberOfDimensions(weights), 2); in validateShapes()
    201 uint32_t num_units = getSizeOfDimension(weights, 0); in validateShapes()
    202 uint32_t input_size = getSizeOfDimension(weights, 1); in validateShapes()
    283 Shape weights = context->getInputShape(kWeightsTensor); in validate() local
    285 if (hasKnownRank(input) && hasKnownRank(weights) && hasKnownRank(bias)) { in validate()
    286 NN_RET_CHECK(validateShapes(input, weights, bias)); in validate()
    294 Shape weights = context->getInputShape(kWeightsTensor); in prepare() local
    297 NN_RET_CHECK(validateShapes(input, weights, bias, &output)); in prepare()
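validateShapes above requires a rank-2 weights tensor and derives num_units and input_size from its two dimensions. A rough Python rendering of those rules follows; the batch_size derivation and error messages are illustrative, not the exact C++ checks.

```python
from math import prod

def validate_fc_shapes(input_shape, weights_shape, bias_shape):
    """Sketch of the shape rules visible above: weights is rank 2,
    num_units = weights dim 0, input_size = weights dim 1, bias (when present)
    has num_units elements, and the output is [batch_size, num_units]."""
    if len(weights_shape) != 2:
        raise ValueError("weights must be rank 2")
    num_units, input_size = weights_shape
    if bias_shape and bias_shape != (num_units,):
        raise ValueError("bias must have num_units elements")
    if prod(input_shape) % input_size != 0:
        raise ValueError("input element count not divisible by input_size")
    batch_size = prod(input_shape) // input_size
    return (batch_size, num_units)            # output shape

print(validate_fc_shapes((2, 5), (3, 5), (3,)))   # -> (2, 3)
```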
|
/frameworks/base/libs/hwui/utils/ |
D | Blur.cpp |
    61  void Blur::generateGaussianWeights(float* weights, float radius) { in generateGaussianWeights() argument
    83  weights[r + intRadius] = coeff1 * pow(e, floatR * floatR * coeff2); in generateGaussianWeights()
    84  normalizeFactor += weights[r + intRadius]; in generateGaussianWeights()
    90  weights[r + intRadius] *= normalizeFactor; in generateGaussianWeights()
    94  void Blur::horizontal(float* weights, int32_t radius, const uint8_t* source, uint8_t* dest, in horizontal() argument
    105 const float* gPtr = weights; in horizontal()
    137 void Blur::vertical(float* weights, int32_t radius, const uint8_t* source, uint8_t* dest, in vertical() argument
    147 const float* gPtr = weights; in vertical()
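generateGaussianWeights fills a kernel of 2*radius+1 taps with Gaussian samples and then normalizes them so they sum to one (the normalizeFactor accumulation and multiply above). A small runnable Python sketch of that idea; the sigma-from-radius heuristic used here is an assumption, not read from Blur.cpp.

```python
import math

def gaussian_weights(radius, sigma=None):
    """Build a 1-D Gaussian kernel over integer offsets [-radius, radius]
    and normalize it to sum to 1."""
    int_radius = int(math.ceil(radius))
    sigma = sigma or (0.3 * radius + 0.6)      # assumed heuristic
    coeff1 = 1.0 / (math.sqrt(2.0 * math.pi) * sigma)
    coeff2 = -1.0 / (2.0 * sigma * sigma)
    weights = [coeff1 * math.exp(r * r * coeff2)
               for r in range(-int_radius, int_radius + 1)]
    norm = 1.0 / sum(weights)
    return [w * norm for w in weights]

w = gaussian_weights(3.0)
print(w, sum(w))   # the normalized weights sum to ~1.0
```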
|
D | Blur.h |
    37 static void generateGaussianWeights(float* weights, float radius);
    38 static void horizontal(float* weights, int32_t radius, const uint8_t* source, uint8_t* dest,
    40 static void vertical(float* weights, int32_t radius, const uint8_t* source, uint8_t* dest,
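The horizontal/vertical split in these declarations reflects that a Gaussian blur is separable: a 1-D pass along rows followed by a 1-D pass along columns is equivalent to the full 2-D convolution. A toy, runnable Python sketch of that two-pass structure; the edge clamping and the toy kernel are assumptions, and the real code operates on uint8 pixel buffers.

```python
def convolve_1d(row, weights, radius):
    """Apply a 1-D kernel along one row, clamping indices at the edges."""
    out = []
    for i in range(len(row)):
        acc = 0.0
        for k, w in enumerate(weights):
            j = min(max(i + k - radius, 0), len(row) - 1)
            acc += w * row[j]
        out.append(acc)
    return out

def separable_blur(image, weights, radius):
    """Horizontal pass over rows, then vertical pass over columns."""
    rows = [convolve_1d(r, weights, radius) for r in image]
    cols = [convolve_1d(list(c), weights, radius) for c in zip(*rows)]
    return [list(r) for r in zip(*cols)]

kernel = [0.25, 0.5, 0.25]       # toy kernel; a real one comes from a Gaussian
img = [[0, 0, 255, 0], [0, 255, 255, 0], [0, 0, 0, 0]]
print(separable_blur(img, kernel, radius=1))
```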
|
/frameworks/ml/nn/runtime/test/ |
D | TestMemory.cpp |
    55  WrapperMemory weights(offsetForMatrix3 + sizeof(matrix3), PROT_READ, fd, 0); in TEST_F() local
    56  ASSERT_TRUE(weights.isValid()); in TEST_F()
    69  model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4)); in TEST_F()
    70  model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4)); in TEST_F()
    113 WrapperMemory weights(buffer); in TEST_F() local
    114 ASSERT_TRUE(weights.isValid()); in TEST_F()
    127 model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4)); in TEST_F()
    128 model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4)); in TEST_F()
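TestMemory.cpp supplies constant operands from a shared memory region via setOperandValueFromMemory(operand, memory, offset, length) rather than copying the data into the model. The following is a loose Python analogue of that offset/length bookkeeping only; the buffer layout, sizes, and helper names are invented for illustration.

```python
import struct

# One shared buffer holds several operand blobs, each referenced by
# (offset, length). Matrix3x4 stands in for the 3x4 float matrices in the test.
FLOATS_PER_MATRIX = 12
MATRIX_BYTES = FLOATS_PER_MATRIX * 4

matrix2 = [float(i) for i in range(FLOATS_PER_MATRIX)]
matrix3 = [float(i) * 10 for i in range(FLOATS_PER_MATRIX)]

offset_for_matrix2 = 0
offset_for_matrix3 = offset_for_matrix2 + MATRIX_BYTES
buffer = (struct.pack(f"{FLOATS_PER_MATRIX}f", *matrix2) +
          struct.pack(f"{FLOATS_PER_MATRIX}f", *matrix3))

def operand_from_memory(buf, offset, length):
    """Read one operand's data back out of the shared region."""
    return list(struct.unpack(f"{length // 4}f", buf[offset:offset + length]))

print(operand_from_memory(buffer, offset_for_matrix3, MATRIX_BYTES)[:4])
```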
|
/frameworks/ml/nn/runtime/test/specs/V1_2/ |
D | unidirectional_sequence_rnn.mod.py |
    19  def test(name, input, weights, recurrent_weights, bias, hidden_state, argument
    24  model = Model().Operation("UNIDIRECTIONAL_SEQUENCE_RNN", input, weights,
    29  weights: weights_data,
    143 weights=Input("weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(
    165 weights=Input("weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(
|
D | rnn_float16.mod.py |
    24 weights = Input("weights", "TENSOR_FLOAT16", "{%d, %d}" % (units, input_size)) variable
    34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
    38 weights: [
|
D | fully_connected_v1_2.mod.py |
    20 weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2]) variable
    24 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    28 weights: ("TENSOR_QUANT8_ASYMM", 0.5, 120),
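The weights: ("TENSOR_QUANT8_ASYMM", 0.5, 120) entry above pairs the float operand with a quantized type, scale, and zero point for the quantized variation of the test. Here is a minimal sketch of what that (scale, zeroPoint) pair means under the asymmetric 8-bit convention real = scale * (q - zeroPoint); the sample values are examples, not spec data.

```python
def quantize(x, scale, zero_point, lo=0, hi=255):
    """TENSOR_QUANT8_ASYMM convention: q = clamp(round(x / scale) + zero_point)."""
    return max(lo, min(hi, round(x / scale) + zero_point))

def dequantize(q, scale, zero_point):
    return scale * (q - zero_point)

scale, zero_point = 0.5, 120    # the (scale, zeroPoint) pair from the variation above
for real in (-60.0, 0.0, 2.0, 67.5):
    q = quantize(real, scale, zero_point)
    print(real, "->", q, "->", dequantize(q, scale, zero_point))
```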
|
/frameworks/ml/nn/runtime/test/specs/V1_0/ |
D | fully_connected_quant8_large_weights_as_inputs.mod.py |
    19 weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0") # num_units = 1, input_size = 5 variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    28 weights:
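The *_weights_as_inputs specs in this directory declare weights with Input(...) and feed their values through the example data, whereas the sibling specs bake them in with Parameter(...). A rough Python analogy for that constant-versus-runtime-weights split; the functions below are illustrative only and unrelated to the NNAPI runtime.

```python
from functools import partial

def fully_connected(inputs, weights, bias):
    """inputs: [batch][input_size]; weights: [num_units][input_size]."""
    return [[sum(x * w for x, w in zip(row, unit)) + b
             for unit, b in zip(weights, bias)]
            for row in inputs]

# "Constant weights" flavour: values fixed when the model is built,
# analogous to Parameter(...) in the specs above.
const_weights_fc = partial(fully_connected,
                           weights=[[1.0, 2.0, 3.0, 4.0, 5.0]], bias=[0.5])

# "Weights as inputs" flavour: values arrive with each execution,
# analogous to Input(...) plus a `weights:` entry in the example data.
runtime_weights = [[0.1, 0.2, 0.3, 0.4, 0.5]]

x = [[1.0, 1.0, 1.0, 1.0, 1.0]]
print(const_weights_fc(x))
print(fully_connected(x, runtime_weights, bias=[0.5]))
```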
|
D | fully_connected_float_large_weights_as_inputs.mod.py |
    19 weights = Input("op2", "TENSOR_FLOAT32", "{1, 5}") # num_units = 1, input_size = 5 variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    28 weights:
|
D | fully_connected_quant8_weights_as_inputs.mod.py |
    19 weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 1}, 0.5f, 0") variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    28 weights: [2],
|
D | fully_connected_float_weights_as_inputs.mod.py |
    19 weights = Input("op2", "TENSOR_FLOAT32", "{1, 1}") variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    28 weights: [2],
|
D | rnn_state.mod.py |
    24 weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size)) variable
    34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
    38 weights: [
|
D | rnn.mod.py |
    24 weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size)) variable
    34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
    38 weights: [
|
D | fully_connected_float_large.mod.py |
    19 weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 5}", [2, 3, 4, 5, 6]) # num_units = 1, input_size… variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
|
D | fully_connected_quant8_2.mod.py |
    19 weights = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{3, 10}, 0.5f, 127", variable
    26 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
|
D | fully_connected_float.mod.py |
    19 weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2]) variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
|
D | fully_connected_quant8_large.mod.py |
    19 weights = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0", [10, 20, 20, 20, 10]) # num_uni… variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
|
/frameworks/ml/nn/runtime/test/specs/V1_1/ |
D | fully_connected_float_large_weights_as_inputs_relaxed.mod.py |
    19 weights = Input("op2", "TENSOR_FLOAT32", "{1, 5}") # num_units = 1, input_size = 5 variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    29 weights:
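The V1_1 *_relaxed specs re-run the same FULLY_CONNECTED cases with relaxed float computation, which allows a driver to use float16 range and precision internally. Below is a crude, runnable illustration of the precision loss that mode tolerates; only the operand values are rounded here, the helper names are mine, and numpy is assumed to be available.

```python
import numpy as np

def relaxed(x):
    """Round-trip through float16 to mimic the reduced precision a
    relaxed-mode driver may use internally."""
    return np.float32(np.float16(np.float32(x)))

def fully_connected(inputs, weights, bias, cast=lambda v: v):
    return [[float(sum(cast(x) * cast(w) for x, w in zip(row, unit)) + cast(b))
             for unit, b in zip(weights, bias)]
            for row in inputs]

x = [[3.14159, 2.71828, 1.41421, 1.73205, 0.57722]]
w = [[0.1, 0.2, 0.3, 0.4, 0.5]]
b = [1.0]
print("float32:", fully_connected(x, w, b))
print("relaxed:", fully_connected(x, w, b, cast=relaxed))
```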
|
D | fully_connected_float_weights_as_inputs_relaxed.mod.py |
    19 weights = Input("op2", "TENSOR_FLOAT32", "{1, 1}") variable
    23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    29 weights: [2],
|
D | rnn_state_relaxed.mod.py |
    24 weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size)) variable
    34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
    39 weights: [
|
D | rnn_relaxed.mod.py |
    24 weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size)) variable
    34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
    39 weights: [
|