/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include "LSHProjection.h"

#include "CpuExecutor.h"
#include "HalInterfaces.h"
#include "Tracing.h"
#include "Utils.h"

#include <utils/hash/farmhash.h>
#include <memory>

namespace android {
namespace nn {

using namespace hal;

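// Caches pointers to the input, weight, hash, and output operands and reads the
// projection type from the kTypeParam scalar input.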
LSHProjection::LSHProjection(const Operation& operation, RunTimeOperandInfo* operands) {
    input_ = GetInput(operation, operands, kInputTensor);
    weight_ = GetInput(operation, operands, kWeightTensor);
    hash_ = GetInput(operation, operands, kHashTensor);

    type_ = static_cast<LSHProjectionType>(
            getScalarData<int32_t>(*GetInput(operation, operands, kTypeParam)));

    output_ = GetOutput(operation, operands, kOutputTensor);
}

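// Validates operand ranks and sizes and derives the output shape:
// one element per hash function for the sparse variants, and
// num_hash * num_bits elements (one per sign bit) for the dense variant.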
bool LSHProjection::Prepare(const Operation& operation, RunTimeOperandInfo* operands,
                            Shape* outputShape) {
    // Check that none of the required inputs are omitted.
    constexpr int requiredInputs[] = {kHashTensor, kInputTensor, kTypeParam};
    for (const int requiredInput : requiredInputs) {
        NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
                << "required input " << requiredInput << " is omitted";
    }
    NN_CHECK_EQ(NumOutputs(operation), 1);

    const RunTimeOperandInfo* hash = GetInput(operation, operands, kHashTensor);
    NN_CHECK_EQ(NumDimensions(hash), 2);
    // Support up to 32 bits.
    NN_CHECK(SizeOfDimension(hash, 1) <= 32);

    const RunTimeOperandInfo* input = GetInput(operation, operands, kInputTensor);
    NN_CHECK(NumDimensions(input) >= 1);

    const auto& typeOperand = operands[operation.inputs[kTypeParam]];
    NN_RET_CHECK(typeOperand.length >= sizeof(int32_t));
    auto type = static_cast<LSHProjectionType>(getScalarData<int32_t>(typeOperand));
    switch (type) {
        case LSHProjectionType_SPARSE:
        case LSHProjectionType_SPARSE_DEPRECATED:
            NN_CHECK(NumInputsWithValues(operation, operands) == 3);
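            // Sparse projection: one hash signature (index) per hash function.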
            outputShape->dimensions = {SizeOfDimension(hash, 0)};
            break;
        case LSHProjectionType_DENSE: {
            RunTimeOperandInfo* weight = GetInput(operation, operands, kWeightTensor);
            NN_CHECK_EQ(NumInputsWithValues(operation, operands), 4);
            NN_CHECK_EQ(NumDimensions(weight), 1);
            NN_CHECK_EQ(SizeOfDimension(weight, 0), SizeOfDimension(input, 0));
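            // Dense projection: one output bit per (hash function, bit) pair.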
            outputShape->dimensions = {SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1)};
            break;
        }
        default:
            return false;
    }

    outputShape->type = OperandType::TENSOR_INT32;
    outputShape->offset = 0;
    outputShape->scale = 0.f;

    return true;
}

// Computes the sign bit of the dot product of hash(seed, input) and weight.
// NOTE: the seed is a float that is converted to double as a temporary solution
// to match the trained model. This will change once the new model is trained
// with an optimized method.
//
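// For each slice of the input along its first dimension, the 64-bit farmhash
// fingerprint of (seed, slice) is accumulated into a running score, scaled by
// the matching weight element when a weight tensor is provided. The returned
// bit is 1 if the final score is positive and 0 otherwise.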
template <typename T>
int runningSignBit(const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight, float seed) {
    double score = 0.0;
    int input_item_bytes = nonExtensionOperandSizeOfData(input->type, input->dimensions) /
                           SizeOfDimension(input, 0);
    char* input_ptr = (char*)(input->buffer);

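    // Each hash key is the seed followed by one input item (one slice of the
    // input along its first dimension).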
    const size_t seed_size = sizeof(seed);
    const size_t key_bytes = seed_size + input_item_bytes;
    std::unique_ptr<char[]> key(new char[key_bytes]);

    for (uint32_t i = 0; i < SizeOfDimension(input, 0); ++i) {
        // Create running hash id and value for current dimension.
        memcpy(key.get(), &seed, seed_size);
        memcpy(key.get() + seed_size, input_ptr, input_item_bytes);

        int64_t hash_signature = farmhash::Fingerprint64(key.get(), key_bytes);
        double running_value = static_cast<double>(hash_signature);
        input_ptr += input_item_bytes;
        if (weight->lifetime == OperandLifeTime::NO_VALUE) {
            score += running_value;
        } else {
            score += static_cast<double>(reinterpret_cast<T*>(weight->buffer)[i]) * running_value;
        }
    }

    return (score > 0) ? 1 : 0;
}

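// Sparse projection: for each of the num_hash hash functions, concatenates
// num_bits sign bits into a single integer signature. For LSHProjectionType_SPARSE
// the signature is offset by i * 2^num_bits so that indices from different hash
// functions never collide; the deprecated variant emits the raw signatures.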
template <typename T>
void SparseLshProjection(LSHProjectionType type, const RunTimeOperandInfo* hash,
                         const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight,
                         int32_t* out_buf) {
    int num_hash = SizeOfDimension(hash, 0);
    int num_bits = SizeOfDimension(hash, 1);
    for (int i = 0; i < num_hash; i++) {
        int32_t hash_signature = 0;
        for (int j = 0; j < num_bits; j++) {
            T seed = reinterpret_cast<T*>(hash->buffer)[i * num_bits + j];
            int bit = runningSignBit<T>(input, weight, static_cast<float>(seed));
            hash_signature = (hash_signature << 1) | bit;
        }
        if (type == LSHProjectionType_SPARSE_DEPRECATED) {
            *out_buf++ = hash_signature;
        } else {
            *out_buf++ = hash_signature + i * (1 << num_bits);
        }
    }
}

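// Dense projection: emits every sign bit individually, producing
// num_hash * num_bits output values of 0 or 1.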
template <typename T>
void DenseLshProjection(const RunTimeOperandInfo* hash, const RunTimeOperandInfo* input,
                        const RunTimeOperandInfo* weight, int32_t* out_buf) {
    int num_hash = SizeOfDimension(hash, 0);
    int num_bits = SizeOfDimension(hash, 1);
    for (int i = 0; i < num_hash; i++) {
        for (int j = 0; j < num_bits; j++) {
            T seed = reinterpret_cast<T*>(hash->buffer)[i * num_bits + j];
            int bit = runningSignBit<T>(input, weight, static_cast<float>(seed));
            *out_buf++ = bit;
        }
    }
}

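// Writes the projection into the output buffer, dispatching on the projection
// type. T is the element type of the hash and weight tensors (float or _Float16).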
template <typename T>
bool LSHProjection::Eval() {
    NNTRACE_COMP("LSHProjection::Eval");

    int32_t* out_buf = reinterpret_cast<int32_t*>(output_->buffer);

    switch (type_) {
        case LSHProjectionType_DENSE:
            DenseLshProjection<T>(hash_, input_, weight_, out_buf);
            break;
        case LSHProjectionType_SPARSE:
        case LSHProjectionType_SPARSE_DEPRECATED:
            SparseLshProjection<T>(type_, hash_, input_, weight_, out_buf);
            break;
        default:
            return false;
    }
    return true;
}

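// Explicit instantiations for the supported hash/weight element types.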
template bool LSHProjection::Eval<float>();
template bool LSHProjection::Eval<_Float16>();

template int runningSignBit<float>(const RunTimeOperandInfo* input,
                                   const RunTimeOperandInfo* weight, float seed);
template int runningSignBit<_Float16>(const RunTimeOperandInfo* input,
                                      const RunTimeOperandInfo* weight, float seed);

template void SparseLshProjection<float>(LSHProjectionType type, const RunTimeOperandInfo* hash,
                                         const RunTimeOperandInfo* input,
                                         const RunTimeOperandInfo* weight, int32_t* outBuffer);
template void SparseLshProjection<_Float16>(LSHProjectionType type, const RunTimeOperandInfo* hash,
                                            const RunTimeOperandInfo* input,
                                            const RunTimeOperandInfo* weight, int32_t* outBuffer);

template void DenseLshProjection<float>(const RunTimeOperandInfo* hash,
                                        const RunTimeOperandInfo* input,
                                        const RunTimeOperandInfo* weight, int32_t* outBuffer);
template void DenseLshProjection<_Float16>(const RunTimeOperandInfo* hash,
                                           const RunTimeOperandInfo* input,
                                           const RunTimeOperandInfo* weight, int32_t* outBuffer);

}  // namespace nn
}  // namespace android