/*
 * Copyright 2017, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <iostream>
#include <string>

#include "clang/AST/APValue.h"

#include "slang_assert.h"
#include "slang_rs_export_foreach.h"
#include "slang_rs_export_func.h"
#include "slang_rs_export_reduce.h"
#include "slang_rs_export_type.h"
#include "slang_rs_export_var.h"
#include "slang_rs_reflection.h"
#include "slang_rs_reflection_state.h"

#include "bcinfo/MetadataExtractor.h"

namespace slang {
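
// ReflectionState bridges the 32-bit and 64-bit compilation passes: during the
// 32-bit pass it collects per-file information about exported entities
// (foreach kernels, invokable functions, reduction kernels, records, and
// global variables), and during the 64-bit pass it replays that information
// and reports any 32-/64-bit inconsistencies that would make the generated
// Java reflection ambiguous. (Summary inferred from the state assertions and
// the isCollecting()/isUsing() branches below.)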

static bool equal(const clang::APValue &a, const clang::APValue &b) {
  if (a.getKind() != b.getKind())
    return false;
  switch (a.getKind()) {
    case clang::APValue::Float:
      return a.getFloat().bitwiseIsEqual(b.getFloat());
    case clang::APValue::Int:
      return a.getInt() == b.getInt();
    case clang::APValue::Vector: {
      unsigned NumElements = a.getVectorLength();
      if (NumElements != b.getVectorLength())
        return false;
      for (unsigned i = 0; i < NumElements; ++i) {
        if (!equal(a.getVectorElt(i), b.getVectorElt(i)))
          return false;
      }
      return true;
    }
    default:
      slangAssert(false && "unexpected APValue kind");
      return false;
  }
}

ReflectionState::~ReflectionState() {
  slangAssert(mState==S_Initial || mState==S_ClosedJava64 || mState==S_Bad);
  delete mStringSet;
}

void ReflectionState::openJava32(size_t NumFiles) {
  if (kDisabled)
    return;
  slangAssert(mState==S_Initial);
  mState = S_OpenJava32;
  mStringSet = new llvm::StringSet<>;
  mFiles.BeginCollecting(NumFiles);
}

void ReflectionState::closeJava32() {
  if (kDisabled)
    return;
  slangAssert(mState==S_OpenJava32 && (mForEachOpen < 0) && !mOutputClassOpen && (mRecordsState != RS_Open));
  mState = S_ClosedJava32;
  mRSC = nullptr;
}

void ReflectionState::openJava64() {
  if (kDisabled)
    return;
  slangAssert(mState==S_ClosedJava32);
  mState = S_OpenJava64;
  mFiles.BeginUsing();
}

void ReflectionState::closeJava64() {
  if (kDisabled)
    return;
  slangAssert(mState==S_OpenJava64 && (mForEachOpen < 0) && !mOutputClassOpen && (mRecordsState != RS_Open));
  mState = S_ClosedJava64;
  mRSC = nullptr;
}
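
// A minimal sketch of the expected driver sequence, as implied by the
// assertions above (illustrative only; the actual caller lives elsewhere in
// slang):
//
//   ReflectionState S;
//   S.openJava32(NumFiles);   // S_Initial      -> S_OpenJava32  (collect pass)
//   //   ... nextFile()/begin*/declare*/end* for each 32-bit compile ...
//   S.closeJava32();          // S_OpenJava32   -> S_ClosedJava32
//   S.openJava64();           // S_ClosedJava32 -> S_OpenJava64  (use/check pass)
//   //   ... nextFile()/begin*/declare*/end* for each 64-bit compile ...
//   S.closeJava64();          // S_OpenJava64   -> S_ClosedJava64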

llvm::StringRef ReflectionState::canon(const std::string &String) {
  slangAssert(isCollecting());
  // NOTE: llvm::StringSet does not permit the empty string as a member
  return String.empty() ? llvm::StringRef() : mStringSet->insert(String).first->getKey();
}

std::string ReflectionState::getUniqueTypeName(const RSExportType *T) {
  return RSReflectionJava::GetTypeName(T, RSReflectionJava::TypeNamePseudoC);
}

void ReflectionState::nextFile(const RSContext *RSC,
                               const std::string &PackageName,
                               const std::string &RSSourceFileName) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  mRSC = RSC;

  slangAssert(mRecordsState != RS_Open);
  mRecordsState = RS_Initial;

  if (isCollecting()) {
    File &file = mFiles.CollectNext();
    file.mPackageName = PackageName;
    file.mRSSourceFileName = RSSourceFileName;
  }
  if (isUsing()) {
    File &file = mFiles.UseNext();
    slangAssert(file.mRSSourceFileName == RSSourceFileName);
    if (file.mPackageName != PackageName)
      mRSC->ReportError("in file '%0' Java package name is '%1' for 32-bit targets "
                        "but '%2' for 64-bit targets")
          << RSSourceFileName << file.mPackageName << PackageName;
  }
}
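
// Note that the use (64-bit) pass must see source files in the same order as
// the collect (32-bit) pass: UseNext() above pairs each 64-bit file with the
// corresponding collected 32-bit file by position (see the assertion), and
// only a package-name mismatch is reported as a user-visible error.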

void ReflectionState::dump() {
  const size_t NumFiles = mFiles.Size();
  for (int i = 0; i < NumFiles; ++i) {
    const File &file = mFiles[i];
    std::cout << "file = \"" << file.mRSSourceFileName << "\", "
              << "package = \"" << file.mPackageName << "\"" << std::endl;

    // NOTE: "StringMap iteration order, however, is not guaranteed to
    // be deterministic". So sort before dumping.
    typedef const llvm::StringMap<File::Record>::MapEntryTy *RecordsEntryTy;
    std::vector<RecordsEntryTy> Records;
    Records.reserve(file.mRecords.size());
    for (auto I = file.mRecords.begin(), E = file.mRecords.end(); I != E; I++)
      Records.push_back(&(*I));
    std::sort(Records.begin(), Records.end(),
              [](RecordsEntryTy a, RecordsEntryTy b) { return a->getKey().compare(b->getKey())==-1; });
    for (auto Record : Records) {
      const auto &Val = Record->getValue();
      std::cout << " (Record) name=\"" << Record->getKey().str() << "\""
                << " allocSize=" << Val.mAllocSize
                << " postPadding=" << Val.mPostPadding
                << " ordinary=" << Val.mOrdinary
                << " matchedByName=" << Val.mMatchedByName
                << std::endl;
      const size_t NumFields = Val.mFieldCount;
      for (int fieldIdx = 0; fieldIdx < NumFields; ++fieldIdx) {
        const auto &field = Val.mFields[fieldIdx];
        std::cout << " (Field) name=\"" << field.mName << "\" ("
                  << field.mPrePadding << ", \"" << field.mType.str()
                  << "\"(" << field.mStoreSize << ")@" << field.mOffset
                  << ", " << field.mPostPadding << ")" << std::endl;
      }
    }

    const size_t NumVars = file.mVariables.Size();
    for (int varIdx = 0; varIdx < NumVars; ++varIdx) {
      const auto &var = file.mVariables[varIdx];
      std::cout << " (Var) name=\"" << var.mName << "\" type=\"" << var.mType.str()
                << "\" const=" << var.mIsConst << " initialized=" << (var.mInitializerCount != 0)
                << " allocSize=" << var.mAllocSize << std::endl;
    }

    for (int feIdx = 0; feIdx < file.mForEachCount; ++feIdx) {
      const auto &fe = file.mForEaches[feIdx];
      std::cout << " (ForEach) ordinal=" << feIdx << " state=";
      switch (fe.mState) {
        case File::ForEach::S_Initial:
          std::cout << "initial" << std::endl;
          continue;
        case File::ForEach::S_Collected:
          std::cout << "collected";
          break;
        case File::ForEach::S_UseMatched:
          std::cout << "usematched";
          break;
        default:
          std::cout << fe.mState;
          break;
      }
      std::cout << " name=\"" << fe.mName << "\" kernel=" << fe.mIsKernel
                << " hasOut=" << fe.mHasOut << " out=\"" << fe.mOut.str()
                << "\" metadata=0x" << std::hex << fe.mSignatureMetadata << std::dec
                << std::endl;
      const size_t NumIns = fe.mIns.Size();
      for (int insIdx = 0; insIdx < NumIns; ++insIdx)
        std::cout << " (In) " << fe.mIns[insIdx].str() << std::endl;
      const size_t NumParams = fe.mParams.Size();
      for (int paramsIdx = 0; paramsIdx < NumParams; ++paramsIdx)
        std::cout << " (Param) " << fe.mParams[paramsIdx].str() << std::endl;
    }

    for (auto feBad : mForEachesBad) {
      std::cout << " (ForEachBad) ordinal=" << feBad->getOrdinal()
                << " name=\"" << feBad->getName() << "\""
                << std::endl;
    }

    const size_t NumInvokables = file.mInvokables.Size();
    for (int invIdx = 0; invIdx < NumInvokables; ++invIdx) {
      const auto &inv = file.mInvokables[invIdx];
      std::cout << " (Invokable) name=\"" << inv.mName << "\"" << std::endl;
      const size_t NumParams = inv.mParamCount;
      for (int paramsIdx = 0; paramsIdx < NumParams; ++paramsIdx)
        std::cout << " (Param) " << inv.mParams[paramsIdx].str() << std::endl;
    }

    const size_t NumReduces = file.mReduces.Size();
    for (int redIdx = 0; redIdx < NumReduces; ++redIdx) {
      const auto &red = file.mReduces[redIdx];
      std::cout << " (Reduce) name=\"" << red.mName
                << "\" result=\"" << red.mResult.str()
                << "\" exportable=" << red.mIsExportable
                << std::endl;
      const size_t NumIns = red.mAccumInCount;
      for (int insIdx = 0; insIdx < NumIns; ++insIdx)
        std::cout << " (In) " << red.mAccumIns[insIdx].str() << std::endl;
    }
  }
}

// ForEach /////////////////////////////////////////////////////////////////////////////////////

void ReflectionState::beginForEaches(size_t Count) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  if (isCollecting()) {
    auto &file = mFiles.Current();
    file.mForEaches = new File::ForEach[Count];
    file.mForEachCount = Count;
  }
  if (isUsing()) {
    slangAssert(mForEachesBad.empty());
    mNumForEachesMatchedByOrdinal = 0;
  }
}
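
// During the collect pass, beginForEaches() preallocates one File::ForEach
// slot per kernel ordinal; during the use pass, the bookkeeping reset here
// lets endForEaches() detect kernels that exist for only one of the two
// target widths.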

// Keep this in sync with RSReflectionJava::genExportForEach().
void ReflectionState::beginForEach(const RSExportForEach *EF) {
  slangAssert(!isClosed() && (mForEachOpen < 0));
  if (!isActive())
    return;

  const bool IsKernel = EF->isKernelStyle();
  const std::string& Name = EF->getName();
  const unsigned Ordinal = EF->getOrdinal();
  const size_t InCount = EF->getInTypes().size();
  const size_t ParamCount = EF->params_count();

  const RSExportType *OET = EF->getOutType();
  if (OET && !IsKernel) {
    slangAssert(OET->getClass() == RSExportType::ExportClassPointer);
    OET = static_cast<const RSExportPointerType *>(OET)->getPointeeType();
  }
  const std::string OutType = (OET ? getUniqueTypeName(OET) : "");
  const bool HasOut = (EF->hasOut() || EF->hasReturn());

  mForEachOpen = Ordinal;
  mForEachFatal = true;  // we'll set this to false if everything looks ok

  auto &file = mFiles.Current();
  auto &foreaches = file.mForEaches;
  if (isCollecting()) {
    slangAssert(Ordinal < file.mForEachCount);
    auto &foreach = foreaches[Ordinal];
    slangAssert(foreach.mState == File::ForEach::S_Initial);
    foreach.mState = File::ForEach::S_Collected;
    foreach.mName = Name;
    foreach.mIns.BeginCollecting(InCount);
    foreach.mParams.BeginCollecting(ParamCount);
    foreach.mOut = canon(OutType);
    foreach.mHasOut = HasOut;
    foreach.mSignatureMetadata = 0;
    foreach.mIsKernel = IsKernel;
  }
  if (isUsing()) {
    if (Ordinal >= file.mForEachCount) {
      mForEachesBad.push_back(EF);
      return;
    }

    auto &foreach = foreaches[Ordinal];
    slangAssert(foreach.mState == File::ForEach::S_Collected);
    foreach.mState = File::ForEach::S_UseMatched;
    ++mNumForEachesMatchedByOrdinal;

    if (foreach.mName != Name) {
      // Order matters because it determines slot number
      mForEachesBad.push_back(EF);
      return;
    }

    // At this point, we have matching ordinal and matching name.

    if (foreach.mIsKernel != IsKernel) {
      mRSC->ReportError(EF->getLocation(),
                        "foreach kernel '%0' has __attribute__((kernel)) for %select{32|64}1-bit targets "
                        "but not for %select{64|32}1-bit targets")
          << Name << IsKernel;
      return;
    }

    if ((foreach.mHasOut != HasOut) || !foreach.mOut.equals(OutType)) {
      // There are several different patterns we need to handle:
      //   (1) Two different non-void* output types
      //   (2) One non-void* output type, one void* output type
      //   (3) One non-void* output type, one no-output
      //   (4) One void* output type, one no-output
      if (foreach.mHasOut && HasOut) {
        if (foreach.mOut.size() && OutType.size()) {
          // (1) Two different non-void* output types
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has output type '%1' for 32-bit targets "
                            "but output type '%2' for 64-bit targets")
              << Name << foreach.mOut.str() << OutType;
        } else {
          // (2) One non-void* return type, one void* output type
          const bool hasTyped64 = OutType.size();
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has output type '%1' for %select{32|64}2-bit targets "
                            "but has untyped output for %select{64|32}2-bit targets")
              << Name << (foreach.mOut.str() + OutType) << hasTyped64;
        }
      } else {
        const std::string CombinedOutType = (foreach.mOut.str() + OutType);
        if (CombinedOutType.size()) {
          // (3) One non-void* output type, one no-output
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has output type '%1' for %select{32|64}2-bit targets "
                            "but no output for %select{64|32}2-bit targets")
              << Name << CombinedOutType << HasOut;
        } else {
          // (4) One void* output type, one no-output
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has untyped output for %select{32|64}1-bit targets "
                            "but no output for %select{64|32}1-bit targets")
              << Name << HasOut;
        }
      }
    }

    bool BadCount = false;
    if (foreach.mIns.Size() != InCount) {
      mRSC->ReportError(EF->getLocation(),
                        "foreach kernel '%0' has %1 input%s1 for 32-bit targets "
                        "but %2 input%s2 for 64-bit targets")
          << Name << unsigned(foreach.mIns.Size()) << unsigned(InCount);
      BadCount = true;
    }
    if (foreach.mParams.Size() != ParamCount) {
      mRSC->ReportError(EF->getLocation(),
                        "foreach kernel '%0' has %1 usrData parameter%s1 for 32-bit targets "
                        "but %2 usrData parameter%s2 for 64-bit targets")
          << Name << unsigned(foreach.mParams.Size()) << unsigned(ParamCount);
      BadCount = true;
    }

    if (BadCount)
      return;

    foreach.mIns.BeginUsing();
    foreach.mParams.BeginUsing();
  }

  mForEachFatal = false;
}
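
// Matching policy above: kernels are first paired by ordinal (slot position)
// and then required to agree on name; anything that fails either step is
// deferred to endForEaches() via mForEachesBad. Only kernels that match by
// both ordinal and name are further checked for kernel style, output type,
// input count, and usrData parameter count.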

void ReflectionState::addForEachIn(const RSExportForEach *EF, const RSExportType *Type) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mForEachOpen == EF->getOrdinal());

  // Type may be nullptr in the case of void*. See RSExportForEach::Create().
  if (Type && !EF->isKernelStyle()) {
    slangAssert(Type->getClass() == RSExportType::ExportClassPointer);
    Type = static_cast<const RSExportPointerType *>(Type)->getPointeeType();
  }
  const std::string TypeName = (Type ? getUniqueTypeName(Type) : std::string());

  auto &ins = mFiles.Current().mForEaches[EF->getOrdinal()].mIns;
  if (isCollecting()) {
    ins.CollectNext() = canon(TypeName);
  }
  if (isUsing()) {
    if (mForEachFatal)
      return;

    if (!ins.UseNext().equals(TypeName)) {
      if (ins.Current().size() && TypeName.size()) {
        mRSC->ReportError(EF->getLocation(),
                          "%ordinal0 input of foreach kernel '%1' "
                          "has type '%2' for 32-bit targets "
                          "but type '%3' for 64-bit targets")
            << unsigned(ins.CurrentIdx() + 1)
            << EF->getName()
            << ins.Current().str()
            << TypeName;
      } else {
        const bool hasType64 = TypeName.size();
        mRSC->ReportError(EF->getLocation(),
                          "%ordinal0 input of foreach kernel '%1' "
                          "has type '%2' for %select{32|64}3-bit targets "
                          "but is untyped for %select{64|32}3-bit targets")
            << unsigned(ins.CurrentIdx() + 1)
            << EF->getName()
            << (ins.Current().str() + TypeName)
            << hasType64;
      }
    }
  }
}

void ReflectionState::addForEachParam(const RSExportForEach *EF, const RSExportType *Type) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mForEachOpen == EF->getOrdinal());

  const std::string TypeName = getUniqueTypeName(Type);

  auto &params = mFiles.Current().mForEaches[EF->getOrdinal()].mParams;
  if (isCollecting()) {
    params.CollectNext() = canon(TypeName);
  }
  if (isUsing()) {
    if (mForEachFatal)
      return;

    if (!params.UseNext().equals(TypeName)) {
      mRSC->ReportError(EF->getLocation(),
                        "%ordinal0 usrData parameter of foreach kernel '%1' "
                        "has type '%2' for 32-bit targets "
                        "but type '%3' for 64-bit targets")
          << unsigned(params.CurrentIdx() + 1)
          << EF->getName()
          << params.Current().str()
          << TypeName;
    }
  }
}

void ReflectionState::addForEachSignatureMetadata(const RSExportForEach *EF, unsigned Metadata) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mForEachOpen == EF->getOrdinal());

  // These are properties in the metadata that we need to check.
  const unsigned SpecialParameterBits = bcinfo::MD_SIG_X|bcinfo::MD_SIG_Y|bcinfo::MD_SIG_Z|bcinfo::MD_SIG_Ctxt;

#ifndef __DISABLE_ASSERTS
  {
    // These are properties in the metadata that we already check in
    // some other way.
    const unsigned BoringBits = bcinfo::MD_SIG_In|bcinfo::MD_SIG_Out|bcinfo::MD_SIG_Usr|bcinfo::MD_SIG_Kernel;

    slangAssert((Metadata & ~(SpecialParameterBits | BoringBits)) == 0);
  }
#endif

  auto &mSignatureMetadata = mFiles.Current().mForEaches[EF->getOrdinal()].mSignatureMetadata;
  if (isCollecting()) {
    mSignatureMetadata = Metadata;
  }
  if (isUsing()) {
    if (mForEachFatal)
      return;

    if ((mSignatureMetadata & SpecialParameterBits) != (Metadata & SpecialParameterBits)) {
      mRSC->ReportError(EF->getLocation(),
                        "foreach kernel '%0' has different special parameters "
                        "for 32-bit targets than for 64-bit targets")
          << EF->getName();
    }
  }
}

void ReflectionState::endForEach() {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mForEachOpen >= 0);
  if (isUsing() && !mForEachFatal) {
    slangAssert(mFiles.Current().mForEaches[mForEachOpen].mIns.isFinished());
    slangAssert(mFiles.Current().mForEaches[mForEachOpen].mParams.isFinished());
  }

  mForEachOpen = -1;
}

void ReflectionState::endForEaches() {
  slangAssert(mForEachOpen < 0);
  if (!isUsing())
    return;

  const auto &file = mFiles.Current();

  if (!mForEachesBad.empty()) {
    std::sort(mForEachesBad.begin(), mForEachesBad.end(),
              [](const RSExportForEach *a, const RSExportForEach *b) { return a->getOrdinal() < b->getOrdinal(); });
    // Note that after the sort, all kernels that are bad because of
    // name mismatch precede all kernels that are bad because of
    // too-high ordinal.

    // 32-bit and 64-bit compiles need to see foreach kernels in the
    // same order, because of slot number assignment. Once we see the
    // first name mismatch in the sequence of foreach kernels, it
    // doesn't make sense to issue further diagnostics regarding
    // foreach kernels except those that still happen to match by name
    // and ordinal (we already handled those diagnostics between
    // beginForEach() and endForEach()).
    bool ForEachesOrderFatal = false;

    for (const RSExportForEach *EF : mForEachesBad) {
      if (EF->getOrdinal() >= file.mForEachCount) {
        mRSC->ReportError(EF->getLocation(),
                          "foreach kernel '%0' is only present for 64-bit targets")
            << EF->getName();
      } else {
        mRSC->ReportError(EF->getLocation(),
                          "%ordinal0 foreach kernel is '%1' for 32-bit targets "
                          "but '%2' for 64-bit targets")
            << (EF->getOrdinal() + 1)
            << mFiles.Current().mForEaches[EF->getOrdinal()].mName
            << EF->getName();
        ForEachesOrderFatal = true;
        break;
      }
    }

    mForEachesBad.clear();

    if (ForEachesOrderFatal)
      return;
  }

  if (mNumForEachesMatchedByOrdinal == file.mForEachCount)
    return;
  for (unsigned ord = 0; ord < file.mForEachCount; ord++) {
    const auto &fe = file.mForEaches[ord];
    if (fe.mState == File::ForEach::S_Collected) {
      mRSC->ReportError("in file '%0' foreach kernel '%1' is only present for 32-bit targets")
          << file.mRSSourceFileName << fe.mName;
    }
  }
}

// Invokable ///////////////////////////////////////////////////////////////////////////////////

// Keep this in sync with RSReflectionJava::genExportFunction().
void ReflectionState::declareInvokable(const RSExportFunc *EF) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  const std::string& Name = EF->getName(/*Mangle=*/false);
  const size_t ParamCount = EF->getNumParameters();

  auto &invokables = mFiles.Current().mInvokables;
  if (isCollecting()) {
    auto &invokable = invokables.CollectNext();
    invokable.mName = Name;
    invokable.mParamCount = ParamCount;
    if (EF->hasParam()) {
      unsigned FieldIdx = 0;
      invokable.mParams = new llvm::StringRef[ParamCount];
      for (RSExportFunc::const_param_iterator I = EF->params_begin(),
                                              E = EF->params_end();
           I != E; I++, FieldIdx++) {
        invokable.mParams[FieldIdx] = canon(getUniqueTypeName((*I)->getType()));
      }
    }
  }
  if (isUsing()) {
    if (mInvokablesOrderFatal)
      return;

    if (invokables.isFinished()) {
      // This doesn't actually break reflection, but that's a
      // coincidence of the fact that we reflect during the 64-bit
      // compilation pass rather than the 32-bit compilation pass, and
      // of the fact that the "extra" invokable(s) are at the end.
      mRSC->ReportError(EF->getLocation(),
                        "invokable function '%0' is only present for 64-bit targets")
          << Name;
      return;
    }

    auto &invokable = invokables.UseNext();

    if (invokable.mName != Name) {
      // Order matters because it determines slot number
      mRSC->ReportError(EF->getLocation(),
                        "%ordinal0 invokable function is '%1' for 32-bit targets "
                        "but '%2' for 64-bit targets")
          << unsigned(invokables.CurrentIdx() + 1)
          << invokable.mName
          << Name;
      mInvokablesOrderFatal = true;
      return;
    }

    if (invokable.mParamCount != ParamCount) {
      mRSC->ReportError(EF->getLocation(),
                        "invokable function '%0' has %1 parameter%s1 for 32-bit targets "
                        "but %2 parameter%s2 for 64-bit targets")
          << Name << unsigned(invokable.mParamCount) << unsigned(ParamCount);
      return;
    }
    if (EF->hasParam()) {
      unsigned FieldIdx = 0;
      for (RSExportFunc::const_param_iterator I = EF->params_begin(),
                                              E = EF->params_end();
           I != E; I++, FieldIdx++) {
        const std::string Type = getUniqueTypeName((*I)->getType());
        if (!invokable.mParams[FieldIdx].equals(Type)) {
          mRSC->ReportError(EF->getLocation(),
                            "%ordinal0 parameter of invokable function '%1' "
                            "has type '%2' for 32-bit targets "
                            "but type '%3' for 64-bit targets")
              << (FieldIdx + 1)
              << Name
              << invokable.mParams[FieldIdx].str()
              << Type;
        }
      }
    }
  }
}

void ReflectionState::endInvokables() {
  if (!isUsing() || mInvokablesOrderFatal)
    return;

  auto &invokables = mFiles.Current().mInvokables;
  while (!invokables.isFinished()) {
    const auto &invokable = invokables.UseNext();
    mRSC->ReportError("in file '%0' invokable function '%1' is only present for 32-bit targets")
        << mFiles.Current().mRSSourceFileName << invokable.mName;
  }
}
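
// As an illustration (hypothetical script source, not part of this file), a
// declaration such as
//
//   #ifndef __LP64__
//   void legacyHelper(int x);   // exported invokable only in the 32-bit compile
//   #endif
//
// leaves an unconsumed collected entry, and the loop above reports it as
// "only present for 32-bit targets".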

// Record //////////////////////////////////////////////////////////////////////////////////////

void ReflectionState::beginRecords() {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mRecordsState != RS_Open);
  mRecordsState = RS_Open;
  mNumRecordsMatchedByName = 0;
}

void ReflectionState::endRecords() {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mRecordsState == RS_Open);
  mRecordsState = RS_Closed;

  if (isUsing()) {
    const File &file = mFiles.Current();
    if (mNumRecordsMatchedByName == file.mRecords.size())
      return;
    // NOTE: "StringMap iteration order, however, is not guaranteed to
    // be deterministic". So sort by name before reporting.
    // Alternatively, if we record additional information, we could
    // sort by source location or by order in which we discovered the
    // need to export.
    std::vector<llvm::StringRef> Non64RecordNames;
    for (auto I = file.mRecords.begin(), E = file.mRecords.end(); I != E; I++)
      if (!I->getValue().mMatchedByName && I->getValue().mOrdinary)
        Non64RecordNames.push_back(I->getKey());
    std::sort(Non64RecordNames.begin(), Non64RecordNames.end(),
              [](llvm::StringRef a, llvm::StringRef b) { return a.compare(b)==-1; });
    for (auto N : Non64RecordNames)
      mRSC->ReportError("in file '%0' structure '%1' is exported only for 32-bit targets")
          << file.mRSSourceFileName << N.str();
  }
}
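
// Unlike invokables, foreach kernels, and variables (which must agree on
// declaration order, because order determines slot numbers), exported records
// are keyed and matched purely by name via the per-file StringMap, so their
// order may differ between the 32-bit and 64-bit compiles.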

void ReflectionState::declareRecord(const RSExportRecordType *ERT, bool Ordinary) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mRecordsState == RS_Open);

  auto &records = mFiles.Current().mRecords;
  if (isCollecting()) {
    // Keep struct/field layout in sync with
    // RSReflectionJava::genPackVarOfType() and
    // RSReflectionJavaElementBuilder::genAddElement()

    // Save properties of record

    const size_t FieldCount = ERT->fields_size();
    File::Record::Field *Fields = new File::Record::Field[FieldCount];

    size_t Pos = 0;  // Relative position of field within record
    unsigned FieldIdx = 0;
    for (RSExportRecordType::const_field_iterator I = ERT->fields_begin(), E = ERT->fields_end();
         I != E; I++, FieldIdx++) {
      const RSExportRecordType::Field *FieldExport = *I;
      size_t FieldOffset = FieldExport->getOffsetInParent();
      const RSExportType *T = FieldExport->getType();
      size_t FieldStoreSize = T->getStoreSize();
      size_t FieldAllocSize = T->getAllocSize();

      slangAssert(FieldOffset >= Pos);
      slangAssert(FieldAllocSize >= FieldStoreSize);

      auto &FieldState = Fields[FieldIdx];
      FieldState.mName = FieldExport->getName();
      FieldState.mType = canon(getUniqueTypeName(T));
      FieldState.mPrePadding = FieldOffset - Pos;
      FieldState.mPostPadding = FieldAllocSize - FieldStoreSize;
      FieldState.mOffset = FieldOffset;
      FieldState.mStoreSize = FieldStoreSize;

      Pos = FieldOffset + FieldAllocSize;
    }

    slangAssert(ERT->getAllocSize() >= Pos);

    // Insert record into map

    slangAssert(records.find(ERT->getName()) == records.end());
    File::Record &record = records[ERT->getName()];
    record.mFields = Fields;
    record.mFieldCount = FieldCount;
    record.mPostPadding = ERT->getAllocSize() - Pos;
    record.mAllocSize = ERT->getAllocSize();
    record.mOrdinary = Ordinary;
    record.mMatchedByName = false;
  }
  if (isUsing()) {
    if (!Ordinary)
      return;

    const auto RIT = records.find(ERT->getName());
    if (RIT == records.end()) {
      // This doesn't actually break reflection, but that's a
      // coincidence of the fact that we reflect during the 64-bit
      // compilation pass rather than the 32-bit compilation pass, so
      // a record that's only classified as exported during the 64-bit
      // compilation pass doesn't cause any problems.
      mRSC->ReportError(ERT->getLocation(), "structure '%0' is exported only for 64-bit targets")
          << ERT->getName();
      return;
    }
    File::Record &record = RIT->getValue();
    record.mMatchedByName = true;
    ++mNumRecordsMatchedByName;
    slangAssert(record.mOrdinary);

    if (ERT->fields_size() != record.mFieldCount) {
      mRSC->ReportError(ERT->getLocation(),
                        "exported structure '%0' has %1 field%s1 for 32-bit targets "
                        "but %2 field%s2 for 64-bit targets")
          << ERT->getName() << unsigned(record.mFieldCount) << unsigned(ERT->fields_size());
      return;
    }

    // Note that we are deliberately NOT comparing layout properties
    // (such as Field offsets and sizes, or Record allocation size);
    // we need to tolerate layout differences between 32-bit
    // compilation and 64-bit compilation.

    unsigned FieldIdx = 0;
    for (RSExportRecordType::const_field_iterator I = ERT->fields_begin(), E = ERT->fields_end();
         I != E; I++, FieldIdx++) {
      const RSExportRecordType::Field &FieldExport = **I;
      const File::Record::Field &FieldState = record.mFields[FieldIdx];
      if (FieldState.mName != FieldExport.getName()) {
        mRSC->ReportError(ERT->getLocation(),
                          "%ordinal0 field of exported structure '%1' "
                          "is '%2' for 32-bit targets "
                          "but '%3' for 64-bit targets")
            << (FieldIdx + 1) << ERT->getName() << FieldState.mName << FieldExport.getName();
        return;
      }
      const std::string FieldExportType = getUniqueTypeName(FieldExport.getType());
      if (!FieldState.mType.equals(FieldExportType)) {
        mRSC->ReportError(ERT->getLocation(),
                          "field '%0' of exported structure '%1' "
                          "has type '%2' for 32-bit targets "
                          "but type '%3' for 64-bit targets")
            << FieldState.mName << ERT->getName() << FieldState.mType.str() << FieldExportType;
      }
    }
  }
}

ReflectionState::Record32
ReflectionState::getRecord32(const RSExportRecordType *ERT) {
  if (isUsing()) {
    const auto &Records = mFiles.Current().mRecords;
    const auto RIT = Records.find(ERT->getName());
    if (RIT != Records.end())
      return Record32(&RIT->getValue());
  }
  return Record32();
}
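
// getRecord32() hands back a Record32 wrapping the collected 32-bit record, or
// a default Record32 when there is no match or we are not in the use pass;
// presumably the Java reflection code uses it to obtain the 32-bit field sizes
// and padding recorded above alongside the 64-bit layout it is emitting.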

// Reduce //////////////////////////////////////////////////////////////////////////////////////

void ReflectionState::declareReduce(const RSExportReduce *ER, bool IsExportable) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  auto &reduces = mFiles.Current().mReduces;
  if (isCollecting()) {
    auto &reduce = reduces.CollectNext();
    reduce.mName = ER->getNameReduce();

    const auto &InTypes = ER->getAccumulatorInTypes();
    const size_t InTypesSize = InTypes.size();
    reduce.mAccumInCount = InTypesSize;
    reduce.mAccumIns = new llvm::StringRef[InTypesSize];
    unsigned InTypesIdx = 0;
    for (const auto &InType : InTypes)
      reduce.mAccumIns[InTypesIdx++] = canon(getUniqueTypeName(InType));

    reduce.mResult = canon(getUniqueTypeName(ER->getResultType()));
    reduce.mIsExportable = IsExportable;
  }
  if (isUsing()) {
    if (mReducesOrderFatal)
      return;

    const std::string& Name = ER->getNameReduce();

    if (reduces.isFinished()) {
      // This doesn't actually break reflection, but that's a
      // coincidence of the fact that we reflect during the 64-bit
      // compilation pass rather than the 32-bit compilation pass, and
      // of the fact that the "extra" reduction kernel(s) are at the
      // end.
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' is only present for 64-bit targets")
          << Name;
      return;
    }

    auto &reduce = reduces.UseNext();

    if (reduce.mName != Name) {
      // Order matters because it determines slot number. We might be
      // able to tolerate certain cases if we ignore non-exportable
      // kernels in the two sequences (32-bit and 64-bit) -- non-exportable
      // kernels do not take up slot numbers.
      mRSC->ReportError(ER->getLocation(),
                        "%ordinal0 reduction kernel is '%1' for 32-bit targets "
                        "but '%2' for 64-bit targets")
          << unsigned(reduces.CurrentIdx() + 1)
          << reduce.mName
          << Name;
      mReducesOrderFatal = true;
      return;
    }

    // If at least one of the two kernels (32-bit or 64-bit) is not
    // exportable, then there will be no reflection for that kernel,
    // and so any mismatch in result type or in inputs is irrelevant.
    // However, we may make more kernels exportable in the future.
    // Therefore, we'll forbid mismatches anyway.

    if (reduce.mIsExportable != IsExportable) {
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' is reflected in Java only for %select{32|64}1-bit targets")
          << reduce.mName
          << IsExportable;
    }

    const std::string ResultType = getUniqueTypeName(ER->getResultType());
    if (!reduce.mResult.equals(ResultType)) {
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' has result type '%1' for 32-bit targets "
                        "but result type '%2' for 64-bit targets")
          << reduce.mName << reduce.mResult.str() << ResultType;
    }

    const auto &InTypes = ER->getAccumulatorInTypes();
    if (reduce.mAccumInCount != InTypes.size()) {
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' has %1 input%s1 for 32-bit targets "
                        "but %2 input%s2 for 64-bit targets")
          << Name << unsigned(reduce.mAccumInCount) << unsigned(InTypes.size());
      return;
    }
    unsigned FieldIdx = 0;
    for (const auto &InType : InTypes) {
      const std::string InTypeName = getUniqueTypeName(InType);
      const llvm::StringRef StateInTypeName = reduce.mAccumIns[FieldIdx++];
      if (!StateInTypeName.equals(InTypeName)) {
        mRSC->ReportError(ER->getLocation(),
                          "%ordinal0 input of reduction kernel '%1' "
                          "has type '%2' for 32-bit targets "
                          "but type '%3' for 64-bit targets")
            << FieldIdx
            << Name
            << StateInTypeName.str()
            << InTypeName;
      }
    }
  }
}

void ReflectionState::endReduces() {
  if (!isUsing() || mReducesOrderFatal)
    return;

  auto &reduces = mFiles.Current().mReduces;
  while (!reduces.isFinished()) {
    const auto &reduce = reduces.UseNext();
    mRSC->ReportError("in file '%0' reduction kernel '%1' is only present for 32-bit targets")
        << mFiles.Current().mRSSourceFileName << reduce.mName;
  }
}

// Variable ////////////////////////////////////////////////////////////////////////////////////

// Keep this in sync with initialization handling in
// RSReflectionJava::genScriptClassConstructor().
ReflectionState::Val32 ReflectionState::declareVariable(const RSExportVar *EV) {
  slangAssert(!isClosed());
  if (!isActive())
    return NoVal32();

  auto &variables = mFiles.Current().mVariables;
  if (isCollecting()) {
    auto &variable = variables.CollectNext();
    variable.mName = EV->getName();
    variable.mType = canon(getUniqueTypeName(EV->getType()));
    variable.mAllocSize = EV->getType()->getAllocSize();
    variable.mIsConst = EV->isConst();
    if (!EV->getInit().isUninit()) {
      variable.mInitializerCount = 1;
      variable.mInitializers = new clang::APValue[1];
      variable.mInitializers[0] = EV->getInit();
    } else if (EV->getArraySize()) {
      variable.mInitializerCount = EV->getNumInits();
      variable.mInitializers = new clang::APValue[variable.mInitializerCount];
      for (size_t i = 0; i < variable.mInitializerCount; ++i)
        variable.mInitializers[i] = EV->getInitArray(i);
    } else {
      variable.mInitializerCount = 0;
    }
    return NoVal32();
  }

  /*-- isUsing() -----------------------------------------------------------*/

  slangAssert(isUsing());

  if (mVariablesOrderFatal)
    return NoVal32();

  if (variables.isFinished()) {
    // This doesn't actually break reflection, but that's a
    // coincidence of the fact that we reflect during the 64-bit
    // compilation pass rather than the 32-bit compilation pass, and
    // of the fact that the "extra" variable(s) are at the end.
    mRSC->ReportError(EV->getLocation(), "global variable '%0' is only present for 64-bit targets")
        << EV->getName();
    return NoVal32();
  }

  const auto &variable = variables.UseNext();

  if (variable.mName != EV->getName()) {
    // Order matters because it determines slot number
    mRSC->ReportError(EV->getLocation(),
                      "%ordinal0 global variable is '%1' for 32-bit targets "
                      "but '%2' for 64-bit targets")
        << unsigned(variables.CurrentIdx() + 1)
        << variable.mName
        << EV->getName();
    mVariablesOrderFatal = true;
    return NoVal32();
  }

  const std::string TypeName = getUniqueTypeName(EV->getType());

  if (!variable.mType.equals(TypeName)) {
    mRSC->ReportError(EV->getLocation(),
                      "global variable '%0' has type '%1' for 32-bit targets "
                      "but type '%2' for 64-bit targets")
        << EV->getName()
        << variable.mType.str()
        << TypeName;
    return NoVal32();
  }

  if (variable.mIsConst != EV->isConst()) {
    mRSC->ReportError(EV->getLocation(),
                      "global variable '%0' has inconsistent 'const' qualification "
                      "between 32-bit targets and 64-bit targets")
        << EV->getName();
    return NoVal32();
  }

  // NOTE: Certain syntactically different but semantically
  // equivalent initialization patterns are unnecessarily rejected
  // as errors.
  //
  // Background:
  //
  // . A vector initialized with a scalar value is treated
  //   by reflection as if all elements of the vector are
  //   initialized with the scalar value.
  // . A vector may be initialized with a vector of greater
  //   length; reflection ignores the extra initializers.
  // . If only the beginning of a vector is explicitly
  //   initialized, reflection treats it as if trailing elements are
  //   initialized to zero (by issuing explicit assignments to those
  //   trailing elements).
  // . If only the beginning of an array is explicitly initialized,
  //   reflection treats it as if trailing elements are initialized
  //   to zero (by Java rules for newly-created arrays).
  //
  // Unnecessarily rejected as errors:
  //
  // . One compile initializes a vector with a scalar, and
  //   another initializes it with a vector whose elements
  //   are the scalar, as in
  //
  //     int2 x =
  //   #ifdef __LP64__
  //       1
  //   #else
  //       { 1, 1 }
  //   #endif
  //
  // . Compiles initialize a vector with vectors of different
  //   lengths, but the initializers agree up to the length
  //   of the variable being initialized, as in
  //
  //     int2 x = { 1, 2
  //   #ifdef __LP64__
  //       3
  //   #else
  //       4
  //   #endif
  //     };
  //
  // . Two compiles agree with the initializer for a vector or
  //   array, except that one has some number of explicit trailing
  //   zeroes, as in
  //
  //     int x[4] = { 3, 2, 1
  //   #ifdef __LP64__
  //       , 0
  //   #endif
  //     };

  bool MismatchedInitializers = false;
  if (!EV->getInit().isUninit()) {
    // Use phase has a scalar initializer.
    // Make sure that Collect phase had a matching scalar initializer.
    if ((variable.mInitializerCount != 1) ||
        !equal(variable.mInitializers[0], EV->getInit()))
      MismatchedInitializers = true;
  } else if (EV->getArraySize()) {
    const size_t UseSize = EV->getNumInits();
    if (variable.mInitializerCount != UseSize)
      MismatchedInitializers = true;
    else {
      for (int i = 0; i < UseSize; ++i)
        if (!equal(variable.mInitializers[i], EV->getInitArray(i))) {
          MismatchedInitializers = true;
          break;
        }
    }
  } else if (variable.mInitializerCount != 0) {
    // Use phase does not have a scalar initializer, variable is not
    // an array, and Collect phase has an initializer. This is an error.
    MismatchedInitializers = true;
  }

  if (MismatchedInitializers) {
    mRSC->ReportError(EV->getLocation(),
                      "global variable '%0' is initialized differently for 32-bit targets "
                      "than for 64-bit targets")
        << EV->getName();
    return NoVal32();
  }

  return Val32(true, variable.mAllocSize);
}
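
// The Val32 returned on the use path above carries the 32-bit allocation size
// collected for this variable; NoVal32() is returned while collecting or when
// a mismatch makes the 32-bit size meaningless. Presumably the reflection code
// uses this value to emit the 32-bit size alongside the 64-bit one.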

void ReflectionState::endVariables() {
  if (!isUsing() || mVariablesOrderFatal)
    return;

  auto &variables = mFiles.Current().mVariables;
  while (!variables.isFinished()) {
    const auto &variable = variables.UseNext();
    mRSC->ReportError("in file '%0' global variable '%1' is only present for 32-bit targets")
        << mFiles.Current().mRSSourceFileName << variable.mName;
  }
}

}  // namespace slang