/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "handle_scope-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "runtime_globals.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

template <class Super>
class SpaceTest : public Super {
 public:
  jobject byte_array_class_ = nullptr;

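  // Registers |space| with the heap so it is visible to allocation and GC. Optionally revokes
  // thread-local buffers first, and suspends all threads while the heap's space list is
  // modified, since mutators may be reading that list concurrently.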
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    {
      ScopedThreadStateChange sts(Thread::Current(), kSuspended);
      ScopedSuspendAll ssa("Add image space");
      heap->AddSpace(space);
    }
    heap->SetSpaceAsDefault(space);
  }

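  // Lazily resolves the byte[] class ("[B" in JNI descriptor syntax) and caches it in
  // byte_array_class_ as a JNI local reference so it stays reachable across allocations.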
  ObjPtr<mirror::Class> GetByteArrayClass(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (byte_array_class_ == nullptr) {
      ObjPtr<mirror::Class> byte_array_class =
          Runtime::Current()->GetClassLinker()->FindSystemClass(self, "[B");
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class.Ptr());
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return self->DecodeJObject(byte_array_class_)->AsClass();
  }

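  // Allocates |bytes| from |alloc_space| and, on success, stamps a byte-array header on the
  // result via InstallClass() so it parses as a valid object. The class is held in a handle
  // across the allocation so a GC triggered during it cannot invalidate the reference.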
  mirror::Object* Alloc(space::MallocSpace* alloc_space,
                        Thread* self,
                        size_t bytes,
                        size_t* bytes_allocated,
                        size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self,
                                             bytes,
                                             bytes_allocated,
                                             usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

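  // Like Alloc(), but uses MallocSpace::AllocWithGrowth(), which may grow the footprint
  // toward the space's full capacity instead of failing at the current growth limit.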
  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space,
                                  Thread* self,
                                  size_t bytes,
                                  size_t* bytes_allocated,
                                  size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

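  // Turns the raw allocation at |o| into a well-formed byte array of total size |size|:
  // sets the class, checks the read-barrier state when Baker read barriers are in use, and
  // sets the array length to |size| minus the header size so that SizeOf() equals |size|.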
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier state.
      o->AssertReadBarrierState();
    }
    ObjPtr<mirror::Array> arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

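  // The byte-array header size, i.e. the offset of the first data element. This is the
  // smallest allocation that InstallClass() can dress up as a byte[].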
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

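  // Factory signature the per-space tests implement, typically as a thin wrapper around the
  // static Create() routine of the space under test (e.g. DlMallocSpace or RosAllocSpace).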
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name,
                                        size_t initial_size,
                                        size_t growth_limit,
                                        size_t capacity);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

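// Deterministic pseudo-random number generator so random-size test runs are reproducible.
// This is a linear congruential generator using the classic C-library constants
// (multiplier 1103515245, increment 12345).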
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

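// Fills |space| with objects of |object_size| bytes (a negative value means random sizes of
// up to |-object_size| bytes, clamped to at least a zero-length byte array), checking the
// footprint and size against |growth_limit| throughout; then frees the objects in passes
// while trimming, and finally checks that an allocation of three quarters of |growth_limit|
// succeeds, i.e. that the freed memory was coalesced. Rounds <= 1 allocate with Alloc(),
// later rounds with AllocWithGrowth().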
template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space,
                                                           intptr_t object_size,
                                                           int round,
                                                           size_t growth_limit) {
  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
    // No allocation can succeed.
    return;
  }

  // The space's footprint equals the amount of memory it has requested from the system.
  size_t footprint = space->GetFootprint();

  // The space must at least have its bookkeeping allocated.
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the growth limit.
  EXPECT_LE(footprint, growth_limit);

  // The space's size shouldn't exceed the growth limit either.
  EXPECT_LE(space->Size(), growth_limit);

  // This invariant should always hold: otherwise the space has grown larger than it believes
  // its size to be, which would break other invariants.
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit.
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // Index at which allocation finally failed, i.e. the object count.
  size_t amount_allocated = 0;  // Total bytes allocated so far.
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // Number of failed allocations for the current object.
    size_t max_fails = 30;  // Number of times we fail an allocation before giving up.
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      size_t bytes_tl_bulk_allocated;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
                            &bytes_tl_bulk_allocated));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // Invariant.
      if (object != nullptr) {  // Allocation succeeded.
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                    bytes_tl_bulk_allocated >= allocation_size);
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // We should have filled the space.
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth limit.
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // The footprint and size should agree with the amount allocated.
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in passes: trim, check bounds, then free every free_increment-th object,
  // halving the stride each time.
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds consistency check.
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects.
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // Invariant.
    }

    free_increment >>= 1;
  }

  // The space is now empty, and a large object is allocated below. For RosAlloc, revoke the
  // thread-local runs, which are kept even when empty for performance, so that they don't
  // cause the large allocation to fail due to fragmentation. They are normally revoked at
  // each GC, but no GC happens here.
  space->RevokeAllThreadLocalBuffers();

  // All memory was released; try a large allocation to check that freed memory is being
  // coalesced.
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  size_t bytes_tl_bulk_allocated;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
                              &bytes_tl_bulk_allocated));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr, &bytes_tl_bulk_allocated));
  }
  EXPECT_TRUE(large_object != nullptr);

  // Consistency check of the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up.
  space->Free(self, large_object.Assign(nullptr));

  // Consistency check of the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

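// Creates a space through |create_space| and runs the body test three times: round 1 capped
// at the initial size, round 2 at the growth limit, and round 3, after clearing the growth
// limit, at the full capacity.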
template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                             CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity));
  ASSERT_TRUE(space != nullptr);

  // Basic consistency check.
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make the space findable to the heap; the space will also be deleted when the runtime is
  // cleaned up.
  AddSpace(space);

  // In round 1 we don't allocate with growth and therefore can't grow past the initial size,
  // which effectively makes the initial size the growth limit, so pass it as such.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove the growth limit.
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

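// The macros below stamp out one gtest per allocation size. The Static family allocates
// fixed-size objects; the Random family negates |size|, which the test body interprets as
// "random sizes up to this bound".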
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-(size), spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
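
// Usage sketch: a per-space test file (e.g. dlmalloc_space_static_test.cc) supplies a factory
// matching CreateSpaceFn and instantiates a macro family with it. The wrapper name below is
// illustrative, not a real API:
//
//   MallocSpace* CreateSpaceUnderTest(const std::string& name, size_t initial_size,
//                                     size_t growth_limit, size_t capacity);
//
//   TEST_SPACE_CREATE_FN_STATIC(SpaceUnderTest, CreateSpaceUnderTest)
//   TEST_SPACE_CREATE_FN_RANDOM(SpaceUnderTest, CreateSpaceUnderTest)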

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_