/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef LOG_TAG
#undef LOG_TAG
#endif
#define LOG_TAG "VSoCGrallocRegionRegistry"
// Ensure verbose (ALOGV) messages are compiled in, even on release builds
#define LOG_NDEBUG 0

#include <limits.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>
#include <string.h>

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>  // for the ASHMEM_GET_NAME ioctl in get_buffer_name()

#include <cutils/hashmap.h>
#include <log/log.h>
#include <cutils/atomic.h>

#include <linux/ashmem.h>

#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>

#include "gralloc_vsoc_priv.h"

#include <deque>
#include <map>
#include <mutex>

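// Compile-time switches for extra ALOGV logging: g_log_maps traces every
// map/unmap of a region, g_log_refs traces every reference count change.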
static const bool g_log_maps = false;
static const bool g_log_refs = false;

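// Per-buffer bookkeeping: the address the ashmem region is mapped at (null
// when unmapped) and the number of outstanding references to that mapping.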
struct GrallocRegion {
  void* base_;
  int   num_references_;

  GrallocRegion() : base_(0), num_references_(0) { }
  // Copy constructors are ok.
};

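// Retrieves the ashmem name of the buffer backing |hnd| into |output| via the
// ASHMEM_GET_NAME ioctl. On any failure the empty string is returned, so the
// result is always safe to use as a hashmap key or in log messages.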
static const char* get_buffer_name(
    const private_handle_t* hnd, char output[ASHMEM_NAME_LEN]) {
  output[0] = '\0';
  if (!hnd) {
    ALOGE("Attempted to log gralloc name hnd=NULL");
    return output;
  }
  if (hnd->fd == -1) {
    ALOGE("Attempted to log gralloc name hnd=%p with fd == -1", hnd);
    return output;
  }
  int rval = ioctl(hnd->fd, ASHMEM_GET_NAME, output);
  if (rval == -1) {
    output[0] = '\0';
  }
  return output;
}

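// Hash and equality callbacks that let the cutils Hashmap use the
// NUL-terminated ashmem name as its key.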
static int str_hash(void* str) {
  return hashmapHash(str, strlen(reinterpret_cast<const char*>(str)));
}


static bool str_equal(void* a, void* b) {
  return strcmp(
      reinterpret_cast<const char*>(a),
      reinterpret_cast<const char*>(b)) == 0;
}

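// Lazily creates the process-wide map from buffer name to GrallocRegion.
// The map is intentionally never destroyed.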
static Hashmap* get_regions() {
  static Hashmap* regionMap = hashmapCreate(19, str_hash, str_equal);
  return regionMap;
}

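// Looks up (or creates) the GrallocRegion for the buffer behind |hnd|, keyed
// by its ashmem name, and returns it with the hashmap lock held. The name is
// also copied into |region_name|. Callers must release the lock with
// unlock_region().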
static GrallocRegion* lock_region_for_handle(
    const private_handle_t* hnd, char region_name[ASHMEM_NAME_LEN]) {
  region_name[0] = '\0';
  get_buffer_name(hnd, region_name);
  Hashmap* hash = get_regions();
  hashmapLock(hash);
  GrallocRegion* region = reinterpret_cast<GrallocRegion*>(
      hashmapGet(hash, region_name));
  if (!region) {
    region = new GrallocRegion;
    hashmapPut(hash, strdup(region_name), region);
  }
  return region;
}

/* The current implementation uses only a single lock for all regions.
 * This method takes a region to simplify the refactoring if we move to
 * finer-grained locks.
 */
static inline void unlock_region(GrallocRegion* ) {
  hashmapUnlock(get_regions());
}

/*
 * surface_flinger can drop its last reference to a gralloc buffer (from the
 * gralloc HAL's point of view) even though it also has work in flight to the
 * GPU for that target. This causes segfaults in the swiftshader code.
 *
 * We create a compromise solution. On unmap we release the pages by mmapping
 * anonymous memory over the range, but we don't release the address space.
 * Instead we mark the address space for recycling into a new gralloc buffer.
 * This means that the shaders can still write, that the writes won't land in
 * the gralloc buffer, and that the gralloc buffer memory can be released.
 *
 * When we're preparing to mmap a new gralloc buffer we see if we can recycle
 * address space from a prior gralloc buffer.
 *
 * This protects the application layer from stray memory writes and pointer
 * references to freed memory. It does mean that bad pixel data can land in
 * a buffer in the case of a fast map-unmap-map sequence. However, that
 * could also happen on a physical GPU.
 *
 * The alternative to this would be to create an elaborate reference counting
 * mechanism below both gralloc and SwiftShader. However, we want to keep the
 * SwiftShader code clean, so that seems undesirable.
 *
 * This problem also comes up for physical GPUs, see b/62267886. Background for
 * this solution is in b/118777601.
 */

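/*
 * Illustrative sequence (not an actual call site in this file; the op strings
 * are arbitrary labels used only for logging):
 *
 *   void* p = reference_region("map", hnd);   // first ref: mmap + guard page
 *   unreference_region("unmap", hnd);         // last ref: pages released, the
 *                                             // address range is kept and
 *                                             // recorded for recycling
 *   void* q = reference_region("map", hnd2);  // a same-sized buffer may be
 *                                             // mapped at the recycled range
 */

// Address ranges retired by recycle_munmap(), keyed by mapping length and
// guarded by g_recycled_addrs_mutex.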
static std::map<size_t, std::deque<void*>> g_recycled_addrs;
std::mutex g_recycled_addrs_mutex;


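// Drop-in replacement for mmap(): when the caller passes no address hint and a
// previously recycled range of the same length is available, that range is
// reused by adding MAP_FIXED; otherwise this is a plain mmap() call.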
static void* recycle_mmap(void *addr, size_t length, int prot, int flags,
                          int fd, off_t offset) {
  if (!addr) {
    std::lock_guard<std::mutex> guard(g_recycled_addrs_mutex);
    auto it = g_recycled_addrs.find(length);
    if (it != g_recycled_addrs.end()) {
      if (it->second.size()) {
        addr = it->second.front();
        flags |= MAP_FIXED;
        it->second.pop_front();
      }
    }
  }
  return mmap(addr, length, prot, flags, fd, offset);
}

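// Counterpart of recycle_mmap(): instead of unmapping, anonymous memory is
// mapped over the range so the pages can be reclaimed while the address range
// stays reserved, and the range is then recorded for reuse. Returns -1 (and
// recycles nothing) if the overlay mapping fails.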
static int recycle_munmap(void *addr, size_t length) {
  // Do this first so we don't hold the mutex during the syscall
  if (addr != mmap(addr, length, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0)) {
    // Be conservative. Don't recycle here.
    return -1;
  }
  std::lock_guard<std::mutex> guard(g_recycled_addrs_mutex);
  g_recycled_addrs[length].push_back(addr);
  return 0;
}

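// Adds a reference to the region backing |hnd|, mapping it (and protecting its
// trailing guard page) on the first reference. Returns the mapped base address
// or NULL on failure; |op| is only used for logging.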
void* reference_region(const char* op, const private_handle_t* hnd) {
  char name_buf[ASHMEM_NAME_LEN];
  GrallocRegion* region = lock_region_for_handle(hnd, name_buf);
  if (!region->base_) {
    void* mappedAddress = recycle_mmap(
        0, hnd->total_size, PROT_READ|PROT_WRITE, MAP_SHARED, hnd->fd, 0);
    if (mappedAddress == MAP_FAILED) {
      ALOGE("Could not mmap %s", strerror(errno));
      unlock_region(region);
      return NULL;
    }
    // Set up the guard pages. The last page is always a guard.
    uintptr_t base = uintptr_t(mappedAddress);
    uintptr_t addr = base + hnd->total_size - PAGE_SIZE;
    if (mprotect((void*)addr, PAGE_SIZE, PROT_NONE) == -1) {
      ALOGE("mprotect base=%p, pg=%p failed (%s)", (void*)base, (void*)addr,
            strerror(errno));
    }
    region->base_ = mappedAddress;
    ALOGV_IF(g_log_maps, "Mapped %s hnd=%p fd=%d base=%p format=%s(0x%x) "
              "width=%d height=%d stride_in_pixels=%d total_size=%d",
          name_buf, hnd, hnd->fd, region->base_,
          pixel_format_to_string(hnd->format), hnd->format,
          hnd->x_res, hnd->y_res, hnd->stride_in_pixels, hnd->total_size);
  }

  void* rval = region->base_;
  ++region->num_references_;
  ALOGV_IF(g_log_refs, "Referencing name=%s op=%s addr=%p new numRefs=%d",
           name_buf, op, region->base_, region->num_references_);
  unlock_region(region);
  return rval;
}

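// Drops a reference to the region backing |hnd|. When the last reference goes
// away the mapping is handed to recycle_munmap(). Returns 0 on success or -1
// if the region was not mapped or had no outstanding references.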
int unreference_region(const char* op, const private_handle_t* hnd) {
  char name_buf[ASHMEM_NAME_LEN];

  GrallocRegion* region = lock_region_for_handle(hnd, name_buf);
  if (!region->base_) {
    ALOGE("Unmapping region with no map hnd=%p", hnd);
    unlock_region(region);
    return -1;
  }
  if (region->num_references_ < 1) {
    ALOGE(
        "unmap with hnd=%p, numReferences=%d", hnd, region->num_references_);
    unlock_region(region);
    return -1;
  }
  --region->num_references_;
  if (!region->num_references_) {
    ALOGV_IF(g_log_maps, "Unmapped %s hnd=%p fd=%d base=%p", name_buf, hnd,
             hnd->fd, region->base_);
    if (recycle_munmap(region->base_, hnd->total_size) < 0) {
      ALOGE("Could not unmap %s", strerror(errno));
    }
    region->base_ = 0;
  }
  ALOGV_IF(g_log_refs, "Unreferencing name=%s op=%s addr=%p new numRefs=%d",
           name_buf, op, region->base_, region->num_references_);
  unlock_region(region);
  return 0;
}