/bionic/tests/
threads_test.cpp
    31  #if __has_include(<threads.h>)
    66  TEST(threads, call_once) {  [in TEST(), argument]
    80  TEST(threads, cnd_broadcast__cnd_wait) {  [in TEST(), argument]
   116  TEST(threads, cnd_init__cnd_destroy) {  [in TEST(), argument]
   126  TEST(threads, cnd_signal__cnd_wait) {  [in TEST(), argument]
   176  TEST(threads, cnd_timedwait_timedout) {  [in TEST(), argument]
   192  TEST(threads, cnd_timedwait) {  [in TEST(), argument]
   222  TEST(threads, mtx_init) {  [in TEST(), argument]
   236  TEST(threads, mtx_destroy) {  [in TEST(), argument]
   246  TEST(threads, mtx_lock_plain) {  [in TEST(), argument]
   [all …]
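These threads_test.cpp hits cover bionic's C11 `<threads.h>` support: call_once, the cnd_* condition-variable calls, and the mtx_* mutex calls. As orientation only, here is a minimal self-contained sketch of that standard API; it is not code taken from the test file.

```cpp
#include <threads.h>

#include <cstdio>

static once_flag g_once = ONCE_FLAG_INIT;
static mtx_t g_mutex;
static cnd_t g_cond;
static bool g_ready = false;

static void init_once() {
  std::puts("runs exactly once, however many threads reach call_once");
}

static int waiter(void*) {
  call_once(&g_once, init_once);
  mtx_lock(&g_mutex);
  while (!g_ready) cnd_wait(&g_cond, &g_mutex);  // sleep until signaled
  mtx_unlock(&g_mutex);
  return 0;
}

int main() {
  mtx_init(&g_mutex, mtx_plain);
  cnd_init(&g_cond);

  thrd_t t;
  thrd_create(&t, waiter, nullptr);

  mtx_lock(&g_mutex);
  g_ready = true;
  cnd_signal(&g_cond);
  mtx_unlock(&g_mutex);

  thrd_join(t, nullptr);
  cnd_destroy(&g_cond);
  mtx_destroy(&g_mutex);
  return 0;
}
```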
malloc_stress_test.cpp
    51  std::vector<std::thread*> threads;  [in TEST(), local]
    53  threads.push_back(new std::thread([]() {  [in TEST()]
    65  for (auto thread : threads) {  [in TEST()]
    69  threads.clear();  [in TEST()]
leak_test.cpp
   134  struct thread_data { pthread_barrier_t* barrier; pid_t* tid; } threads[thread_count];  [in TEST(), local]
   142  threads[i] = {&barrier, &tids[i]};  [in TEST()]
   150  ASSERT_EQ(0, pthread_create(&thread, nullptr, thread_function, &threads[i]));  [in TEST()]
ifaddrs_test.cpp
   278  std::vector<std::thread*> threads;  [in TEST(), local]
   280  threads.push_back(new std::thread([]() {  [in TEST()]
   286  for (auto& t : threads) {  [in TEST()]
malloc_test.cpp
   903  std::vector<std::thread*> threads;  [in TEST(), local]
   918  threads.push_back(t);  [in TEST()]
   942  for (auto thread : threads) {  [in TEST()]
  1196  pthread_t threads[kNumThreads];  [in SetAllocationLimitMultipleThreads(), local]
  1198  ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));  [in SetAllocationLimitMultipleThreads()]
  1212  ASSERT_EQ(0, pthread_join(threads[i], &result));  [in SetAllocationLimitMultipleThreads()]
pthread_test.cpp
  2702  std::vector<pthread_t> threads(data.thread_count);  [in TEST(), local]
  2703  std::vector<BarrierTestHelperArg> args(threads.size());  [in TEST()]
  2704  for (size_t i = 0; i < threads.size(); ++i) {  [in TEST()]
  2707  ASSERT_EQ(0, pthread_create(&threads[i], nullptr,  [in TEST()]
  2710  for (size_t i = 0; i < threads.size(); ++i) {  [in TEST()]
  2711  ASSERT_EQ(0, pthread_join(threads[i], nullptr));  [in TEST()]
  2773  std::vector<pthread_t> threads(THREAD_COUNT);  [in TEST(), local]
  2780  ASSERT_EQ(0, pthread_create(&threads[i], nullptr,  [in TEST()]
  2785  ASSERT_EQ(0, pthread_join(threads[i], nullptr));  [in TEST()]
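The pthread_test.cpp matches around line 2702 come from barrier tests that fan out a set of pthreads, rendezvous them, and join them. A hedged sketch of that create/wait/join shape follows; the helper names are made up and are not the test's own BarrierTestHelperArg machinery.

```cpp
#include <pthread.h>

#include <cstdio>
#include <vector>

static pthread_barrier_t g_barrier;

static void* worker(void* arg) {
  // Exactly one waiter is returned PTHREAD_BARRIER_SERIAL_THREAD; the rest get 0.
  if (pthread_barrier_wait(&g_barrier) == PTHREAD_BARRIER_SERIAL_THREAD) {
    std::printf("thread %zu was chosen as the serial thread\n",
                reinterpret_cast<size_t>(arg));
  }
  return nullptr;
}

int main() {
  constexpr size_t kThreadCount = 4;
  pthread_barrier_init(&g_barrier, nullptr, kThreadCount);

  std::vector<pthread_t> threads(kThreadCount);
  for (size_t i = 0; i < threads.size(); ++i) {
    pthread_create(&threads[i], nullptr, worker, reinterpret_cast<void*>(i));
  }
  for (pthread_t t : threads) {
    pthread_join(t, nullptr);
  }
  pthread_barrier_destroy(&g_barrier);
  return 0;
}
```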
/bionic/libc/async_safe/
README.md
     6  it among threads, whereas these functions connect to liblog for each log message. While it's
    10  threads. Therefore, we maintain these two separate mechanisms.
/bionic/tools/versioner/src/
Driver.cpp
   213  std::vector<std::thread> threads;  [in initializeTargetCC1FlagCache(), local]
   215  threads.emplace_back([type, &vfs, &reqs]() {  [in initializeTargetCC1FlagCache()]
   225  for (auto& thread : threads) {  [in initializeTargetCC1FlagCache()]
versioner.cpp
   194  std::vector<std::thread> threads;  [in compileHeaders(), local]
   241  threads.emplace_back([&jobs, &job_index, &result, vfs]() {  [in compileHeaders()]
   255  for (auto& thread : threads) {  [in compileHeaders()]
   258  threads.clear();  [in compileHeaders()]
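The compileHeaders() snippets show a small worker pool: a shared job list, an atomic cursor, and one lambda per thread claiming jobs until the list is drained. Below is a self-contained sketch of that pattern with illustrative stand-ins (plain ints instead of real compilation jobs), not the versioner's actual code.

```cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  std::vector<int> jobs(100, 1);       // stand-ins for real compilation jobs
  std::atomic<size_t> job_index(0);    // next job to claim
  std::atomic<int> result(0);

  unsigned worker_count = std::thread::hardware_concurrency();
  if (worker_count == 0) worker_count = 2;

  std::vector<std::thread> threads;
  for (unsigned i = 0; i < worker_count; ++i) {
    threads.emplace_back([&jobs, &job_index, &result]() {
      while (true) {
        size_t idx = job_index.fetch_add(1);
        if (idx >= jobs.size()) break;  // queue drained
        result += jobs[idx];            // "process" the claimed job
      }
    });
  }
  for (auto& thread : threads) {
    thread.join();
  }
  threads.clear();

  std::printf("processed %d jobs\n", result.load());
  return 0;
}
```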
/bionic/tests/headers/posix/
threads_h.c
    29  #if __has_include(<threads.h>)
/bionic/docs/
fdsan.md
    10  For example, given two threads running the following code:
   109  std::vector<std::thread> threads;
   111  threads.emplace_back(function);
   113  for (auto& thread : threads) {
   119  When running the program, the threads' executions will be interleaved as follows:
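Lines 10 and 119 of fdsan.md bracket its example of a file-descriptor double-close race between two threads. The following is a rough reconstruction of that kind of bug, not the document's exact snippet; the path and payload here are invented.

```cpp
#include <fcntl.h>
#include <unistd.h>

#include <thread>

// Thread one closes the same descriptor twice. Between the two close() calls
// the kernel may hand the same fd number to thread two, so the second close()
// can silently destroy thread two's freshly opened file.
void thread_one() {
  int fd = open("/dev/null", O_RDONLY);
  close(fd);
  close(fd);  // bug: double close
}

void thread_two() {
  int fd = open("/data/local/tmp/example.log", O_WRONLY | O_CREAT | O_APPEND, 0644);
  write(fd, "important data\n", 15);
  close(fd);
}

int main() {
  std::thread t1(thread_one);
  std::thread t2(thread_two);
  t1.join();
  t2.join();
  return 0;
}
```

fdsan exists to turn that second close() into a diagnosable error instead of silent corruption of another thread's descriptor.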
native_allocator.md
    33  This function, when called, should pause all threads that are making a
    35  is made to `malloc_enable`, the paused threads should start running again.
   318  mechanism will simulate this by creating threads and replaying the operations
   321  in all threads since it collapses all of the allocation operations to occur
   322  one after another. This will cause a lot of threads allocating at the same
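Lines 33 and 35 describe the `malloc_disable`/`malloc_enable` pair a native allocator implementation is expected to provide: once `malloc_disable` returns, no thread may proceed through the allocator until `malloc_enable` is called. A loose sketch of that contract, assuming a single global lock; this is illustrative only and not the real allocator code.

```cpp
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t g_disable_lock = PTHREAD_MUTEX_INITIALIZER;

// Held from malloc_disable() until malloc_enable(); typically wrapped
// around fork() so the child never sees half-updated allocator state.
extern "C" void malloc_disable() {
  pthread_mutex_lock(&g_disable_lock);
}

extern "C" void malloc_enable() {
  pthread_mutex_unlock(&g_disable_lock);
}

// Every allocation entry point passes through the same lock, so callers
// park while the allocator is disabled and resume on malloc_enable().
void* example_malloc(size_t size) {
  pthread_mutex_lock(&g_disable_lock);
  pthread_mutex_unlock(&g_disable_lock);
  // ... the real allocation work would go here ...
  (void)size;
  return nullptr;  // placeholder only
}
```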
elf-tls.md
   221  `dlopen` can initialize the new static TLS memory in all existing threads. A thread list could be
   455  `thread_local` | - C11: a macro for `_Thread_local` via `threads.h`<br/> - C++11: a keyword, allo…
   562  On the other hand, maybe lazy allocation is a feature, because not all threads will use a dlopen'ed
   567  > up the process. It would be a waste of memory and time to allocate the storage for all threads. A
   570  > alternative to stopping all threads and allocating storage for all threads before letting them run
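The table row at line 455 contrasts `thread_local` in C11 (a `<threads.h>` macro expanding to `_Thread_local`) with C++11 (a keyword). A tiny C++ illustration of the per-thread storage both spellings provide:

```cpp
#include <cstdio>
#include <thread>

thread_local int g_calls = 0;  // each thread gets its own copy

void bump() {
  ++g_calls;
  std::printf("this thread has called bump() %d times\n", g_calls);
}

int main() {
  bump();                                       // main's copy becomes 1
  std::thread([] { bump(); bump(); }).join();   // the other thread's copy reaches 2
  bump();                                       // main's copy becomes 2, unaffected
  return 0;
}
```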
status.md
    54  * Full C11 `<threads.h>` (available as inlines for older API levels).
/bionic/libc/malloc_debug/tests/
malloc_debug_unit_tests.cpp
  1031  std::vector<std::thread*> threads(1000);  [in TEST_F(), local]
  1032  for (size_t i = 0; i < threads.size(); i++) {  [in TEST_F()]
  1033  threads[i] = new std::thread([](){  [in TEST_F()]
  1041  for (size_t i = 0; i < threads.size(); i++) {  [in TEST_F()]
  1042  threads[i]->join();  [in TEST_F()]
  1043  delete threads[i];  [in TEST_F()]
/bionic/libc/
Android.bp
  1150  "bionic/threads.cpp",