1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <platform.h>
18 #include <eventQ.h>
19 #include <stddef.h>
20 #include <timer.h>
21 #include <stdio.h>
22 #include <heap.h>
23 #include <slab.h>
24 #include <cpu.h>
25 #include <util.h>
26 #include <plat/plat.h>
27 #include <plat/taggedPtr.h>
28
// Iterate over a circular intrusive list whose sentinel is "head".
// Removal-safe: "tmp" caches pos->next before the loop body runs, so the
// body may unlink (and even free) "pos" without breaking the traversal.
#define for_each_item_safe(head, pos, tmp) \
for (pos = (head)->next; tmp = (pos)->next, (pos) != (head); pos = (tmp))
31
// Intrusive doubly-linked list node. The queue header is a sentinel node of
// this same type; an empty list has next == prev == the sentinel itself.
struct EvtList
{
struct EvtList *next;
struct EvtList *prev;
};
37
// One queued event. Records are allocated from the queue's slab allocator
// and linked into the queue via the embedded "item" node.
struct EvtRecord {
struct EvtList item;      // linkage into EvtQueue::head
uint32_t evtType;         // event type; may carry EVENT_TYPE_BIT_DISCARDABLE
void* evtData;            // opaque payload pointer, owned by the producer
TaggedPtr evtFreeData;    // free/cleanup info handed back to the discard callback
};
44
struct EvtQueue {
struct EvtList head;                  // circular sentinel: head.next is oldest, head.prev is newest
struct SlabAllocator *evtsSlab;       // fixed-capacity pool of struct EvtRecord
EvtQueueForciblyDiscardEvtCbkF forceDiscardCbk; // called for events dropped without being dequeued
};
50
__evtListDel(struct EvtList * prev,struct EvtList * next)51 static inline void __evtListDel(struct EvtList *prev, struct EvtList *next)
52 {
53 next->prev = prev;
54 prev->next = next;
55 }
56
evtListDel(struct EvtList * entry)57 static inline void evtListDel(struct EvtList *entry)
58 {
59 __evtListDel(entry->prev, entry->next);
60 entry->next = entry->prev = NULL;
61 }
62
/*
 * Create an event queue that can hold up to "size" events at once.
 * forceDiscardCbk is invoked for any event that gets dropped without being
 * dequeued (queue teardown, forced discard on overflow, or removal).
 * Returns NULL if either the queue struct or the record slab cannot be
 * allocated; on partial failure the successful allocation is released.
 */
struct EvtQueue* evtQueueAlloc(uint32_t size, EvtQueueForciblyDiscardEvtCbkF forceDiscardCbk)
{
    struct EvtQueue *queue = heapAlloc(sizeof(struct EvtQueue));
    struct SlabAllocator *records = slabAllocatorNew(sizeof(struct EvtRecord),
                                                     alignof(struct EvtRecord), size);

    if (!queue || !records) {
        // undo whichever half succeeded
        if (records)
            slabAllocatorDestroy(records);
        if (queue)
            heapFree(queue);
        return NULL;
    }

    queue->forceDiscardCbk = forceDiscardCbk;
    queue->evtsSlab = records;
    // empty circular list: sentinel points at itself in both directions
    queue->head.next = &queue->head;
    queue->head.prev = &queue->head;
    return queue;
}
84
evtQueueFree(struct EvtQueue * q)85 void evtQueueFree(struct EvtQueue* q)
86 {
87 struct EvtList *pos, *tmp;
88
89 for_each_item_safe (&q->head, pos, tmp) {
90 struct EvtRecord * rec = container_of(pos, struct EvtRecord, item);
91
92 q->forceDiscardCbk(rec->evtType, rec->evtData, rec->evtFreeData);
93 slabAllocatorFree(q->evtsSlab, rec);
94 }
95
96 slabAllocatorDestroy(q->evtsSlab);
97 heapFree(q);
98 }
99
/*
 * Enqueue an event at the back of the queue (or the front if atFront).
 * If the record slab is exhausted, the oldest DISCARDABLE event is forcibly
 * discarded (via forceDiscardCbk) and its record is reused for the new event.
 * Returns false if q is NULL or if no slot could be obtained (slab full and
 * nothing discardable). Wakes the platform on success so a sleeping dequeuer
 * sees the new event.
 */
bool evtQueueEnqueue(struct EvtQueue* q, uint32_t evtType, void *evtData,
                      TaggedPtr evtFreeData, bool atFront)
{
    struct EvtRecord *rec;
    uint64_t intSta;
    struct EvtList *item = NULL, *a, *b;

    if (!q)
        return false;

    rec = slabAllocatorAlloc(q->evtsSlab);
    if (!rec) {
        struct EvtList *pos;

        intSta = cpuIntsOff();
        //find a victim for discarding
        for (pos = q->head.next; pos != &q->head; pos = pos->next) {
            rec = container_of(pos, struct EvtRecord, item);
            if (!(rec->evtType & EVENT_TYPE_BIT_DISCARDABLE))
                continue;
            q->forceDiscardCbk(rec->evtType, rec->evtData, rec->evtFreeData);
            evtListDel(pos);
            item = pos;
            // BUGFIX: stop after the first victim. Without this break, every
            // discardable event was discarded (leaking all but the last
            // unlinked record, which never got slabAllocatorFree'd), and
            // "rec" kept advancing past "item" -- if a later entry was
            // non-discardable, lines below would overwrite a record still
            // linked in the queue while inserting a different node.
            break;
        }
        cpuIntsRestore (intSta);
    } else {
        item = &rec->item;
    }

    if (!item)
        return false;

    item->prev = item->next = NULL;

    // rec and item refer to the same record here (item == &rec->item)
    rec->evtType = evtType;
    rec->evtData = evtData;
    rec->evtFreeData = evtFreeData;

    intSta = cpuIntsOff();

    // pick insertion neighbors: between sentinel and first (front) or
    // between last and sentinel (back)
    if (unlikely(atFront)) {
        b = q->head.next;
        a = b->prev;
    } else {
        a = q->head.prev;
        b = a->next;
    }

    a->next = item;
    item->prev = a;
    b->prev = item;
    item->next = b;

    cpuIntsRestore(intSta);
    platWake();
    return true;
}
157
/*
 * Remove and discard every queued event for which match() returns true.
 * Each matching event is reported to the discard callback before its record
 * is freed. Runs with interrupts masked for the whole scan, so both "match"
 * and the discard callback execute in that context -- keep them short.
 */
void evtQueueRemoveAllMatching(struct EvtQueue* q,
        bool (*match)(uint32_t evtType, const void *data, void *context),
        void *context)
{
    uint64_t intSta;
    struct EvtList *pos, *tmp;

    // guard against NULL queue, consistent with evtQueueEnqueue
    if (!q)
        return;

    intSta = cpuIntsOff();
    for_each_item_safe (&q->head, pos, tmp) {
        struct EvtRecord * rec = container_of(pos, struct EvtRecord, item);

        if (match(rec->evtType, rec->evtData, context)) {
            q->forceDiscardCbk(rec->evtType, rec->evtData, rec->evtFreeData);
            evtListDel(pos);
            slabAllocatorFree(q->evtsSlab, rec);
        }
    }
    cpuIntsRestore(intSta);
}
176
/*
 * Dequeue the oldest event. On success, the event's fields are stored through
 * evtTypeP/evtDataP/evtFreeDataP, the record is returned to the slab, and
 * true is returned. If the queue is empty and !sleepIfNone, returns false
 * immediately. If sleepIfNone, this loops: it services due timers and, when
 * none fired, sleeps the platform until woken (e.g. by evtQueueEnqueue's
 * platWake), then rechecks.
 *
 * Interrupt-mask bookkeeping is deliberate and ordering-sensitive:
 *  - each loop iteration disables interrupts before inspecting the list;
 *  - the "break" paths exit the loop with interrupts STILL OFF, and the
 *    single cpuIntsRestore after the loop reenables them;
 *  - the sleep/retry path restores interrupts at the bottom of the loop
 *    and re-disables at the top of the next iteration.
 * NOTE(review): platSleep() is entered with interrupts masked here --
 * presumably the platform's sleep primitive requires that (WFI-style race
 * avoidance); confirm against the plat layer.
 */
bool evtQueueDequeue(struct EvtQueue* q, uint32_t *evtTypeP, void **evtDataP,
                     TaggedPtr *evtFreeDataP, bool sleepIfNone)
{
    struct EvtRecord *rec = NULL;
    uint64_t intSta;

    while(1) {
        struct EvtList *pos;
        intSta = cpuIntsOff();

        pos = q->head.next;
        if (pos != &q->head) {
            // non-empty: unlink the oldest record and exit (ints still off)
            rec = container_of(pos, struct EvtRecord, item);
            evtListDel(pos);
            break;
        }
        else if (!sleepIfNone)
            break;
        else if (!timIntHandler()) {
            // check for timers
            // if any fire, do not sleep (since by the time callbacks run, more might be due)
            platSleep();
            //first thing when awake: check timers again
            timIntHandler();
        }
        cpuIntsRestore(intSta);
    }

    // matches the cpuIntsOff() of the iteration that broke out of the loop
    cpuIntsRestore(intSta);

    if (!rec)
        return false;

    *evtTypeP = rec->evtType;
    *evtDataP = rec->evtData;
    *evtFreeDataP = rec->evtFreeData;
    slabAllocatorFree(q->evtsSlab, rec);

    return true;
}
217