/*
 * Copyright (C) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "epoll.h"
#include "spunge.h"
#include "spunge_app.h"
#include "socket_common.h"
#include "res.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Search the eventpoll rb-tree for the item registered for the given fd.
 * Returns the matching "struct EpItem", or FILLP_NULL_PTR if the fd is
 * not registered with this epoll instance.
 */
static struct EpItem *EpFind(struct EventPoll *ep, FILLP_INT fd)
{
    struct RbNode *rbp = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;
    struct EpItem *ret = FILLP_NULL_PTR;

    FILLP_UINT loopLimit = g_spunge->resConf.maxEpollItemNum;

    for (rbp = ep->rbr.rbNode; rbp && loopLimit; loopLimit--) {
        epi = EpItemEntryRbNode(rbp);
        if (fd > epi->fileDespcriptor) {
            rbp = rbp->rbRight;
        } else if (fd < epi->fileDespcriptor) {
            rbp = rbp->rbLeft;
        } else {
            /* Found it */
            ret = epi;
            break;
        }
    }

    return ret;
}

/*
 * Insert an epitem into eventpoll->rbr, keyed by file descriptor.
 */
static void EpRbtreeInsert(struct EventPoll *ep, struct EpItem *epi)
{
    struct RbNode **p = &ep->rbr.rbNode;
    struct RbNode *parent = FILLP_NULL_PTR;
    struct EpItem *epic = FILLP_NULL_PTR;
    FILLP_UINT loopLimit = g_spunge->resConf.maxEpollItemNum;

    while (*p && loopLimit--) {
        parent = *p;
        epic = EpItemEntryRbNode(parent);
        if (epi->fileDespcriptor > epic->fileDespcriptor) {
            p = &parent->rbRight;
        } else {
            p = &parent->rbLeft;
        }
    }

    epi->rbn.rbLeft = epi->rbn.rbRight = FILLP_NULL_PTR;
    epi->rbn.rbParent = parent;

    epi->rbn.color = RB_RED;

    *p = &epi->rbn;

    FillpRbInsertColor(&epi->rbn, &ep->rbr);
}

/*
 * Add the epitem to sock->epTaskList.
 * epi is an application-side pointer.
 */
static void EpollAddToSockWaitList(struct FtSocket *sock, struct EpItem *epi)
{
    if (SYS_ARCH_SEM_WAIT(&sock->epollTaskListLock)) {
        FILLP_LOGERR("Sem Wait fail");
        return;
    }
    HlistAddTail(&sock->epTaskList, &epi->sockWaitNode);
    (void)SYS_ARCH_SEM_POST(&sock->epollTaskListLock);
}

/* Check and trigger pending events while handling an epoll ctl operation */
static void EpollCtlTriggleEvent(
    struct EventPoll *ep,
    struct FtSocket *sock,
    struct EpItem *epi)
{
    epi->revents = 0;
    if (SYS_ARCH_ATOMIC_READ(&sock->rcvEvent) > 0) {
        epi->revents |= SPUNGE_EPOLLIN;
    }

    if ((SYS_ARCH_ATOMIC_READ(&sock->sendEvent) != 0) && (SYS_ARCH_ATOMIC_READ(&sock->sendEventCount) > 0)) {
        epi->revents |= SPUNGE_EPOLLOUT;
    }

    epi->revents |= (FILLP_UINT32)sock->errEvent;
    epi->revents &= epi->event.events;

    if (epi->revents > 0) {
        EpSocketReady(ep, epi);
    }
}

static struct EpItem *EpollMallocEpitem(void)
{
    struct EpItem *epi = FILLP_NULL_PTR;
    FILLP_INT ret = DympAlloc(g_spunge->epitemPool, (void **)&epi, FILLP_FALSE);
    if ((ret != ERR_OK) || (epi == FILLP_NULL_PTR)) {
        FILLP_LOGERR("MP_MALLOC epoll failed.");
        return FILLP_NULL_PTR;
    }

    /* A parent pointer that points at the node itself marks an item that is not yet linked into the rb-tree */
    epi->rbn.rbParent = &(epi->rbn);
    epi->fileDespcriptor = -1;
    epi->ep = FILLP_NULL_PTR;
    epi->revents = FILLP_NULL_NUM;
    HLIST_INIT_NODE(&epi->rdlNode);
    HLIST_INIT_NODE(&epi->sockWaitNode);

    return epi;
}

/*
 * Modify the interest event mask of an existing epitem and re-check
 * whether the socket is already ready for the new mask.
 */
static FILLP_INT EpModify(
    struct EventPoll *ep,
    struct FtSocket *sock,
    struct EpItem *epi,
    FILLP_CONST struct SpungeEpollEvent *event)
{
    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("Sem wait fail");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }
    (void)memcpy_s(&epi->event, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    EpollCtlTriggleEvent(ep, sock, epi);
    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);
    return FILLP_OK;
}

/*
 * Unlink the "struct EpItem" from all places it might have been hooked up,
 * i.e. remove the epitem from eventpoll->rbr.
 *
 * Comment 1:
 *  This function can be called for an item that has already been unlinked.
 *  The check protects us from doing a double unlink (crash).
 *
 * Comment 2:
 *  Clear the event mask for the unlinked item. This prevents notifications
 *  for the item from being sent after the unlink operation from inside
 *  the kernel->userspace event transfer loop.
 *
 * Comment 3:
 *  At this point it is safe to do the job: unlink the item from our rb-tree.
 *  This operation, together with the check above, closes the door to
 *  double unlinks.
 *
 * Comment 4:
 *  If the item we are going to remove is on the ready list, remove it from
 *  that list as well to avoid stale events.
 */
static FILLP_INT EpUnlink(struct EventPoll *ep, struct EpItem *epi)
{
    /* Comment 1 */
    if (epi->rbn.rbParent == &(epi->rbn)) {
        FILLP_LOGERR("struct EpItem already unlinked.");
        SET_ERRNO(FILLP_EINVAL);
        return ERR_FAILURE;
    }

    /* Comment 2 */
    epi->event.events = 0;

    /* Comment 3 */
    FillpRbErase(&epi->rbn, &ep->rbr);

    /* Comment 4 */
    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("Sem Wait fail");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }

    epi->revents = FILLP_NULL_NUM;
    EpDelRdlnode(ep, epi);

    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);

    return FILLP_OK;
}

/*
 * Removes a "struct EpItem" from the eventpoll rb-tree and releases
 * all the associated resources.
 * epi is an application-side pointer.
 */
static FILLP_INT EpRemove(struct EventPoll *ep, struct EpItem *epi)
{
    FILLP_INT error;
    FILLP_INT fd;
    struct FtSocket *sock = FILLP_NULL_PTR;
    struct HlistNode *node = FILLP_NULL_PTR;

    if ((ep == FILLP_NULL_PTR) || (epi == FILLP_NULL_PTR)) {
        FILLP_LOGERR("EpRemove: Invalid parameters passed.");
        SET_ERRNO(FILLP_EINVAL);
        return ERR_NULLPTR;
    }

    fd = epi->fileDespcriptor;

    /* The fd has already been validated by the caller (SpungeEpollCtl),
       so there is no need to validate the original socket again here.

       Call path: FtEpollCtl->SpungeEpollCtl->EpRemove/EpInsert
    */
    sock = SockGetSocket(fd);
    if ((sock == FILLP_NULL_PTR) || (sock->allocState == SOCK_ALLOC_STATE_EPOLL)) {
        FILLP_LOGERR("EpRemove: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return ERR_COMM;
    }

    if (SYS_ARCH_SEM_WAIT(&(sock->epollTaskListLock))) {
        FILLP_LOGERR("sem wait fail");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }
    node = HLIST_FIRST(&sock->epTaskList);
    while (node != FILLP_NULL_PTR) {
        if (node == &epi->sockWaitNode) {
            HlistDelete(&sock->epTaskList, node);
            break;
        }
        node = node->next;
    }
    (void)SYS_ARCH_SEM_POST(&(sock->epollTaskListLock));

    /* Really unlink the item from the rb-tree and the ready list */
    error = EpUnlink(ep, epi);
    if (error != ERR_OK) {
        return error;
    }

    DympFree(epi);

    return FILLP_OK;
}

static FILLP_INT EpGetEventsAndSignal(
    struct EventPoll *ep,
    struct SpungeEpollEvent *events,
    FILLP_INT maxEvents,
    FILLP_SLONG timeout)
{
    FILLP_INT eventCount = 0;
    struct HlistNode *node = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;

    if (SYS_ARCH_SEM_WAIT(&ep->appSem)) {
        FILLP_LOGERR("app-sem wait fail");
        return ERR_COMM;
    }
    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("core-sem wait fail");
        (void)SYS_ARCH_SEM_POST(&ep->appSem);
        return ERR_COMM;
    }
    node = HLIST_FIRST(&ep->rdList);
    while ((node != FILLP_NULL_PTR) && (eventCount < maxEvents)) {
        epi = EpItemEntryRdlNode(node);
        node = node->next;

        epi->revents &= epi->event.events;
        EpollUpdateEpEvent(epi);

        if (epi->revents > 0) {
            events[eventCount].events = epi->revents;
            (void)memcpy_s(&events[eventCount].data, sizeof(events[eventCount].data), &epi->event.data,
                sizeof(epi->event.data));
            eventCount++;
        }

        /* Drop the item from the ready list if it has no pending event, or if it is
           registered edge-triggered (SPUNGE_EPOLLET) and has just been reported */
        if ((epi->revents == 0) || (epi->event.events & SPUNGE_EPOLLET)) {
            EpDelRdlnode(ep, epi);
        }
    }

    if ((timeout != 0) && (eventCount == 0)) {
        /* The caller will wait for a signal in this case, so reset the signal flag under the appCoreSem lock */
        (void)SYS_ARCH_ATOMIC_SET(&ep->semSignalled, 0);
    }

    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);
    (void)SYS_ARCH_SEM_POST(&ep->appSem);
    if (eventCount > 0) {
        FILLP_LOGDBG("Get eventCount:%d", eventCount);
    }
    return eventCount;
}
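
/*
 * Note on readiness semantics (added commentary): a level-triggered item stays
 * on ep->rdList for as long as epi->revents is non-zero, so it is reported on
 * every scan above, while an item registered with SPUNGE_EPOLLET is removed
 * from the ready list right after one report and is only queued again when the
 * core signals a new event via EpSocketReady(). Minimal caller-side sketch,
 * kept out of the build; the fd values are hypothetical and draining the
 * socket is left to the FillP receive API:
 */
#if 0
static void EpollEdgeTriggerSketch(FILLP_INT epFd, FILLP_INT fd)
{
    struct SpungeEpollEvent ev;
    (void)memset_s(&ev, sizeof(ev), 0, sizeof(ev));
    ev.events = SPUNGE_EPOLLIN | SPUNGE_EPOLLET; /* report read readiness once per event burst */
    ev.data.u64 = (FILLP_UINT32)fd;              /* caller cookie, echoed back by the wait call */
    (void)SpungeEpollCtl(epFd, SPUNGE_EPOLL_CTL_ADD, fd, &ev);
    /* After each wakeup the caller must read until the socket would block,
       because the item has already been dropped from the ready list. */
}
#endif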

static FILLP_INT EpPoll(
    struct FtSocket *sock,
    struct SpungeEpollEvent *events,
    FILLP_INT maxEvents,
    FILLP_SLONG timeout)
{
    FILLP_INT eventCount = 0;
    FILLP_INT semTimedWait;
    FILLP_LLONG begintime = 0;
    FILLP_LLONG endtime;
    FILLP_UCHAR isTakenBeginTs = 0;
    FILLP_BOOL needLoopNun = FILLP_TRUE;
    FILLP_SLONG timeoutBkp = timeout;
    FILLP_SLONG timeoutWork = timeout;
    struct EventPoll *ep = sock->eventEpoll;

    /*
     * If there is no available event to return to the caller, we sleep here
     * and are woken up via ep->waitSem when events become available.
     *
     * We do not take the ready-list lock here because that is hard to do
     * safely: if this function held the lock with a timeout of -1, the core
     * thread could never acquire it to update the ready list, which would
     * deadlock. FtEpollWait also runs in a different thread, and the check
     * below only reads and validates for NULL, so the lock is not required.
     * Taking the lock here would also reduce performance.
     */
    while (needLoopNun == FILLP_TRUE) {
        if (sock->allocState == SOCK_ALLOC_STATE_EPOLL_TO_CLOSE) {
            FILLP_LOGERR("epFd will be destroyed, so return");
            return 0;
        }

        eventCount = EpGetEventsAndSignal(ep, events, maxEvents, timeoutBkp);
        if (eventCount) {
            break;
        }
        if (timeoutBkp == -1) {
            EPOLL_CPU_PAUSE();
            if (SYS_ARCH_SEM_WAIT(&ep->waitSem)) {
                FILLP_LOGERR("ep_wait fail");
                return 0;
            }
        } else if (timeoutBkp == 0) {
            break;
        } else { /* timed wait */
            if (isTakenBeginTs == 0) {
                begintime = SYS_ARCH_GET_CUR_TIME_LONGLONG(); /* microseconds */
                isTakenBeginTs = 1;
            }

            semTimedWait = SYS_ARCH_SEM_WAIT_TIMEOUT(&ep->waitSem, timeoutWork);
            endtime = SYS_ARCH_GET_CUR_TIME_LONGLONG();
            /* timeoutBkp is in milliseconds and SYS_ARCH_GET_CUR_TIME_LONGLONG() is in microseconds */
            if ((FILLP_UTILS_US2MS(endtime - begintime)) >= timeoutBkp) {
                /* Poll one last time regardless of why sem_wait returned, since we do not
                   check whether it was signalled or timed out */
                eventCount = EpGetEventsAndSignal(ep, events, maxEvents, 0);
                (void)semTimedWait;

                break;
            }

            timeoutWork = (FILLP_SLONG)(timeoutBkp - (FILLP_UTILS_US2MS(endtime - begintime)));
            continue;
        }
    }

    return eventCount;
}
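
/*
 * Timeout contract of EpPoll(), as implemented above: 0 performs one
 * non-blocking scan, -1 blocks on ep->waitSem until an event is signalled, and
 * a positive value is a millisecond budget re-checked against
 * SYS_ARCH_GET_CUR_TIME_LONGLONG() (microseconds) after every wakeup; e.g. a
 * 500 ms budget with 200 ms already elapsed leaves roughly 300 ms for the next
 * timed wait. Hedged caller-side sketch, not part of the build; epFd is
 * assumed to be a valid epoll socket index:
 */
#if 0
static void EpollWaitTimeoutSketch(FILLP_INT epFd)
{
    struct SpungeEpollEvent ready[8];
    FILLP_INT n;

    n = SpungeEpollWait(epFd, ready, 8, 0);    /* poll once, never blocks */
    n = SpungeEpollWait(epFd, ready, 8, 500);  /* wait up to 500 ms */
    n = SpungeEpollWait(epFd, ready, 8, -1);   /* block until an event arrives */
    (void)n;
}
#endif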

static struct EventPoll *EpollMallocEventpoll(void)
{
    struct EventPoll *ep = FILLP_NULL_PTR;
    FILLP_INT ret = DympAlloc(g_spunge->eventpollPool, (void **)&ep, FILLP_FALSE);
    if ((ret != ERR_OK) || (ep == FILLP_NULL_PTR)) {
        FILLP_LOGERR("EpollMallocEventpoll: MP_MALLOC failed. \r\n");
        SET_ERRNO(FILLP_ENOMEM);
        return FILLP_NULL_PTR;
    }

    ret = SYS_ARCH_SEM_INIT(&ep->appSem, 1);
    if (ret != FILLP_OK) {
        FILLP_LOGERR("EpollMallocEventpoll:socket create epoll semaphore failed. ");
        DympFree(ep);
        SET_ERRNO(FILLP_EFAULT);
        return FILLP_NULL_PTR;
    }

    ret = SYS_ARCH_SEM_INIT(&ep->waitSem, 0);
    if (ret != FILLP_OK) {
        (void)SYS_ARCH_SEM_DESTROY(&ep->appSem);
        DympFree(ep);
        SET_ERRNO(FILLP_EFAULT);
        return FILLP_NULL_PTR;
    }

    ep->rbr.rbNode = FILLP_NULL_PTR;
    HLIST_INIT(&ep->rdList);
    ret = SYS_ARCH_SEM_INIT(&ep->appCoreSem, 1);
    if (ret != FILLP_OK) {
        (void)SYS_ARCH_SEM_DESTROY(&ep->waitSem);
        (void)SYS_ARCH_SEM_DESTROY(&ep->appSem);
        DympFree(ep);
        SET_ERRNO(FILLP_EFAULT);
        return FILLP_NULL_PTR;
    }

    (void)SYS_ARCH_ATOMIC_SET(&ep->semSignalled, 0);
    return ep;
}

/*
 * Called by SpungeEpollCtl for the SPUNGE_EPOLL_CTL_ADD operation.
 */
static FILLP_INT EpInsert(
    struct EventPoll *ep,
    FILLP_CONST struct SpungeEpollEvent *event,
    FILLP_INT fd)
{
    struct EpItem *epi = FILLP_NULL_PTR;

    /* If the socket is already "ready", the item is dropped into the ready list.
       The fd has already been validated by the caller (SpungeEpollCtl), so there
       is no need to validate the original socket again here.
       Call path: FtEpollCtl->SpungeEpollCtl->EpRemove/EpInsert
    */
    struct FtSocket *sock = SockGetSocket(fd);
    if (sock == FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_EBADF);
        FILLP_LOGERR("SockGetSocket returns NULL, fillp_sock_id:%d", fd);
        return ERR_NO_SOCK;
    }

    if (sock->allocState == SOCK_ALLOC_STATE_EPOLL) {
        FILLP_LOGERR("Epoll socket not supported, fillp_sock_id:%d", fd);
        SET_ERRNO(FILLP_EBADF);
        return ERR_NO_SOCK;
    }

    epi = EpollMallocEpitem();

    if (epi == FILLP_NULL_PTR) {
        FILLP_LOGERR("EpollMallocEpitem returns NULL.");
        SET_ERRNO(FILLP_ENOMEM);
        return ERR_NULLPTR;
    }

    epi->ep = ep;
    (void)memcpy_s(&epi->event, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    epi->fileDespcriptor = fd;

    EpRbtreeInsert(ep, epi);
    /* Add to the fd wait queue */
    EpollAddToSockWaitList(sock, epi);

    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("Fail to wait appCoreSem");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }
    EpollCtlTriggleEvent(ep, sock, epi);
    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);

    return FILLP_OK;
}

static struct FtSocket *SpungeGetEpollSocketByFd(FILLP_INT epFd)
{
    struct FtSocket *epollSock = SockGetSocket(epFd);
    if (epollSock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollCtl: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return FILLP_NULL_PTR;
    }

    if (SYS_ARCH_RWSEM_TRYRDWAIT(&epollSock->sockConnSem) != ERR_OK) {
        FILLP_LOGERR("Socket-%d state is changing,maybe closing ", epFd);
        SET_ERRNO(FILLP_EBUSY);
        return FILLP_NULL_PTR;
    }

    if (epollSock->allocState != SOCK_ALLOC_STATE_EPOLL) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: epoll socket state is incorrect for epoll sock Id=%d , state=%d",
            epFd, epollSock->allocState);
        SET_ERRNO(FILLP_ENOTSOCK);
        return FILLP_NULL_PTR;
    }

    if (epollSock->eventEpoll == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: epollSock->eventEpoll is null. ");

        SET_ERRNO(FILLP_EINVAL);
        return FILLP_NULL_PTR;
    }

    return epollSock;
}

static FILLP_INT SpungeEpollCtlCheckSockValid(struct FtSocket *epollSock, struct FtSocket *sock, FILLP_INT fd)
{
    if (SYS_ARCH_RWSEM_TRYRDWAIT(&sock->sockConnSem) != ERR_OK) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("Socket-%d state is changing,maybe closing ", fd);
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }

    if ((sock->allocState != SOCK_ALLOC_STATE_COMM) && (sock->allocState != SOCK_ALLOC_STATE_WAIT_TO_CLOSE)) {
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: socket stat is wrong ");
        if (sock->allocState == SOCK_ALLOC_STATE_EPOLL) {
            SET_ERRNO(FILLP_EINVAL);
        } else {
            SET_ERRNO(FILLP_EBADF);
        }

        return -1;
    }

    if (SYS_ARCH_SEM_WAIT(&epollSock->eventEpoll->appSem)) {
        FILLP_LOGERR("sem-wait fail");
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }

    return ERR_OK;
}

static FILLP_INT SpungeEpollCtlHandleAddEvent(
    struct FtSocket *epollSock,
    struct FtSocket *sock,
    FILLP_INT epFd,
    FILLP_CONST struct EpItem *epi,
    FILLP_CONST struct SpungeEpollEvent *event)
{
    FILLP_INT error = 0;
    struct SpungeEpollEvent epds;

    if (epi != FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_EEXIST);
        return -1;
    }

    /* An FtSocket can be registered with at most FILLP_NUM_OF_EPOLL_INSTANCE_SUPPORTED (10 by default)
       epoll instances; the limit is controlled by compile-time configuration.
    */
    if (sock->associatedEpollInstanceIdx >= FILLP_NUM_OF_EPOLL_INSTANCE_SUPPORTED) {
        FILLP_LOGERR("socket already added to too many epoll instances, sock->associatedEpollInstanceIdx:%u",
            sock->associatedEpollInstanceIdx);
        SET_ERRNO(FILLP_ENOMEM);
        return -1;
    }

    (void)memset_s(&epds, sizeof(struct SpungeEpollEvent), 0, sizeof(struct SpungeEpollEvent));
    (void)memcpy_s(&epds, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    epds.events |= ((FILLP_UINT32)SPUNGE_EPOLLERR | (FILLP_UINT32)SPUNGE_EPOLLHUP);

    error = EpInsert(epollSock->eventEpoll, &epds, sock->index);
    if (error != ERR_OK) {
        return -1;
    }
    (void)SYS_ARCH_ATOMIC_INC(&sock->epollWaiting, 1);

    if (SYS_ARCH_SEM_WAIT(&sock->epollTaskListLock)) {
        FILLP_LOGERR("tasklock fail");
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }
    sock->associatedEpollInstanceArr[sock->associatedEpollInstanceIdx++] = epFd;
    (void)SYS_ARCH_SEM_POST(&sock->epollTaskListLock);

    return ERR_OK;
}

static FILLP_INT SpungeEpollCtlHandleDelEvent(
    struct FtSocket *epollSock,
    struct FtSocket *sock,
    FILLP_INT epFd,
    struct EpItem *epi)
{
    FILLP_INT error;

    if (epi == FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_ENOENT);
        return -1;
    }

    error = EpRemove(epollSock->eventEpoll, epi);
    if (error != ERR_OK) {
        return -1;
    }
    (void)SYS_ARCH_ATOMIC_DEC(&sock->epollWaiting, 1);

    if (SYS_ARCH_SEM_WAIT(&sock->epollTaskListLock)) {
        FILLP_LOGERR("Wait epoll tasklist fail");
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }
    SpungeDelEpInstFromFtSocket(sock, epFd);
    (void)SYS_ARCH_SEM_POST(&sock->epollTaskListLock);

    return ERR_OK;
}

static FILLP_INT SpungeEpollCtlHandleModEvent(
    struct FtSocket *epollSock,
    struct FtSocket *sock,
    struct EpItem *epi,
    FILLP_CONST struct SpungeEpollEvent *event)
{
    struct SpungeEpollEvent epds;
    FILLP_INT error;

    if (epi == FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_ENOENT);
        return -1;
    }

    (void)memset_s(&epds, sizeof(struct SpungeEpollEvent), 0, sizeof(struct SpungeEpollEvent));
    (void)memcpy_s(&epds, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    epds.events |= ((FILLP_UINT32)SPUNGE_EPOLLERR | (FILLP_UINT32)SPUNGE_EPOLLHUP);
    error = EpModify(epollSock->eventEpoll, sock, epi, &epds);
    if (error != ERR_OK) {
        return -1;
    }

    return ERR_OK;
}

static FILLP_INT SpungeEpollCtlParaChk(FILLP_INT epFd, FILLP_INT op, FILLP_INT fd,
    FILLP_CONST struct SpungeEpollEvent *event)
{
    /* For SPUNGE_EPOLL_CTL_DEL, 'event' may be NULL (older kernels do not check that case) */
    if (((op == SPUNGE_EPOLL_CTL_ADD) || (op == SPUNGE_EPOLL_CTL_MOD)) && (event == FILLP_NULL_PTR)) {
        FILLP_LOGERR("SpungeEpollCtl: 'event' param is NULL");
        SET_ERRNO(FILLP_EFAULT);
        return -1;
    }

    if (event != FILLP_NULL_PTR) {
        FILLP_LOGINF("epFd:%d,op:%d,fillp_sock_id:%d,event->events:%x,event->u64:%llx",
            epFd, op, fd, event->events, event->data.u64);
        FILLP_LOGINF("sizeof(event):%zu, sizeof(event->events):%zu, sizeof(data):%zu",
            sizeof(*event), sizeof(event->events), sizeof(event->data));
    } else {
        FILLP_LOGWAR("epFd:%d,op:%d,fillp_sock_id:%d,event null", epFd, op, fd);
    }
    return 0;
}

FILLP_INT SpungeEpollCtl(FILLP_INT epFd, FILLP_INT op, FILLP_INT fd, FILLP_CONST struct SpungeEpollEvent *event)
{
    struct FtSocket *epollSock = FILLP_NULL_PTR;
    struct FtSocket *sock = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;
    FILLP_INT error;

    if (SpungeEpollCtlParaChk(epFd, op, fd, event) != 0) {
        return -1;
    }

    /* Get the epoll instance socket */
    epollSock = SpungeGetEpollSocketByFd(epFd);
    if (epollSock == FILLP_NULL_PTR) {
        return -1;
    }

    sock = SockGetSocket(fd);
    if (sock == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return -1;
    }

    error = SpungeEpollCtlCheckSockValid(epollSock, sock, fd);
    if (error != ERR_OK) {
        return -1;
    }

    epi = EpFind(epollSock->eventEpoll, fd);

    switch (op) {
        case SPUNGE_EPOLL_CTL_ADD:
            error = SpungeEpollCtlHandleAddEvent(epollSock, sock, epFd, epi, event);
            break;
        case SPUNGE_EPOLL_CTL_DEL:
            error = SpungeEpollCtlHandleDelEvent(epollSock, sock, epFd, epi);
            break;
        case SPUNGE_EPOLL_CTL_MOD:
            error = SpungeEpollCtlHandleModEvent(epollSock, sock, epi, event);
            break;
        default:
            SET_ERRNO(FILLP_EINVAL);
            error = -1;
            break;
    }

    (void)SYS_ARCH_SEM_POST(&epollSock->eventEpoll->appSem);
    (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
    (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
    FILLP_LOGDBG("return value:%d", error);
    return error;
}
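
/*
 * Control-operation sketch (illustrative only, excluded from the build): the
 * fd values are hypothetical, and SPUNGE_EPOLLERR | SPUNGE_EPOLLHUP are added
 * internally by the ADD/MOD handlers above, so callers do not need to request
 * them explicitly.
 */
#if 0
static void EpollCtlSketch(FILLP_INT epFd, FILLP_INT fd)
{
    struct SpungeEpollEvent ev;
    (void)memset_s(&ev, sizeof(ev), 0, sizeof(ev));

    ev.events = SPUNGE_EPOLLIN;
    (void)SpungeEpollCtl(epFd, SPUNGE_EPOLL_CTL_ADD, fd, &ev);            /* register */

    ev.events = SPUNGE_EPOLLIN | SPUNGE_EPOLLOUT;
    (void)SpungeEpollCtl(epFd, SPUNGE_EPOLL_CTL_MOD, fd, &ev);            /* update interest mask */

    (void)SpungeEpollCtl(epFd, SPUNGE_EPOLL_CTL_DEL, fd, FILLP_NULL_PTR); /* deregister, event may be NULL */
}
#endif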

FILLP_INT SpungeEpollFindRemove(FILLP_INT epFd, FILLP_INT fd)
{
    struct FtSocket *sock = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;

    /* Get the epoll instance socket */
    struct FtSocket *epollSock = SockGetSocket(epFd);
    if (epollSock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollFindRemove: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return ERR_PARAM;
    }

    if (SYS_ARCH_RWSEM_TRYRDWAIT(&epollSock->sockConnSem) != ERR_OK) {
        FILLP_LOGERR("SpungeEpollFindRemove: Socket-%d state is changing,maybe closing", epFd);
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }

    if (epollSock->allocState != SOCK_ALLOC_STATE_EPOLL) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGWAR("SpungeEpollFindRemove: epoll socket state is incorrect for epoll sock Id=%d , state=%d\r\n",
            epFd, epollSock->allocState);
        return ERR_PARAM;
    }

    if (epollSock->eventEpoll == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollFindRemove: epollSock->eventEpoll is null.");
        return ERR_NULLPTR;
    }

    sock = SockGetSocket(fd);
    if (sock == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollFindRemove: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return ERR_PARAM;
    }

    if (SYS_ARCH_SEM_WAIT(&epollSock->eventEpoll->appSem)) {
        FILLP_LOGERR("Error to wait appSem");
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        return ERR_COMM;
    }

    epi = EpFind(epollSock->eventEpoll, fd);
    if (epi != FILLP_NULL_PTR) {
        (void)EpRemove(epollSock->eventEpoll, epi);
        (void)SYS_ARCH_ATOMIC_DEC(&sock->epollWaiting, 1);
        SpungeDelEpInstFromFtSocket(sock, epFd);
    }

    (void)SYS_ARCH_SEM_POST(&epollSock->eventEpoll->appSem);
    (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
    return ERR_OK;
}

FILLP_INT SpungeEpollWait(FILLP_INT epFd, struct SpungeEpollEvent *events, FILLP_INT maxEvents, FILLP_INT timeout)
{
    FILLP_INT num;
    struct FtSocket *sock;
    FILLP_INT ret;

    sock = SockGetSocket(epFd);
    if (sock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollWait: SockGetSocket failed. ");
        SET_ERRNO(FILLP_EBADF);
        return -1;
    }

    ret = SYS_ARCH_RWSEM_TRYRDWAIT(&sock->sockConnSem);
    if (ret != ERR_OK) {
        FILLP_LOGERR("Socket-%d state is changing,maybe closing", epFd);
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }

    if ((sock->allocState != SOCK_ALLOC_STATE_EPOLL) || (sock->eventEpoll == FILLP_NULL_PTR)) {
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        FILLP_LOGERR("SpungeEpollWait: allocState is not epoll or eventEpoll is NULL. ");

        SET_ERRNO(FILLP_ENOTSOCK);
        return -1;
    }

    /* The maximum number of events must be greater than zero */
    if ((maxEvents <= 0) || (events == FILLP_NULL_PTR)) {
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        FILLP_LOGERR("SpungeEpollWait: The maximum number of events must be greater than zero. ");
        SET_ERRNO(FILLP_EINVAL);
        return -1;
    }

    num = EpPoll(sock, events, maxEvents, timeout);

    (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
    return num;
}

FILLP_INT SpungeEpollCreate(void)
{
    struct FtSocket *sock = SpungeAllocSock(SOCK_ALLOC_STATE_EPOLL);
    struct EventPoll *ep = FILLP_NULL_PTR;

    FILLP_LOGINF("create epoll");

    if (sock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollCreate: alloc sock failed.");
        SET_ERRNO(FILLP_ENOMEM);
        return -1;
    }

    ep = EpollMallocEventpoll();
    if (ep == FILLP_NULL_PTR) {
        FILLP_LOGINF("Fail to alloc ep");
        sock->allocState = SOCK_ALLOC_STATE_FREE;
        SockFreeSocket(sock);
        return -1;
    }

    sock->eventEpoll = ep;
    sock->isListenSock = FILLP_FALSE;
    sock->isSockBind = FILLP_FALSE;

    (void)SYS_ARCH_ATOMIC_SET(&sock->rcvEvent, 0);
    (void)SYS_ARCH_ATOMIC_SET(&sock->sendEvent, 0);
    sock->errEvent = 0;

    (void)SYS_ARCH_ATOMIC_SET(&sock->epollWaiting, 0);
    HLIST_INIT(&sock->epTaskList);

    FILLP_LOGINF("create epoll return, epFd:%d", sock->index);
    return sock->index;
}

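/*
 * End-to-end sketch combining the public entry points in this file (a minimal
 * sketch, excluded from the build; error handling is omitted and connSock is
 * assumed to be a valid FillP socket index):
 */
#if 0
static void EpollLifecycleSketch(FILLP_INT connSock)
{
    struct SpungeEpollEvent ev;
    struct SpungeEpollEvent ready[4];
    FILLP_INT epFd = SpungeEpollCreate();

    (void)memset_s(&ev, sizeof(ev), 0, sizeof(ev));
    ev.events = SPUNGE_EPOLLIN;
    (void)SpungeEpollCtl(epFd, SPUNGE_EPOLL_CTL_ADD, connSock, &ev);

    (void)SpungeEpollWait(epFd, ready, 4, 1000); /* wait up to one second */

    /* Drop the registration before the socket or the epoll instance is closed */
    (void)SpungeEpollFindRemove(epFd, connSock);
}
#endif
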
#ifdef __cplusplus
}
#endif