/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "poller.h"
#include "sched/execute_ctx.h"
#include "tm/scpu_task.h"
#include "dfx/log/ffrt_log_api.h"

namespace ffrt {
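// Create the epoll instance and register an internal eventfd that is used to wake the poller out of epoll_wait.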
Poller::Poller() noexcept : m_epFd { ::epoll_create1(EPOLL_CLOEXEC) }
{
    m_wakeData.cb = nullptr;
    m_wakeData.fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    epoll_event ev { .events = EPOLLIN, .data = { .ptr = static_cast<void*>(&m_wakeData) } };
    if (epoll_ctl(m_epFd, EPOLL_CTL_ADD, m_wakeData.fd, &ev) < 0) {
        FFRT_LOGE("epoll_ctl add fd error: efd=%d, fd=%d, errno=%d", m_epFd, m_wakeData.fd, errno);
        std::terminate();
    }
}

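// Release the eventfd and the epoll instance and drop all bookkeeping state.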
Poller::~Poller() noexcept
{
    ::close(m_wakeData.fd);
    ::close(m_epFd);
    timerHandle_ = -1;
    m_wakeDataMap.clear();
    m_delCntMap.clear();
    timerMap_.clear();
    executedHandle_.clear();
    flag_ = EpollStatus::TEARDOWN;
    m_waitTaskMap.clear();
    m_cachedTaskEvents.clear();
}

PollerProxy& PollerProxy::Instance()
{
    static PollerProxy pollerInstance;
    return pollerInstance;
}

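// Register (EPOLL_CTL_ADD) or re-register (EPOLL_CTL_MOD) an fd in the epoll instance.
// Each fd owns a WakeDataWithCb carrying the user callback/data and the monitored events.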
int Poller::AddFdEvent(int op, uint32_t events, int fd, void* data, ffrt_poller_cb cb) noexcept
{
    auto wakeData = std::make_unique<WakeDataWithCb>(fd, data, cb, ExecuteCtx::Cur()->task);
    void* ptr = static_cast<void*>(wakeData.get());
    if (ptr == nullptr || wakeData == nullptr) {
        FFRT_LOGE("Construct WakeDataWithCb instance failed");
        return -1;
    }
    wakeData->monitorEvents = events;

    epoll_event ev = { .events = events, .data = { .ptr = ptr } };
    std::unique_lock lock(m_mapMutex);
    if (epoll_ctl(m_epFd, op, fd, &ev) != 0) {
        FFRT_LOGE("epoll_ctl add/mod fd error: efd=%d, fd=%d, errno=%d", m_epFd, fd, errno);
        return -1;
    }

    if (op == EPOLL_CTL_ADD) {
        m_wakeDataMap[fd].emplace_back(std::move(wakeData));
        fdEmpty_.store(false);
    } else if (op == EPOLL_CTL_MOD) {
        auto iter = m_wakeDataMap.find(fd);
        FFRT_COND_RETURN_ERROR(iter == m_wakeDataMap.end(), -1, "fd %d does not exist in wakeDataMap", fd);
        if (iter->second.size() != 1) {
            FFRT_LOGE("epoll_ctl mod fd wakedata num invalid");
            return -1;
        }
        iter->second.pop_back();
        iter->second.emplace_back(std::move(wakeData));
    }
    return 0;
}

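// Remove an fd from the epoll instance. The matching wake data is released lazily in
// ReleaseFdWakeData(); here we only record the deletion and wake the poller thread.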
int Poller::DelFdEvent(int fd) noexcept
{
    std::unique_lock lock(m_mapMutex);
    if (epoll_ctl(m_epFd, EPOLL_CTL_DEL, fd, nullptr) != 0) {
        FFRT_LOGE("epoll_ctl del fd error: efd=%d, fd=%d, errno=%d", m_epFd, fd, errno);
        return -1;
    }

    m_delCntMap[fd]++;
    WakeUp();
    return 0;
}

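// Drop any events cached for a task that no longer needs them.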
void Poller::ClearCachedEvents(CPUEUTask* task) noexcept
{
    std::unique_lock lock(m_mapMutex);
    auto iter = m_cachedTaskEvents.find(task);
    if (iter == m_cachedTaskEvents.end()) {
        return;
    }
    m_cachedTaskEvents.erase(iter);
}

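// Copy cached events into the consumer's buffer, keeping only the newest event per fd, and
// re-arm (unmask) each fd with its originally monitored events. Returns the number of fds written.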
int Poller::FetchCachedEventAndDoUnmask(EventVec& cachedEventsVec, struct epoll_event* eventsVec) noexcept
{
    std::unordered_map<int, int> seenFd;
    int fdCnt = 0;
    for (size_t i = 0; i < cachedEventsVec.size(); i++) {
        auto eventInfo = cachedEventsVec[i];
        int currFd = eventInfo.data.fd;
        // check if seen
        auto iter = seenFd.find(currFd);
        if (iter == seenFd.end()) {
            // if not seen, copy cached events and record idx
            eventsVec[fdCnt].data.fd = currFd;
            eventsVec[fdCnt].events = eventInfo.events;
            seenFd[currFd] = fdCnt;
            fdCnt++;
        } else {
            // if seen, update event to newest
            eventsVec[iter->second].events = eventInfo.events;
            FFRT_LOGD("fd[%d] has multiple cached events", currFd);
            continue;
        }

        // Unmask to origin events
        auto wakeDataIter = m_wakeDataMap.find(currFd);
        if (wakeDataIter == m_wakeDataMap.end() || wakeDataIter->second.size() == 0) {
            FFRT_LOGD("fd[%d] may be deleted", currFd);
            continue;
        }

        auto& wakeData = wakeDataIter->second.back();
        epoll_event ev = { .events = wakeData->monitorEvents, .data = { .ptr = static_cast<void*>(wakeData.get()) } };
        if (epoll_ctl(m_epFd, EPOLL_CTL_MOD, currFd, &ev) != 0) {
            FFRT_LOGE("fd[%d] epoll ctl mod fail, errno=%d", currFd, errno);
            continue;
        }
    }
    return fdCnt;
}

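// Fetch and clear all events cached for the given task (see the EventVec overload above).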
int Poller::FetchCachedEventAndDoUnmask(CPUEUTask* task, struct epoll_event* eventsVec) noexcept
{
    // caller must hold m_mapMutex
    auto syncTaskIter = m_cachedTaskEvents.find(task);
    if (syncTaskIter == m_cachedTaskEvents.end() || syncTaskIter->second.size() == 0) {
        return 0;
    }

    int nfds = FetchCachedEventAndDoUnmask(syncTaskIter->second, eventsVec);
    m_cachedTaskEvents.erase(syncTaskIter);
    return nfds;
}

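// Block the current task until at least one registered fd event arrives or the optional timeout
// fires. Cached events are returned immediately. Thread-wait tasks sleep on a condition variable;
// coroutine tasks yield via CoWait and are woken later by WakeTask.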
int Poller::WaitFdEvent(struct epoll_event* eventsVec, int maxevents, int timeout) noexcept
{
    FFRT_COND_DO_ERR((eventsVec == nullptr), return -1, "eventsVec cannot be null");

    auto task = ExecuteCtx::Cur()->task;
    if (!task) {
        FFRT_LOGE("non-worker thread shall not call this function.");
        return -1;
    }

    FFRT_COND_DO_ERR((maxevents < EPOLL_EVENT_SIZE), return -1, "maxEvents:%d cannot be less than 1024", maxevents);

    int nfds = 0;
    if (ThreadWaitMode(task)) {
        std::unique_lock<std::mutex> lck(task->mutex_);
        m_mapMutex.lock();
        int cachedNfds = FetchCachedEventAndDoUnmask(task, eventsVec);
        if (cachedNfds > 0) {
            m_mapMutex.unlock();
            FFRT_LOGD("task[%s] id[%d] has [%d] cached events, return directly",
                task->label.c_str(), task->gid, cachedNfds);
            return cachedNfds;
        }

        if (m_waitTaskMap.find(task) != m_waitTaskMap.end()) {
            FFRT_LOGE("task has waited before");
            m_mapMutex.unlock();
            return 0;
        }
        if (FFRT_UNLIKELY(LegacyMode(task))) {
            task->blockType = BlockType::BLOCK_THREAD;
        }
        auto currTime = std::chrono::steady_clock::now();
        m_waitTaskMap[task] = {static_cast<void*>(eventsVec), maxevents, &nfds, currTime};
        if (timeout > -1) {
            FFRT_LOGD("poller meet timeout={%d}", timeout);
            RegisterTimer(timeout, nullptr, nullptr);
        }
        m_mapMutex.unlock();
        reinterpret_cast<SCPUEUTask*>(task)->waitCond_.wait(lck);
        FFRT_LOGD("task[%s] id[%d] has [%d] events", task->label.c_str(), task->gid, nfds);
        return nfds;
    }

    CoWait([&](CPUEUTask *task)->bool {
        m_mapMutex.lock();
        int cachedNfds = FetchCachedEventAndDoUnmask(task, eventsVec);
        if (cachedNfds > 0) {
            m_mapMutex.unlock();
            FFRT_LOGD("task[%s] id[%d] has [%d] cached events, return directly",
                task->label.c_str(), task->gid, cachedNfds);
            nfds = cachedNfds;
            return false;
        }

        if (m_waitTaskMap.find(task) != m_waitTaskMap.end()) {
            FFRT_LOGE("task has waited before");
            m_mapMutex.unlock();
            return false;
        }
        auto currTime = std::chrono::steady_clock::now();
        m_waitTaskMap[task] = {static_cast<void*>(eventsVec), maxevents, &nfds, currTime};
        if (timeout > -1) {
            FFRT_LOGD("poller meet timeout={%d}", timeout);
            RegisterTimer(timeout, nullptr, nullptr);
        }
        m_mapMutex.unlock();
        // The ownership of the task belongs to m_waitTaskMap, and the task cannot be accessed any more.
        return true;
    });
    FFRT_LOGD("task[%s] id[%d] has [%d] events", task->label.c_str(), task->gid, nfds);
    return nfds;
}

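// Nudge the poller out of epoll_wait by writing to the internal eventfd.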
void Poller::WakeUp() noexcept
{
    uint64_t one = 1;
    (void)::write(m_wakeData.fd, &one, sizeof one);
}

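// Dispatch the fds returned by epoll_wait: drain the wake eventfd, invoke fd callbacks directly,
// and collect events destined for synchronously waiting tasks into syncTaskEvents.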
void Poller::ProcessWaitedFds(int nfds, std::unordered_map<CPUEUTask*, EventVec>& syncTaskEvents,
    std::array<epoll_event, EPOLL_EVENT_SIZE>& waitedEvents) noexcept
{
    for (unsigned int i = 0; i < static_cast<unsigned int>(nfds); ++i) {
        struct WakeDataWithCb *data = reinterpret_cast<struct WakeDataWithCb *>(waitedEvents[i].data.ptr);
        int currFd = data->fd;
        if (currFd == m_wakeData.fd) {
            uint64_t one = 1;
            (void)::read(m_wakeData.fd, &one, sizeof one);
            continue;
        }

        if (data->cb != nullptr) {
            data->cb(data->data, waitedEvents[i].events);
            continue;
        }

        if (data->task != nullptr) {
            epoll_event ev = { .events = waitedEvents[i].events, .data = {.fd = currFd} };
            syncTaskEvents[data->task].push_back(ev);
        }
    }
}

namespace {
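// Wake a task that is blocked in WaitFdEvent: notify its condition variable for thread-wait
// tasks, or resume the coroutine otherwise.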
void WakeTask(CPUEUTask* task)
{
    if (ThreadNotifyMode(task)) {
        std::unique_lock<std::mutex> lck(task->mutex_);
        if (BlockThread(task)) {
            task->blockType = BlockType::BLOCK_COROUTINE;
        }
        reinterpret_cast<SCPUEUTask*>(task)->waitCond_.notify_one();
    } else {
        CoRoutineFactory::CoWakeFunc(task, false);
    }
}

int CopyEventsToConsumer(EventVec& cachedEventsVec, struct epoll_event* eventsVec) noexcept
{
    int nfds = static_cast<int>(cachedEventsVec.size());
    for (int i = 0; i < nfds; i++) {
        eventsVec[i].events = cachedEventsVec[i].events;
        eventsVec[i].data.fd = cachedEventsVec[i].data.fd;
    }
    return nfds;
}

void CopyEventsInfoToConsumer(SyncData& taskInfo, EventVec& cachedEventsVec)
{
    epoll_event* eventsPtr = (epoll_event*)taskInfo.eventsPtr;
    int* nfdsPtr = taskInfo.nfdsPtr;
    if (eventsPtr == nullptr || nfdsPtr == nullptr) {
        FFRT_LOGE("user ptr is nullptr");
        return;
    }
    *nfdsPtr = CopyEventsToConsumer(cachedEventsVec, eventsPtr);
}
} // namespace

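// No consumer is waiting yet: mask the fds (stop reporting further events until they are re-armed)
// and stash the events so that a later WaitFdEvent call can return them.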
void Poller::CacheEventsAndDoMask(CPUEUTask* task, EventVec& eventVec) noexcept
{
    for (size_t i = 0; i < eventVec.size(); i++) {
        int currFd = eventVec[i].data.fd;
        auto delIter = m_delCntMap.find(currFd);
        if (delIter != m_delCntMap.end()) {
            unsigned int delCnt = static_cast<unsigned int>(delIter->second);
            auto& wakeDataList = m_wakeDataMap[currFd];
            if (wakeDataList.size() == delCnt) {
                continue;
            }
        }
        struct epoll_event maskEv;
        maskEv.events = 0;
        if (epoll_ctl(m_epFd, EPOLL_CTL_MOD, currFd, &maskEv) != 0 && errno != ENOENT) {
            // ENOENT indicates the fd is not in the epoll instance; it may have been deleted
            FFRT_LOGW("epoll_ctl mod fd error: efd=%d, fd=%d, errno=%d", m_epFd, currFd, errno);
        }
        FFRT_LOGD("fd[%d] event has no consumer, so cache it", currFd);
    }
    auto& syncTaskEvents = m_cachedTaskEvents[task];
    syncTaskEvents.insert(syncTaskEvents.end(),
        std::make_move_iterator(eventVec.begin()), std::make_move_iterator(eventVec.end()));
}

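// Deliver collected events to their waiting tasks and wake them; events whose task is not
// waiting yet are cached and the corresponding fds are masked.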
void Poller::WakeSyncTask(std::unordered_map<CPUEUTask*, EventVec>& syncTaskEvents) noexcept
{
    if (syncTaskEvents.empty()) {
        return;
    }

    m_mapMutex.lock();
    for (auto& taskEventPair : syncTaskEvents) {
        CPUEUTask* currTask = taskEventPair.first;
        auto iter = m_waitTaskMap.find(currTask);
        if (iter == m_waitTaskMap.end()) {
            CacheEventsAndDoMask(currTask, taskEventPair.second);
            continue;
        }

        CopyEventsInfoToConsumer(iter->second, taskEventPair.second);
        m_waitTaskMap.erase(iter);

        WakeTask(currTask);
    }
    m_mapMutex.unlock();
}

uint64_t Poller::GetTaskWaitTime(CPUEUTask* task) noexcept
{
    std::unique_lock lock(m_mapMutex);
    auto iter = m_waitTaskMap.find(task);
    if (iter == m_waitTaskMap.end()) {
        return 0;
    }

    return std::chrono::duration_cast<std::chrono::seconds>(
        iter->second.waitTP.time_since_epoch()).count();
}

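// Run one poller iteration: clamp the epoll timeout to the nearest registered timer, wait for fd
// events, then dispatch expired timers or ready fds. Returns what kind of work was handled.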
PollerRet Poller::PollOnce(int timeout) noexcept
{
    int realTimeout = timeout;
    int timerHandle = -1;

    timerMutex_.lock();
    if (!timerMap_.empty()) {
        auto cur = timerMap_.begin();
        timerHandle = cur->second.handle;
        TimePoint now = std::chrono::steady_clock::now();
        realTimeout = std::chrono::duration_cast<std::chrono::milliseconds>(
            cur->first - now).count();
        if (realTimeout <= 0) {
            ExecuteTimerCb(now);
            return PollerRet::RET_TIMER;
        }

        if (timeout != -1 && realTimeout > timeout) {
            timerHandle = -1;
            realTimeout = timeout;
        }

        flag_ = EpollStatus::WAIT;
    }
    timerMutex_.unlock();

    pollerCount_++;

    std::array<epoll_event, EPOLL_EVENT_SIZE> waitedEvents;
    int nfds = epoll_wait(m_epFd, waitedEvents.data(), waitedEvents.size(), realTimeout);
    flag_ = EpollStatus::WAKE;
    if (nfds < 0) {
        if (errno != EINTR) {
            FFRT_LOGE("epoll_wait error, errno=%d.", errno);
        }
        return PollerRet::RET_NULL;
    }

    if (nfds == 0) {
        if (timerHandle != -1) {
            timerMutex_.lock();
            for (auto it = timerMap_.begin(); it != timerMap_.end(); it++) {
                if (it->second.handle == timerHandle) {
                    ExecuteTimerCb(it->first);
                    return PollerRet::RET_TIMER;
                }
            }
            timerMutex_.unlock();
        }
        return PollerRet::RET_NULL;
    }

    std::unordered_map<CPUEUTask*, EventVec> syncTaskEvents;
    ProcessWaitedFds(nfds, syncTaskEvents, waitedEvents);
    WakeSyncTask(syncTaskEvents);

    ReleaseFdWakeData();
    return PollerRet::RET_EPOLL;
}

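// Reconcile deferred fd deletions recorded by DelFdEvent: drop wake data whose fds were fully
// deleted, trim stale entries, and refresh the fdEmpty_ flag.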
void Poller::ReleaseFdWakeData() noexcept
{
    std::unique_lock lock(m_mapMutex);
    for (auto delIter = m_delCntMap.begin(); delIter != m_delCntMap.end();) {
        int delFd = delIter->first;
        unsigned int delCnt = static_cast<unsigned int>(delIter->second);
        auto& wakeDataList = m_wakeDataMap[delFd];
        int diff = static_cast<int>(wakeDataList.size()) - static_cast<int>(delCnt);
        if (diff == 0) {
            m_wakeDataMap.erase(delFd);
            m_delCntMap.erase(delIter++);
            continue;
        } else if (diff == 1) {
            for (unsigned int i = 0; i < delCnt - 1; i++) {
                wakeDataList.pop_front();
            }
            m_delCntMap[delFd] = 1;
        } else {
            FFRT_LOGD("fd=%d count unexpected, added num=%zu, del num=%u", delFd, wakeDataList.size(), delCnt);
        }
        delIter++;
    }

    fdEmpty_.store(m_wakeDataMap.empty());
}

void Poller::ProcessTimerDataCb(CPUEUTask* task) noexcept
{
    m_mapMutex.lock();
    auto iter = m_waitTaskMap.find(task);
    if (iter != m_waitTaskMap.end()) {
        WakeTask(task);
        m_waitTaskMap.erase(iter);
    }
    m_mapMutex.unlock();
}

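// Execute all timers whose deadline is at or before 'timer'. Expects timerMutex_ to be held on
// entry and releases it before invoking callbacks; repeating timers are re-registered afterwards.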
void Poller::ExecuteTimerCb(TimePoint timer) noexcept
{
    std::vector<TimerDataWithCb> timerData;
    for (auto iter = timerMap_.begin(); iter != timerMap_.end();) {
        if (iter->first <= timer) {
            timerData.emplace_back(iter->second);
            if (iter->second.cb != nullptr) {
                executedHandle_[iter->second.handle] = TimerStatus::EXECUTING;
            }
            iter = timerMap_.erase(iter);
            continue;
        }
        break;
    }
    timerEmpty_.store(timerMap_.empty());

    timerMutex_.unlock();
    for (const auto& data : timerData) {
        if (data.cb) {
            data.cb(data.data);
        } else if (data.task != nullptr) {
            ProcessTimerDataCb(data.task);
        }

        if (data.cb != nullptr) {
            executedHandle_[data.handle] = TimerStatus::EXECUTED;
        }
        if (data.repeat) {
            std::lock_guard lock(timerMutex_);
            auto iter = executedHandle_.find(data.handle);
            if (iter != executedHandle_.end()) {
                executedHandle_.erase(data.handle);
                RegisterTimerImpl(data);
            }
        }
    }
}

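// Insert a timer into timerMap_ (caller holds timerMutex_) and wake the poller if the new timer
// may shorten the timeout of the epoll_wait it is currently blocked in.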
void Poller::RegisterTimerImpl(const TimerDataWithCb& data) noexcept
{
    if (flag_ == EpollStatus::TEARDOWN) {
        return;
    }

    TimePoint absoluteTime = std::chrono::steady_clock::now() + std::chrono::milliseconds(data.timeout);
    bool wake = timerMap_.empty() || (absoluteTime < timerMap_.begin()->first && flag_ == EpollStatus::WAIT);

    timerMap_.emplace(absoluteTime, data);
    timerEmpty_.store(false);

    if (wake) {
        WakeUp();
    }
}

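// Register a timer (optionally repeating) that fires after 'timeout' milliseconds and return its
// handle, or -1 during teardown. With a null cb the timer only wakes the registering task
// (used by WaitFdEvent timeouts).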
int Poller::RegisterTimer(uint64_t timeout, void* data, ffrt_timer_cb cb, bool repeat) noexcept
{
    if (flag_ == EpollStatus::TEARDOWN) {
        return -1;
    }

    std::lock_guard lock(timerMutex_);
    timerHandle_ += 1;

    TimerDataWithCb timerMapValue(data, cb, ExecuteCtx::Cur()->task, repeat, timeout);
    timerMapValue.handle = timerHandle_;
    RegisterTimerImpl(timerMapValue);

    return timerHandle_;
}

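// Cancel a timer by handle. If its callback is currently executing, spin until it finishes;
// otherwise remove it from timerMap_ and wake the poller when the earliest deadline changed.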
int Poller::UnregisterTimer(int handle) noexcept
{
    if (flag_ == EpollStatus::TEARDOWN) {
        return -1;
    }

    std::lock_guard lock(timerMutex_);
    auto it = executedHandle_.find(handle);
    if (it != executedHandle_.end()) {
        while (it->second == TimerStatus::EXECUTING) {
            std::this_thread::yield();
        }
        executedHandle_.erase(it);
        return 0;
    }

    bool wake = false;
    int ret = -1;
    for (auto cur = timerMap_.begin(); cur != timerMap_.end(); cur++) {
        if (cur->second.handle == handle) {
            if (cur == timerMap_.begin() && flag_ == EpollStatus::WAIT) {
                wake = true;
            }
            timerMap_.erase(cur);
            ret = 0;
            break;
        }
    }

    timerEmpty_.store(timerMap_.empty());

    if (wake) {
        WakeUp();
    }
    return ret;
}

bool Poller::DetermineEmptyMap() noexcept
{
    return fdEmpty_ && timerEmpty_;
}

bool Poller::DeterminePollerReady() noexcept
{
    return IsFdExist() || IsTimerReady();
}

bool Poller::IsFdExist() noexcept
{
    return !fdEmpty_;
}

bool Poller::IsTimerReady() noexcept
{
    TimePoint now = std::chrono::steady_clock::now();
    std::lock_guard lock(timerMutex_);
    if (timerMap_.empty()) {
        return false;
    }

    if (now >= timerMap_.begin()->first) {
        return true;
    }
    return false;
}

ffrt_timer_query_t Poller::GetTimerStatus(int handle) noexcept
{
    if (flag_ == EpollStatus::TEARDOWN) {
        return ffrt_timer_notfound;
    }

    std::lock_guard lock(timerMutex_);
    for (auto cur = timerMap_.begin(); cur != timerMap_.end(); cur++) {
        if (cur->second.handle == handle) {
            return ffrt_timer_not_executed;
        }
    }

    auto it = executedHandle_.find(handle);
    if (it != executedHandle_.end()) {
        while (it->second == TimerStatus::EXECUTING) {
            std::this_thread::yield();
        }
        return ffrt_timer_executed;
    }

    return ffrt_timer_notfound;
}

uint8_t Poller::GetPollCount() noexcept
{
    return pollerCount_;
}
} // namespace ffrt