1 /*
2 * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "socperf.h"
17 #include "hisysevent.h"
18 #include "hitrace_meter.h"
19 #include "parameters.h"
20
21 namespace OHOS {
22 namespace SOCPERF {
namespace {
// Minimum gap in milliseconds between two triggers of the same boost command;
// requests arriving inside this window are rejected by CheckTimeInterval().
const int64_t TIME_INTERVAL = 8;
// Offset added to a cmdId to derive a distinct bookkeeping key for its "off"
// (cancel) requests, so on/off throttling is tracked independently.
const int32_t CANCEL_CMDID_PREFIX = 100000;
}
// Default constructor: members rely on their in-class initializers;
// real setup happens in Init().
SocPerf::SocPerf()
{
}
30
// Destructor: nothing to release explicitly; shared_ptr members clean up
// the worker thread wrapper automatically.
SocPerf::~SocPerf()
{
}
34
Init()35 bool SocPerf::Init()
36 {
37 if (!socPerfConfig_.Init()) {
38 SOC_PERF_LOGE("Failed to init SocPerf config");
39 return false;
40 }
41
42 if (!CreateThreadWraps()) {
43 SOC_PERF_LOGE("Failed to create threadwraps threads");
44 return false;
45 }
46 InitThreadWraps();
47 enabled_ = true;
48 return true;
49 }
50
CreateThreadWraps()51 bool SocPerf::CreateThreadWraps()
52 {
53 #ifdef SOCPERF_ADAPTOR_FFRT
54 socperfThreadWrap_ = std::make_shared<SocPerfThreadWrap>();
55 #else
56 auto runner = AppExecFwk::EventRunner::Create("socperf#runner");
57 if (!runner) {
58 SOC_PERF_LOGE("Failed to Create EventRunner");
59 return false;
60 }
61 socperfThreadWrap_ = std::make_shared<SocPerfThreadWrap>(runner);
62 #endif
63 if (!socperfThreadWrap_) {
64 SOC_PERF_LOGE("Failed to Create socPerfThreadWrap");
65 return false;
66 }
67 SOC_PERF_LOGD("Success to Create All threadWrap threads");
68 return true;
69 }
70
InitThreadWraps()71 void SocPerf::InitThreadWraps()
72 {
73 for (auto iter = socPerfConfig_.resourceNodeInfo_.begin(); iter != socPerfConfig_.resourceNodeInfo_.end(); ++iter) {
74 std::shared_ptr<ResourceNode> resourceNode = iter->second;
75 #ifdef SOCPERF_ADAPTOR_FFRT
76 socperfThreadWrap_->InitResourceNodeInfo(resourceNode);
77 #else
78 auto event = AppExecFwk::InnerEvent::Get(INNER_EVENT_ID_INIT_RESOURCE_NODE_INFO, resourceNode);
79 socperfThreadWrap_->SendEvent(event);
80 #endif
81 }
82 }
83
PerfRequest(int32_t cmdId,const std::string & msg)84 void SocPerf::PerfRequest(int32_t cmdId, const std::string& msg)
85 {
86 if (!enabled_ || !perfRequestEnable_) {
87 SOC_PERF_LOGD("SocPerf disabled!");
88 return;
89 }
90 if (!CheckTimeInterval(true, cmdId)) {
91 SOC_PERF_LOGD("cmdId %{public}d can not trigger, because time interval", cmdId);
92 return;
93 }
94 if (socPerfConfig_.perfActionsInfo_.find(cmdId) == socPerfConfig_.perfActionsInfo_.end()) {
95 SOC_PERF_LOGD("Invalid PerfRequest cmdId[%{public}d]", cmdId);
96 return;
97 }
98
99 int32_t matchCmdId = MatchDeviceModeCmd(cmdId, false);
100 SOC_PERF_LOGD("cmdId[%{public}d]matchCmdId[%{public}d]msg[%{public}s]", cmdId, matchCmdId, msg.c_str());
101
102 std::string trace_str(__func__);
103 trace_str.append(",cmdId[").append(std::to_string(matchCmdId)).append("]");
104 trace_str.append(",msg[").append(msg).append("]");
105 StartTrace(HITRACE_TAG_OHOS, trace_str, -1);
106 DoFreqActions(socPerfConfig_.perfActionsInfo_[matchCmdId], EVENT_INVALID, ACTION_TYPE_PERF);
107 FinishTrace(HITRACE_TAG_OHOS);
108 UpdateCmdIdCount(cmdId);
109 }
110
PerfRequestEx(int32_t cmdId,bool onOffTag,const std::string & msg)111 void SocPerf::PerfRequestEx(int32_t cmdId, bool onOffTag, const std::string& msg)
112 {
113 if (!enabled_ || !perfRequestEnable_) {
114 SOC_PERF_LOGD("SocPerf disabled!");
115 return;
116 }
117 if (socPerfConfig_.perfActionsInfo_.find(cmdId) == socPerfConfig_.perfActionsInfo_.end()) {
118 SOC_PERF_LOGD("Invalid PerfRequestEx cmdId[%{public}d]", cmdId);
119 return;
120 }
121 if (!CheckTimeInterval(onOffTag, cmdId)) {
122 SOC_PERF_LOGD("cmdId %{public}d can not trigger, because time interval", cmdId);
123 return;
124 }
125 int32_t matchCmdId = MatchDeviceModeCmd(cmdId, true);
126 SOC_PERF_LOGD("cmdId[%{public}d]matchCmdId[%{public}d]onOffTag[%{public}d]msg[%{public}s]",
127 cmdId, matchCmdId, onOffTag, msg.c_str());
128
129 std::string trace_str(__func__);
130 trace_str.append(",cmdId[").append(std::to_string(matchCmdId)).append("]");
131 trace_str.append(",onOff[").append(std::to_string(onOffTag)).append("]");
132 trace_str.append(",msg[").append(msg).append("]");
133 StartTrace(HITRACE_TAG_OHOS, trace_str, -1);
134 DoFreqActions(socPerfConfig_.perfActionsInfo_[matchCmdId], onOffTag ? EVENT_ON : EVENT_OFF, ACTION_TYPE_PERF);
135 FinishTrace(HITRACE_TAG_OHOS);
136 if (onOffTag) {
137 UpdateCmdIdCount(cmdId);
138 }
139 }
140
PowerLimitBoost(bool onOffTag,const std::string & msg)141 void SocPerf::PowerLimitBoost(bool onOffTag, const std::string& msg)
142 {
143 if (!enabled_) {
144 SOC_PERF_LOGD("SocPerf disabled!");
145 return;
146 }
147 SOC_PERF_LOGI("onOffTag[%{public}d]msg[%{public}s]", onOffTag, msg.c_str());
148
149 std::string trace_str(__func__);
150 trace_str.append(",onOff[").append(std::to_string(onOffTag)).append("]");
151 trace_str.append(",msg[").append(msg).append("]");
152 StartTrace(HITRACE_TAG_OHOS, trace_str, -1);
153 #ifdef SOCPERF_ADAPTOR_FFRT
154 socperfThreadWrap_->UpdatePowerLimitBoostFreq(onOffTag);
155 #else
156 auto event = AppExecFwk::InnerEvent::Get(INNER_EVENT_ID_POWER_LIMIT_BOOST_FREQ, onOffTag ? 1 : 0);
157 socperfThreadWrap_->SendEvent(event);
158 #endif
159 HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::RSS, "LIMIT_BOOST",
160 OHOS::HiviewDFX::HiSysEvent::EventType::BEHAVIOR,
161 "CLIENT_ID", ACTION_TYPE_POWER,
162 "ON_OFF_TAG", onOffTag);
163 FinishTrace(HITRACE_TAG_OHOS);
164 }
165
ThermalLimitBoost(bool onOffTag,const std::string & msg)166 void SocPerf::ThermalLimitBoost(bool onOffTag, const std::string& msg)
167 {
168 if (!enabled_) {
169 SOC_PERF_LOGD("SocPerf disabled!");
170 return;
171 }
172 SOC_PERF_LOGI("onOffTag[%{public}d]msg[%{public}s]", onOffTag, msg.c_str());
173 std::string trace_str(__func__);
174 trace_str.append(",onOff[").append(std::to_string(onOffTag)).append("]");
175 trace_str.append(",msg[").append(msg).append("]");
176 StartTrace(HITRACE_TAG_OHOS, trace_str, -1);
177 #ifdef SOCPERF_ADAPTOR_FFRT
178 socperfThreadWrap_->UpdateThermalLimitBoostFreq(onOffTag);
179 #else
180 auto event = AppExecFwk::InnerEvent::Get(INNER_EVENT_ID_THERMAL_LIMIT_BOOST_FREQ, onOffTag ? 1 : 0);
181 socperfThreadWrap_->SendEvent(event);
182 #endif
183 HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::RSS, "LIMIT_BOOST",
184 OHOS::HiviewDFX::HiSysEvent::EventType::BEHAVIOR,
185 "CLIENT_ID", ACTION_TYPE_THERMAL,
186 "ON_OFF_TAG", onOffTag);
187 FinishTrace(HITRACE_TAG_OHOS);
188 }
189
SendLimitRequestEventOff(std::shared_ptr<SocPerfThreadWrap> threadWrap,int32_t clientId,int32_t resId,int32_t eventId)190 void SocPerf::SendLimitRequestEventOff(std::shared_ptr<SocPerfThreadWrap> threadWrap,
191 int32_t clientId, int32_t resId, int32_t eventId)
192 {
193 auto iter = limitRequest_[clientId].find(resId);
194 if (iter != limitRequest_[clientId].end()
195 && limitRequest_[clientId][resId] != INVALID_VALUE) {
196 auto resAction = std::make_shared<ResAction>(
197 limitRequest_[clientId][resId], 0, clientId, EVENT_OFF, -1, MAX_INT_VALUE);
198 #ifdef SOCPERF_ADAPTOR_FFRT
199 threadWrap->UpdateLimitStatus(eventId, resAction, resId);
200 #else
201 auto event = AppExecFwk::InnerEvent::Get(eventId, resAction, resId);
202 threadWrap->SendEvent(event);
203 #endif
204 limitRequest_[clientId].erase(iter);
205 }
206 }
207
SendLimitRequestEventOn(std::shared_ptr<SocPerfThreadWrap> threadWrap,int32_t clientId,int32_t resId,int64_t resValue,int32_t eventId)208 void SocPerf::SendLimitRequestEventOn(std::shared_ptr<SocPerfThreadWrap> threadWrap,
209 int32_t clientId, int32_t resId, int64_t resValue, int32_t eventId)
210 {
211 if (resValue != INVALID_VALUE && resValue != RESET_VALUE) {
212 auto resAction = std::make_shared<ResAction>(resValue, 0, clientId, EVENT_ON, -1, MAX_INT_VALUE);
213 #ifdef SOCPERF_ADAPTOR_FFRT
214 threadWrap->UpdateLimitStatus(eventId, resAction, resId);
215 #else
216 auto event = AppExecFwk::InnerEvent::Get(eventId, resAction, resId);
217 threadWrap->SendEvent(event);
218 #endif
219 limitRequest_[clientId].insert(std::pair<int32_t, int32_t>(resId, resValue));
220 }
221 }
222
SendLimitRequestEvent(int32_t clientId,int32_t resId,int64_t resValue)223 void SocPerf::SendLimitRequestEvent(int32_t clientId, int32_t resId, int64_t resValue)
224 {
225 int32_t eventId = 0;
226 int32_t realResId = 0;
227 int32_t levelResId = 0;
228 if (resId > RES_ID_ADDITION) {
229 realResId = resId - RES_ID_ADDITION;
230 levelResId = resId;
231 eventId = INNER_EVENT_ID_DO_FREQ_ACTION_LEVEL;
232 } else {
233 realResId = resId;
234 levelResId = resId + RES_ID_ADDITION;
235 eventId = INNER_EVENT_ID_DO_FREQ_ACTION;
236 }
237
238 if (!socPerfConfig_.IsValidResId(realResId)) {
239 return;
240 }
241 std::lock_guard<std::mutex> lock(mutex_);
242 SendLimitRequestEventOff(socperfThreadWrap_, clientId, realResId, INNER_EVENT_ID_DO_FREQ_ACTION);
243 SendLimitRequestEventOff(socperfThreadWrap_, clientId, levelResId, INNER_EVENT_ID_DO_FREQ_ACTION_LEVEL);
244 SendLimitRequestEventOn(socperfThreadWrap_, clientId, resId, resValue, eventId);
245 }
246
LimitRequest(int32_t clientId,const std::vector<int32_t> & tags,const std::vector<int64_t> & configs,const std::string & msg)247 void SocPerf::LimitRequest(int32_t clientId,
248 const std::vector<int32_t>& tags, const std::vector<int64_t>& configs, const std::string& msg)
249 {
250 if (!enabled_) {
251 SOC_PERF_LOGE("SocPerf disabled!");
252 return;
253 }
254 if (tags.size() != configs.size()) {
255 SOC_PERF_LOGE("tags'size and configs' size must be the same!");
256 return;
257 }
258 if (clientId <= (int32_t)ACTION_TYPE_PERF || clientId >= (int32_t)ACTION_TYPE_MAX) {
259 SOC_PERF_LOGE("clientId must be between ACTION_TYPE_PERF and ACTION_TYPE_MAX!");
260 return;
261 }
262 for (int32_t i = 0; i < (int32_t)tags.size(); i++) {
263 SOC_PERF_LOGI("clientId[%{public}d],tags[%{public}d],configs[%{public}lld],msg[%{public}s]",
264 clientId, tags[i], (long long)configs[i], msg.c_str());
265 SendLimitRequestEvent(clientId, tags[i], configs[i]);
266 }
267 }
268
SetRequestStatus(bool status,const std::string & msg)269 void SocPerf::SetRequestStatus(bool status, const std::string& msg)
270 {
271 SOC_PERF_LOGI("requestEnable is changed to %{public}d, the reason is %{public}s", status, msg.c_str());
272 perfRequestEnable_ = status;
273 /* disable socperf sever, we should clear all alive request to avoid high freq for long time */
274 if (!perfRequestEnable_) {
275 ClearAllAliveRequest();
276 }
277 }
278
ClearAllAliveRequest()279 void SocPerf::ClearAllAliveRequest()
280 {
281 if (!enabled_) {
282 SOC_PERF_LOGE("SocPerf disabled!");
283 return;
284 }
285 #ifdef SOCPERF_ADAPTOR_FFRT
286 socperfThreadWrap_->ClearAllAliveRequest();
287 #else
288 auto event = AppExecFwk::InnerEvent::Get(INNER_EVENT_ID_CLEAR_ALL_ALIVE_REQUEST);
289 socperfThreadWrap_->SendEvent(event);
290 #endif
291 }
292
// Record the current system thermal level; DoFreqActions consults it to decide
// whether a boost action's thermal variant should be triggered.
void SocPerf::SetThermalLevel(int32_t level)
{
    thermalLvl_ = level;
}
297
DoPerfRequestThremalLvl(int32_t cmdId,std::shared_ptr<Action> action,int32_t onOff)298 bool SocPerf::DoPerfRequestThremalLvl(int32_t cmdId, std::shared_ptr<Action> action, int32_t onOff)
299 {
300 if (socPerfConfig_.perfActionsInfo_[action->thermalCmdId_] == nullptr) {
301 SOC_PERF_LOGE("cmd %{public}d is not exist", action->thermalCmdId_);
302 return false;
303 }
304 // init DoFreqActions param
305 std::string thermalLvlTag = std::string("ThremalLvl_").append(std::to_string(action->thermalCmdId_))
306 .append("_").append(std::to_string(thermalLvl_));
307 std::shared_ptr<Actions> perfLvlActionCmd = std::make_shared<Actions>(cmdId, thermalLvlTag);
308 std::shared_ptr<Action> perfLvlAction = std::make_shared<Action>();
309 // perfrequest thermal level action's duration is same as trigger
310 perfLvlAction->duration = action->duration;
311 std::shared_ptr<Actions> cmdConfig = socPerfConfig_.perfActionsInfo_[action->thermalCmdId_];
312
313 // select the Nearest thermallevel action
314 std::shared_ptr<Action> actionConfig = *(cmdConfig->actionList.begin());
315 for (auto iter = cmdConfig->actionList.begin(); iter != cmdConfig->actionList.end(); iter++) {
316 if (perfLvlAction->thermalLvl_ <= (*iter)->thermalLvl_ && (*iter)->thermalLvl_ <= thermalLvl_) {
317 actionConfig = *iter;
318 }
319 }
320 if (thermalLvl_ < actionConfig->thermalLvl_) {
321 SOC_PERF_LOGE("thermal level is too low to trigger perf request level");
322 return false;
323 }
324
325 // fill in the item of perfLvlAction
326 perfLvlAction->thermalLvl_ = actionConfig->thermalLvl_;
327 perfLvlAction->thermalCmdId_ = INVALID_THERMAL_CMD_ID;
328 for (uint32_t i = 0; i < actionConfig->variable.size(); i++) {
329 perfLvlAction->variable.push_back(actionConfig->variable[i]);
330 }
331 perfLvlActionCmd->actionList.push_back(perfLvlAction);
332
333 // send cmd to socperf server wrapper
334 DoFreqActions(perfLvlActionCmd, onOff, ACTION_TYPE_PERFLVL);
335 return true;
336 }
337
/**
 * @brief Expand an Actions command into a linked list of per-resource
 *        ResAction items and hand the list to the worker thread.
 *
 * For each (resId, value) pair in every action's variable array a
 * ResActionItem is appended to a singly linked list headed by `header`.
 * Actions that carry a thermal cmd id additionally trigger their
 * thermal-level variant via DoPerfRequestThremalLvl.
 *
 * @param actions    parsed command (id + list of actions) to execute
 * @param onOff      EVENT_ON/EVENT_OFF/EVENT_INVALID request edge
 * @param actionType ACTION_TYPE_* of the requester
 */
void SocPerf::DoFreqActions(std::shared_ptr<Actions> actions, int32_t onOff, int32_t actionType)
{
    std::shared_ptr<ResActionItem> header = nullptr;
    std::shared_ptr<ResActionItem> curItem = nullptr;
    auto now = std::chrono::system_clock::now();
    int64_t curMs = std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count();
    for (auto iter = actions->actionList.begin(); iter != actions->actionList.end(); iter++) {
        std::shared_ptr<Action> action = *iter;
        // process thermal level
        if (action->thermalCmdId_ != INVALID_THERMAL_CMD_ID && thermalLvl_ > MIN_THERMAL_LVL) {
            DoPerfRequestThremalLvl(actions->id, action, onOff);
        }
        // variable holds flat (resId, value) pairs; step by the pair width and
        // stop before a trailing unpaired element.
        for (int32_t i = 0; i < (int32_t)action->variable.size() - 1; i += RES_ID_AND_VALUE_PAIR) {
            if (!socPerfConfig_.IsValidResId(action->variable[i])) {
                continue;
            }

            // A one-shot request (EVENT_INVALID) with duration 0 would never
            // expire, so it is skipped.
            if (onOff == EVENT_INVALID && action->duration == 0) {
                continue;
            }

            auto resActionItem = std::make_shared<ResActionItem>(action->variable[i]);
            // duration 0 means "until explicitly cancelled".
            int64_t endTime = action->duration == 0 ? MAX_INT_VALUE : curMs + action->duration;
            resActionItem->resAction = std::make_shared<ResAction>(action->variable[i + 1], action->duration,
                actionType, onOff, actions->id, endTime);
            if (curItem) {
                curItem->next = resActionItem;
            } else {
                header = resActionItem;
            }
            curItem = resActionItem;
        }
    }
#ifdef SOCPERF_ADAPTOR_FFRT
    socperfThreadWrap_->DoFreqActionPack(header);
    socperfThreadWrap_->PostDelayTask(header);
#else
    auto event = AppExecFwk::InnerEvent::Get(INNER_EVENT_ID_DO_FREQ_ACTION_PACK, header);
    socperfThreadWrap_->SendEvent(event);
    // Schedule one delayed event per item so each resource action is unwound
    // after its own duration.
    std::shared_ptr<ResActionItem> queueHead = header;
    while (queueHead) {
        auto eventRes = AppExecFwk::InnerEvent::Get(INNER_EVENT_ID_DO_FREQ_ACTION_DELAYED, queueHead->resAction,
            queueHead->resId);
        socperfThreadWrap_->SendEvent(eventRes, queueHead->resAction->duration);
        queueHead = queueHead->next;
    }
#endif
}
386
RequestDeviceMode(const std::string & mode,bool status)387 void SocPerf::RequestDeviceMode(const std::string& mode, bool status)
388 {
389 SOC_PERF_LOGD("device mode %{public}s status changed to %{public}d", mode.c_str(), status);
390
391 if (mode.empty() || mode.length() > MAX_RES_MODE_LEN) {
392 return;
393 }
394
395 auto iter = MUTEX_MODE.find(mode);
396 std::lock_guard<std::mutex> lock(mutexDeviceMode_);
397 if (status) {
398 if (iter != MUTEX_MODE.end()) {
399 for (auto res : iter->second) {
400 recordDeviceMode_.erase(res);
401 }
402 }
403 recordDeviceMode_.insert(mode);
404 } else {
405 recordDeviceMode_.erase(mode);
406 }
407 }
408
MatchDeviceModeCmd(int32_t cmdId,bool isTagOnOff)409 int32_t SocPerf::MatchDeviceModeCmd(int32_t cmdId, bool isTagOnOff)
410 {
411 std::shared_ptr<Actions> actions = socPerfConfig_.perfActionsInfo_[cmdId];
412 if (actions->modeMap.empty() || (isTagOnOff && actions->isLongTimePerf)) {
413 return cmdId;
414 }
415
416 std::lock_guard<std::mutex> lock(mutexDeviceMode_);
417 if (recordDeviceMode_.empty()) {
418 return cmdId;
419 }
420
421 for (auto mode : recordDeviceMode_) {
422 auto iter = actions->modeMap.find(mode);
423 if (iter != actions->modeMap.end()) {
424 int32_t deviceCmdId = iter->second;
425 if (socPerfConfig_.perfActionsInfo_.find(deviceCmdId) == socPerfConfig_.perfActionsInfo_.end()) {
426 SOC_PERF_LOGW("Invaild actions cmdid %{public}d", deviceCmdId);
427 return cmdId;
428 }
429 if (isTagOnOff && socPerfConfig_.perfActionsInfo_[deviceCmdId]->isLongTimePerf) {
430 SOC_PERF_LOGD("long time perf not match cmdId %{public}d", deviceCmdId);
431 return cmdId;
432 }
433 return deviceCmdId;
434 }
435 }
436 return cmdId;
437 }
438
UpdateCmdIdCount(int32_t cmdId)439 void SocPerf::UpdateCmdIdCount(int32_t cmdId)
440 {
441 std::lock_guard<std::mutex> lock(mutexBoostCmdCount_);
442 if (boostCmdCount_.find(cmdId) == boostCmdCount_.end()) {
443 boostCmdCount_[cmdId] = 0;
444 }
445 boostCmdCount_[cmdId]++;
446 }
447
RequestCmdIdCount(const std::string & msg)448 std::string SocPerf::RequestCmdIdCount(const std::string &msg)
449 {
450 std::lock_guard<std::mutex> lock(mutexBoostCmdCount_);
451 std::stringstream ret;
452 for (const auto& pair : boostCmdCount_) {
453 if (ret.str().length() > 0) {
454 ret << ",";
455 }
456 ret << pair.first << ":" << pair.second;
457 }
458 return ret.str();
459 }
460
CheckTimeInterval(bool onOff,int32_t cmdId)461 bool SocPerf::CheckTimeInterval(bool onOff, int32_t cmdId)
462 {
463 std::lock_guard<std::mutex> lock(mutexBoostTime_);
464 auto now = std::chrono::system_clock::now();
465 uint64_t curMs = static_cast<uint64_t>(
466 std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count());
467 int32_t cancelCmdId = cmdId + CANCEL_CMDID_PREFIX;
468 int32_t recordCmdId = cmdId;
469 if (onOff) {
470 boostTime_[cancelCmdId] = 0;
471 }
472 if (!onOff) {
473 recordCmdId = cancelCmdId;
474 }
475 if (boostTime_.find(recordCmdId) == boostTime_.end()) {
476 boostTime_[recordCmdId] = curMs;
477 return true;
478 }
479 if (curMs - boostTime_[recordCmdId] > TIME_INTERVAL) {
480 boostTime_[recordCmdId] = curMs;
481 return true;
482 }
483 return false;
484 }
485 } // namespace SOCPERF
486 } // namespace OHOS
487