1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "cgroup_adjuster.h"
17 
18 #include <unistd.h>
19 #include <fcntl.h>
20 #include <cerrno>
21 #include <string>
22 #include "app_mgr_constants.h"
23 #include "cgroup_event_handler.h"
24 #include "cgroup_sched_common.h"
25 #include "cgroup_sched_log.h"
26 #include "hitrace_meter.h"
27 #include "sched_controller.h"
28 #include "ressched_utils.h"
29 #include "res_type.h"
30 #include "wm_common.h"
31 
32 #undef LOG_TAG
33 #define LOG_TAG "CgroupAdjuster"
34 
35 namespace OHOS {
36 namespace ResourceSchedule {
37 using OHOS::AppExecFwk::ApplicationState;
38 using OHOS::AppExecFwk::AbilityState;
39 using OHOS::AppExecFwk::ExtensionState;
40 using OHOS::Rosen::WindowType;
41 constexpr uint32_t MAX_SIZE = 4096;
42 
GetInstance()43 CgroupAdjuster& CgroupAdjuster::GetInstance()
44 {
45     static CgroupAdjuster instance;
46     return instance;
47 }
48 
InitAdjuster()49 void CgroupAdjuster::InitAdjuster()
50 {
51     // Trigger load shared library
52     (void)ResSchedUtils::GetInstance();
53     auto handler = SchedController::GetInstance().GetCgroupEventHandler();
54     if (handler) {
55         handler->PostTask([this] {
56             this->AdjustSelfProcessGroup();
57         });
58     }
59 }
60 
AdjustForkProcessGroup(Application & app,ProcessRecord & pr)61 void CgroupAdjuster::AdjustForkProcessGroup(Application &app, ProcessRecord &pr)
62 {
63     std::string filePath = ResSchedUtils::GetInstance().GetProcessFilePath(app.GetUid(), app.GetName(), pr.GetPid());
64     int fd = open(filePath.c_str(), O_RDONLY);
65     if (fd < 0) {
66         CGS_LOGD("%{public}s File is not opened, error is %{public}s.",
67             __func__, strerror(errno));
68         return;
69     }
70     char fileContent[MAX_SIZE] = {0};
71     int rd = read(fd, fileContent, sizeof(fileContent));
72     if (rd < 0) {
73         CGS_LOGE("%{public}s Read File Error, error is %{public}s.",
74             __func__, strerror(errno));
75     }
76     const char *flag = "\n";
77     char *line = strtok(fileContent, flag);
78     while (line != NULL) {
79         int32_t forkPid = std::atoi(line);
80         if (forkPid == 0) {
81             CGS_LOGE("%{public}s str to int failed, str = %{public}s", __func__, line);
82             line = strtok(NULL, flag);
83             continue;
84         }
85         line = strtok(NULL, flag);
86         if (forkPid != pr.GetPid()) {
87             int ret = CgroupSetting::SetThreadGroupSchedPolicy(forkPid, pr.curSchedGroup_);
88             if (ret != 0) {
89                 CGS_LOGE("%{public}s set %{public}d to group %{public}d failed, ret = %{public}d!",
90                     __func__, forkPid, (int)pr.curSchedGroup_, ret);
91             }
92         } else {
93             continue;
94         }
95     }
96     close(fd);
97     return;
98 }
99 
// Recomputes and applies the sched group for one process record, then
// propagates the host process's group to its dependent (render/GPU/child)
// processes so they are always scheduled together with their host.
void CgroupAdjuster::AdjustProcessGroup(Application &app, ProcessRecord &pr, AdjustSource source)
{
    CGS_LOGI("%{public}s for %{public}d, source : %{public}d", __func__, pr.GetPid(), source);
    // Order matters: compute the target group first, report the arbitration
    // decision, then actually move the process into the cgroup.
    ComputeProcessGroup(app, pr, source);
    ResSchedUtils::GetInstance().ReportArbitrationResult(app, pr, source);
    ApplyProcessGroup(app, pr);

    // Only a host process drags dependent processes along; for any other
    // record type we are done.
    if (!app.IsHostProcess(pr.GetPid())) {
        return;
    }

    /* Let the sched group of render process, gpu process, and child process follow the sched group of host process */
    for (const auto &iter : app.GetPidsMap()) {
        const auto &procRecord = iter.second;
        if (procRecord && ((procRecord->processType_ == ProcRecordType::RENDER) ||
            (procRecord->processType_ == ProcRecordType::GPU) ||
            (procRecord->processType_ == ProcRecordType::CHILD))) {
            auto hostProcRecord = app.GetProcessRecord(procRecord->hostPid_);
            // Skip records that belong to a different host than the process
            // we just adjusted (or whose host record is gone).
            if (!hostProcRecord || (procRecord->hostPid_ != pr.GetPid())) {
                continue;
            }
            CGS_LOGD("%{public}s for %{public}d, source : %{public}d for render process",
                __func__, procRecord->GetPid(), source);
            // Dependent processes inherit the host's current group verbatim.
            procRecord->setSchedGroup_ = hostProcRecord->curSchedGroup_;
            // Arbitration is re-reported only for render processes, and for
            // GPU processes when the host is in the TOP_APP group.
            if (procRecord->processType_ == ProcRecordType::RENDER ||
                ((procRecord->processType_ == ProcRecordType::GPU) && (hostProcRecord->curSchedGroup_ == SP_TOP_APP))) {
                CGS_LOGI("%{public}s for %{public}d, source : %{public}d for render process",
                    __func__, procRecord->GetPid(), AdjustSource::ADJS_SELF_RENDER_THREAD);
                ResSchedUtils::GetInstance().ReportArbitrationResult(app, *(procRecord.get()),
                    AdjustSource::ADJS_SELF_RENDER_THREAD);
            }
            ApplyProcessGroup(app, *procRecord);
        }
    }
}
135 
AdjustAllProcessGroup(Application & app,AdjustSource source)136 void CgroupAdjuster::AdjustAllProcessGroup(Application &app, AdjustSource source)
137 {
138     for (auto &iter : app.GetPidsMap()) {
139         const auto &procRecord = iter.second;
140         if (procRecord && (procRecord->processType_ != ProcRecordType::RENDER) &&
141             (procRecord->processType_ != ProcRecordType::GPU) &&
142             (procRecord->processType_ != ProcRecordType::LINUX) &&
143             (procRecord->processType_ != ProcRecordType::CHILD)) {
144             AdjustProcessGroup(app, *procRecord, source);
145         }
146     }
147 }
148 
AdjustSelfProcessGroup()149 inline void CgroupAdjuster::AdjustSelfProcessGroup()
150 {
151     int pid = getpid();
152     int group = SP_FOREGROUND;
153     int ret = CgroupSetting::SetThreadGroupSchedPolicy(pid, group);
154     if (ret != 0) {
155         CGS_LOGE("%{public}s set %{public}d to group %{public}d failed, ret=%{public}d!", __func__, pid, group, ret);
156     }
157 }
158 
ComputeProcessGroup(Application & app,ProcessRecord & pr,AdjustSource source)159 void CgroupAdjuster::ComputeProcessGroup(Application &app, ProcessRecord &pr, AdjustSource source)
160 {
161     SchedPolicy group = SP_DEFAULT;
162 
163     {
164         ChronoScope cs("ComputeProcessGroup");
165         if (pr.processType_ == ProcRecordType::RENDER) {
166             auto hostProcRecord = app.GetProcessRecord(pr.hostPid_);
167             group = hostProcRecord ? hostProcRecord->curSchedGroup_ : SP_DEFAULT;
168         } else if (source == AdjustSource::ADJS_PROCESS_CREATE) {
169             group = SP_DEFAULT;
170         } else if (app.focusedProcess_ && (app.focusedProcess_->GetPid() == pr.GetPid())) {
171             group = SP_TOP_APP;
172         } else {
173             if (pr.abilities_.size() == 0) {
174                 group = SP_DEFAULT;
175                 if (pr.processState_ == (int32_t)ApplicationState::APP_STATE_BACKGROUND) {
176                     group = SP_BACKGROUND;
177                 }
178             } else if (pr.IsVisible()) {
179                 group = SP_FOREGROUND;
180             } else if (pr.HasServiceExtension()) {
181                 group = SP_DEFAULT;
182                 if (pr.processState_ == (int32_t)ApplicationState::APP_STATE_BACKGROUND) {
183                     group = SP_BACKGROUND;
184                 }
185             } else {
186                 if (pr.processState_ == (int32_t)ApplicationState::APP_STATE_BACKGROUND) {
187                     group = SP_BACKGROUND;
188                 } else if (pr.processState_ == (int32_t)ApplicationState::APP_STATE_FOREGROUND) {
189                     group = SP_FOREGROUND;
190                 } else {
191                     group = SP_DEFAULT;
192                 }
193             }
194         }
195         pr.setSchedGroup_ = group;
196     } // end ChronoScope
197 }
198 
ApplyProcessGroup(Application & app,ProcessRecord & pr)199 void CgroupAdjuster::ApplyProcessGroup(Application &app, ProcessRecord &pr)
200 {
201     ChronoScope cs("ApplyProcessGroup");
202     if (pr.curSchedGroup_ != pr.setSchedGroup_) {
203         pid_t pid = pr.GetPid();
204         int ret = CgroupSetting::SetThreadGroupSchedPolicy(pid, (int)pr.setSchedGroup_);
205         if (ret != 0) {
206             CGS_LOGE("%{public}s set %{public}d to group %{public}d failed, ret=%{public}d!",
207                 __func__, pid, pr.setSchedGroup_, ret);
208             return;
209         }
210 
211         pr.lastSchedGroup_ = pr.curSchedGroup_;
212         pr.curSchedGroup_ = pr.setSchedGroup_;
213         CGS_LOGI("%{public}s Set %{public}d's cgroup from %{public}d to %{public}d.",
214             __func__, pr.GetPid(), pr.lastSchedGroup_, pr.curSchedGroup_);
215 
216         std::string traceStr(__func__);
217         traceStr.append(" for ").append(std::to_string(pid)).append(", group change from ")
218             .append(std::to_string((int32_t)(pr.lastSchedGroup_))).append(" to ")
219             .append(std::to_string((int32_t)(pr.curSchedGroup_)));
220         StartTrace(HITRACE_TAG_OHOS, traceStr);
221 
222         nlohmann::json payload;
223         payload["pid"] = std::to_string(pr.GetPid());
224         payload["uid"] = std::to_string(pr.GetUid());
225         payload["name"] = app.GetName();
226         payload["oldGroup"] = std::to_string((int32_t)(pr.lastSchedGroup_));
227         payload["newGroup"] = std::to_string((int32_t)(pr.curSchedGroup_));
228         ResSchedUtils::GetInstance().ReportDataInProcess(ResType::RES_TYPE_CGROUP_ADJUSTER, 0, payload);
229         AdjustForkProcessGroup(app, pr);
230         FinishTrace(HITRACE_TAG_OHOS);
231     }
232 }
233 } // namespace ResourceSchedule
234 } // namespace OHOS
235