1 /*
2 * Copyright (c) 2023 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "memory/rs_memory_manager.h"
17
18 #include <fstream>
19 #include <malloc.h>
20 #include <sstream>
21 #include <string>
22 #include "include/core/SkGraphics.h"
23 #include "rs_trace.h"
24 #include "third_party/cJSON/cJSON.h"
25
26 #include "memory/rs_dfx_string.h"
27 #include "skia_adapter/rs_skia_memory_tracer.h"
28 #include "skia_adapter/skia_graphics.h"
29 #include "memory/rs_memory_graphic.h"
30 #include "include/gpu/GrDirectContext.h"
31 #include "src/gpu/GrDirectContextPriv.h"
32
33 #include "common/rs_background_thread.h"
34 #include "common/rs_obj_abs_geometry.h"
35 #include "common/rs_singleton.h"
36 #include "memory/rs_tag_tracker.h"
37 #ifdef NEW_RENDER_CONTEXT
38 #include "render_context/memory_handler.h"
39 #endif
40 #include "pipeline/rs_main_thread.h"
41 #include "pipeline/rs_surface_render_node.h"
42 #include "platform/common/rs_log.h"
43 #include "platform/common/rs_system_properties.h"
44
45 #include "app_mgr_client.h"
46 #include "hisysevent.h"
47 #include "image/gpu_context.h"
48
49 #ifdef RS_ENABLE_VK
50 #include "pipeline/rs_vk_image_manager.h"
51 #include "platform/ohos/backend/rs_vulkan_context.h"
52 #endif
53 #ifdef RES_SCHED_ENABLE
54 #include "res_sched_client.h"
55 #include "res_sched_kill_reason.h"
56 #endif
57
namespace OHOS::Rosen {
namespace {
// Path of the kernel leak watchpoint configuration consumed by InitMemoryLimit().
const std::string KERNEL_CONFIG_PATH = "/system/etc/hiview/kernel_leak_config.json";
constexpr uint32_t MEMUNIT_RATE = 1024; // bytes <-> KB <-> MB conversion factor
constexpr uint32_t MEMORY_REPORT_INTERVAL = 24 * 60 * 60 * 1000; // Each process can report at most once a day.
constexpr uint32_t FRAME_NUMBER = 10; // Check memory every ten frames.
// Section selectors accepted by DumpMemoryUsage().
constexpr const char* MEM_RS_TYPE = "renderservice";
constexpr const char* MEM_CPU_TYPE = "cpu";
constexpr const char* MEM_GPU_TYPE = "gpu";
constexpr const char* MEM_JEMALLOC_TYPE = "jemalloc";
constexpr const char* MEM_SNAPSHOT = "snapshot";
constexpr int DUPM_STRING_BUF_SIZE = 4000; // chunk size when relaying long stat strings to the log
}

// Static state: per-pid report bookkeeping (guarded by mutex_) and memory
// thresholds populated by InitMemoryLimit(); UINT64_MAX means "no limit set".
std::mutex MemoryManager::mutex_;
std::unordered_map<pid_t, std::pair<std::string, uint64_t>> MemoryManager::pidInfo_;
uint32_t MemoryManager::frameCount_ = 0;
uint64_t MemoryManager::memoryWarning_ = UINT64_MAX;
uint64_t MemoryManager::gpuMemoryControl_ = UINT64_MAX;
uint64_t MemoryManager::totalMemoryReportTime_ = 0;
78
DumpMemoryUsage(DfxString & log,std::string & type)79 void MemoryManager::DumpMemoryUsage(DfxString& log, std::string& type)
80 {
81 if (type.empty() || type == MEM_RS_TYPE) {
82 DumpRenderServiceMemory(log);
83 }
84 if (type.empty() || type == MEM_CPU_TYPE) {
85 DumpDrawingCpuMemory(log);
86 }
87 if (type.empty() || type == MEM_GPU_TYPE) {
88 RSUniRenderThread::Instance().DumpMem(log);
89 }
90 if (type.empty() || type == MEM_JEMALLOC_TYPE) {
91 std::string out;
92 DumpMallocStat(out);
93 log.AppendFormat("%s\n... detail dump at hilog\n", out.c_str());
94 }
95 if (type.empty() || type == MEM_SNAPSHOT) {
96 DumpMemorySnapshot(log);
97 }
98 }
99
ReleaseAllGpuResource(Drawing::GPUContext * gpuContext,Drawing::GPUResourceTag & tag)100 void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
101 {
102 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
103 if (!gpuContext) {
104 RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
105 return;
106 }
107 RS_TRACE_NAME_FMT("ReleaseAllGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
108 tag.fPid, tag.fTid, tag.fWid, tag.fFid);
109 gpuContext->ReleaseByTag(tag);
110 #endif
111 }
112
ReleaseAllGpuResource(Drawing::GPUContext * gpuContext,pid_t pid)113 void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, pid_t pid)
114 {
115 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
116 Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseAllGpuResource");
117 ReleaseAllGpuResource(gpuContext, tag);
118 #endif
119 }
120
ReleaseUnlockGpuResource(Drawing::GPUContext * gpuContext,Drawing::GPUResourceTag & tag)121 void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
122 {
123 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
124 if (!gpuContext) {
125 RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
126 return;
127 }
128 RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
129 tag.fPid, tag.fTid, tag.fWid, tag.fFid);
130 gpuContext->PurgeUnlockedResourcesByTag(false, tag);
131 #endif
132 }
133
ReleaseUnlockGpuResource(Drawing::GPUContext * gpuContext,std::set<pid_t> exitedPidSet)134 void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, std::set<pid_t> exitedPidSet)
135 {
136 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
137 if (!gpuContext) {
138 RS_LOGE("ReleaseGpuResByPid fail, gpuContext is nullptr");
139 return;
140 }
141 RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource exitedPidSet size: %d", exitedPidSet.size());
142 gpuContext->PurgeUnlockedResourcesByPid(false, exitedPidSet);
143 MemorySnapshot::Instance().EraseSnapshotInfoByPid(exitedPidSet);
144 ErasePidInfo(exitedPidSet);
145 #endif
146 }
147
PurgeCacheBetweenFrames(Drawing::GPUContext * gpuContext,bool scratchResourceOnly,std::set<pid_t> & exitedPidSet,std::set<pid_t> & protectedPidSet)148 void MemoryManager::PurgeCacheBetweenFrames(Drawing::GPUContext* gpuContext, bool scratchResourceOnly,
149 std::set<pid_t>& exitedPidSet, std::set<pid_t>& protectedPidSet)
150 {
151 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
152 if (!gpuContext) {
153 RS_LOGE("PurgeCacheBetweenFrames fail, gpuContext is nullptr");
154 return;
155 }
156 gpuContext->PurgeCacheBetweenFrames(scratchResourceOnly, exitedPidSet, protectedPidSet);
157 #endif
158 }
159
ReleaseUnlockGpuResource(Drawing::GPUContext * grContext,NodeId surfaceNodeId)160 void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, NodeId surfaceNodeId)
161 {
162 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
163 Drawing::GPUResourceTag tag(ExtractPid(surfaceNodeId), 0, 0, 0, "ReleaseUnlockGpuResource");
164 ReleaseUnlockGpuResource(grContext, tag);
165 #endif
166 }
167
ReleaseUnlockGpuResource(Drawing::GPUContext * grContext,pid_t pid)168 void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, pid_t pid)
169 {
170 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
171 Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
172 ReleaseUnlockGpuResource(grContext, tag); // clear gpu resource by pid
173 #endif
174 }
175
ReleaseUnlockGpuResource(Drawing::GPUContext * gpuContext,bool scratchResourcesOnly)176 void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, bool scratchResourcesOnly)
177 {
178 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
179 if (!gpuContext) {
180 RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
181 return;
182 }
183 RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource scratchResourcesOnly:%d", scratchResourcesOnly);
184 gpuContext->PurgeUnlockedResources(scratchResourcesOnly);
185 #endif
186 }
187
ReleaseUnlockAndSafeCacheGpuResource(Drawing::GPUContext * gpuContext)188 void MemoryManager::ReleaseUnlockAndSafeCacheGpuResource(Drawing::GPUContext* gpuContext)
189 {
190 #if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
191 if (!gpuContext) {
192 RS_LOGE("ReleaseUnlockAndSafeCacheGpuResource fail, gpuContext is nullptr");
193 return;
194 }
195 RS_TRACE_NAME_FMT("ReleaseUnlockAndSafeCacheGpuResource");
196 gpuContext->PurgeUnlockAndSafeCacheGpuResources();
197 #endif
198 }
199
SetGpuCacheSuppressWindowSwitch(Drawing::GPUContext * gpuContext,bool enabled)200 void MemoryManager::SetGpuCacheSuppressWindowSwitch(Drawing::GPUContext* gpuContext, bool enabled)
201 {
202 #if defined(RS_ENABLE_VK)
203 if (!gpuContext) {
204 RS_LOGE("SetGpuCacheSuppressWindowSwitch fail, gpuContext is nullptr");
205 return;
206 }
207 gpuContext->SetGpuCacheSuppressWindowSwitch(enabled);
208 #endif
209 }
210
SetGpuMemoryAsyncReclaimerSwitch(Drawing::GPUContext * gpuContext,bool enabled,const std::function<void ()> & setThreadPriority)211 void MemoryManager::SetGpuMemoryAsyncReclaimerSwitch(
212 Drawing::GPUContext* gpuContext, bool enabled, const std::function<void()>& setThreadPriority)
213 {
214 #if defined(RS_ENABLE_VK)
215 if (!gpuContext) {
216 RS_LOGE("SetGpuMemoryAsyncReclaimerSwitch fail, gpuContext is nullptr");
217 return;
218 }
219 gpuContext->SetGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
220 #endif
221 }
222
FlushGpuMemoryInWaitQueue(Drawing::GPUContext * gpuContext)223 void MemoryManager::FlushGpuMemoryInWaitQueue(Drawing::GPUContext* gpuContext)
224 {
225 #if defined(RS_ENABLE_VK)
226 if (!gpuContext) {
227 RS_LOGE("FlushGpuMemoryInWaitQueue fail, gpuContext is nullptr");
228 return;
229 }
230 gpuContext->FlushGpuMemoryInWaitQueue();
231 #endif
232 }
233
SuppressGpuCacheBelowCertainRatio(Drawing::GPUContext * gpuContext,const std::function<bool (void)> & nextFrameHasArrived)234 void MemoryManager::SuppressGpuCacheBelowCertainRatio(
235 Drawing::GPUContext* gpuContext, const std::function<bool(void)>& nextFrameHasArrived)
236 {
237 #if defined(RS_ENABLE_VK)
238 if (!gpuContext) {
239 RS_LOGE("SuppressGpuCacheBelowCertainRatio fail, gpuContext is nullptr");
240 return;
241 }
242 gpuContext->SuppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
243 #endif
244 }
245
GetAppGpuMemoryInMB(Drawing::GPUContext * gpuContext)246 float MemoryManager::GetAppGpuMemoryInMB(Drawing::GPUContext* gpuContext)
247 {
248 if (!gpuContext) {
249 return 0.f;
250 }
251 #if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
252 Drawing::TraceMemoryDump trace("category", true);
253 gpuContext->DumpMemoryStatistics(&trace);
254 auto total = trace.GetGpuMemorySizeInMB();
255 float rsMemSize = 0.f;
256 for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
257 Drawing::GPUResourceTag resourceTag(0, 0, 0, tagtype,
258 RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype)));
259 Drawing::TraceMemoryDump gpuTrace("category", true);
260 gpuContext->DumpMemoryStatisticsByTag(&gpuTrace, resourceTag);
261 rsMemSize += gpuTrace.GetGpuMemorySizeInMB();
262 }
263 return total - rsMemSize;
264 #else
265 return 0.f;
266 #endif
267 }
268
DumpPidMemory(DfxString & log,int pid,const Drawing::GPUContext * gpuContext)269 void MemoryManager::DumpPidMemory(DfxString& log, int pid, const Drawing::GPUContext* gpuContext)
270 {
271 MemoryGraphic mem = CountPidMemory(pid, gpuContext);
272 log.AppendFormat("GPU Mem(MB):%f\n", mem.GetGpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
273 log.AppendFormat("CPU Mem(KB):%f\n", mem.GetCpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
274 log.AppendFormat("Total Mem(MB):%f\n", mem.GetTotalMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
275 }
276
CountPidMemory(int pid,const Drawing::GPUContext * gpuContext)277 MemoryGraphic MemoryManager::CountPidMemory(int pid, const Drawing::GPUContext* gpuContext)
278 {
279 MemoryGraphic totalMemGraphic;
280
281 // Count mem of RS
282 totalMemGraphic.SetPid(pid);
283
284 #if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
285 // Count mem of Skia GPU
286 if (gpuContext) {
287 Drawing::TraceMemoryDump gpuTracer("category", true);
288 Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
289 gpuContext->DumpMemoryStatisticsByTag(&gpuTracer, tag);
290 float gpuMem = gpuTracer.GetGLMemorySize();
291 totalMemGraphic.IncreaseGpuMemory(gpuMem);
292 }
293 #endif
294
295 return totalMemGraphic;
296 }
297
CountMemory(std::vector<pid_t> pids,const Drawing::GPUContext * gpuContext,std::vector<MemoryGraphic> & mems)298 void MemoryManager::CountMemory(
299 std::vector<pid_t> pids, const Drawing::GPUContext* gpuContext, std::vector<MemoryGraphic>& mems)
300 {
301 auto countMem = [&gpuContext, &mems] (pid_t pid) {
302 mems.emplace_back(CountPidMemory(pid, gpuContext));
303 };
304 // Count mem of Skia GPU
305 std::for_each(pids.begin(), pids.end(), countMem);
306 }
307
// Resolves a render node id to its owning window.
// Returns { windowId, windowName, absolute frame rect }:
//  - unknown node                  -> { nodeId, "NONE", empty rect }
//  - node with no ancestor surface -> { nodeId, "EXISTS-BUT-NO-SURFACE", rect }
//  - otherwise                     -> { surface id, surface name, rect }
static std::tuple<uint64_t, std::string, RectI> FindGeoById(uint64_t nodeId)
{
    const auto& nodeMap = RSMainThread::Instance()->GetContext().GetNodeMap();
    auto node = nodeMap.GetRenderNode<RSRenderNode>(nodeId);
    uint64_t windowId = nodeId;
    std::string windowName = "NONE";
    RectI nodeFrameRect;
    if (!node) {
        return { windowId, windowName, nodeFrameRect };
    }
    // NOTE(review): assumes GetBoundsGeometry() never returns null for a live
    // node -- confirm; a null here would crash the dump path.
    nodeFrameRect =
        (node->GetRenderProperties().GetBoundsGeometry())->GetAbsRect();
    // Obtain the window according to childId: walk up the parent chain until
    // the first surface node is found.
    auto parent = node->GetParent().lock();
    bool windowsNameFlag = false;
    while (parent) {
        if (parent->IsInstanceOf<RSSurfaceRenderNode>()) {
            const auto& surfaceNode = RSBaseRenderNode::ReinterpretCast<RSSurfaceRenderNode>(parent);
            windowName = surfaceNode->GetName();
            windowId = surfaceNode->GetId();
            windowsNameFlag = true;
            break;
        }
        parent = parent->GetParent().lock();
    }
    if (!windowsNameFlag) {
        windowName = "EXISTS-BUT-NO-SURFACE";
    }
    return { windowId, windowName, nodeFrameRect };
}
338
DumpRenderServiceMemory(DfxString & log)339 void MemoryManager::DumpRenderServiceMemory(DfxString& log)
340 {
341 log.AppendFormat("\n----------\nRenderService caches:\n");
342 MemoryTrack::Instance().DumpMemoryStatistics(log, FindGeoById);
343 }
344
DumpDrawingCpuMemory(DfxString & log)345 void MemoryManager::DumpDrawingCpuMemory(DfxString& log)
346 {
347 // CPU
348 std::string cpuInfo = "Skia CPU caches : pid:" + std::to_string(getpid()) +
349 ", threadId:" + std::to_string(gettid());
350 #ifdef ROSEN_OHOS
351 char threadName[16]; // thread name is restricted to 16 bytes
352 auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
353 if (result == 0) {
354 cpuInfo = cpuInfo + ", threadName: " + threadName;
355 }
356 #endif
357 log.AppendFormat("\n----------\n%s\n", cpuInfo.c_str());
358 log.AppendFormat("Font Cache (CPU):\n");
359 log.AppendFormat(" Size: %.2f kB \n", Drawing::SkiaGraphics::GetFontCacheUsed() / MEMUNIT_RATE);
360 log.AppendFormat(" Glyph Count: %d \n", Drawing::SkiaGraphics::GetFontCacheCountUsed());
361
362 std::vector<ResourcePair> cpuResourceMap = {
363 { "skia/sk_resource_cache/bitmap_", "Bitmaps" },
364 { "skia/sk_resource_cache/rrect-blur_", "Masks" },
365 { "skia/sk_resource_cache/rects-blur_", "Masks" },
366 { "skia/sk_resource_cache/tessellated", "Shadows" },
367 { "skia/sk_resource_cache/yuv-planes_", "YUVPlanes" },
368 { "skia/sk_resource_cache/budget_glyph_count", "Bitmaps" },
369 };
370 SkiaMemoryTracer cpuTracer(cpuResourceMap, true);
371 Drawing::SkiaGraphics::DumpMemoryStatistics(&cpuTracer);
372 log.AppendFormat("CPU Cachesxx:\n");
373 cpuTracer.LogOutput(log);
374 log.AppendFormat("Total CPU memory usage:\n");
375 cpuTracer.LogTotals(log);
376
377 // cache limit
378 size_t cacheLimit = Drawing::SkiaGraphics::GetResourceCacheTotalByteLimit();
379 size_t fontCacheLimit = Drawing::SkiaGraphics::GetFontCacheLimit();
380 log.AppendFormat("\ncpu cache limit = %zu ( fontcache = %zu ):\n", cacheLimit, fontCacheLimit);
381 }
382
DumpGpuCache(DfxString & log,const Drawing::GPUContext * gpuContext,Drawing::GPUResourceTag * tag,std::string & name)383 void MemoryManager::DumpGpuCache(
384 DfxString& log, const Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag* tag, std::string& name)
385 {
386 if (!gpuContext) {
387 log.AppendFormat("gpuContext is nullptr.\n");
388 return;
389 }
390 /* GPU */
391 #if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
392 log.AppendFormat("\n---------------\nSkia GPU Caches:%s\n", name.c_str());
393 Drawing::TraceMemoryDump gpuTracer("category", true);
394 if (tag) {
395 gpuContext->DumpMemoryStatisticsByTag(&gpuTracer, *tag);
396 } else {
397 gpuContext->DumpMemoryStatistics(&gpuTracer);
398 #ifdef RS_ENABLE_VK
399 RsVulkanMemStat& memStat = RsVulkanContext::GetSingleton().GetRsVkMemStat();
400 memStat.DumpMemoryStatistics(&gpuTracer);
401 #endif
402 }
403 gpuTracer.LogOutput(log);
404 log.AppendFormat("Total GPU memory usage:\n");
405 gpuTracer.LogTotals(log);
406 #endif
407 }
408
DumpAllGpuInfo(DfxString & log,const Drawing::GPUContext * gpuContext,std::vector<std::pair<NodeId,std::string>> & nodeTags)409 void MemoryManager::DumpAllGpuInfo(DfxString& log, const Drawing::GPUContext* gpuContext,
410 std::vector<std::pair<NodeId, std::string>>& nodeTags)
411 {
412 if (!gpuContext) {
413 log.AppendFormat("No valid gpu cache instance.\n");
414 return;
415 }
416 #if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
417 for (auto& nodeTag : nodeTags) {
418 Drawing::GPUResourceTag tag(ExtractPid(nodeTag.first), 0, nodeTag.first, 0, nodeTag.second);
419 DumpGpuCache(log, gpuContext, &tag, nodeTag.second);
420 }
421 #endif
422 }
423
DumpDrawingGpuMemory(DfxString & log,const Drawing::GPUContext * gpuContext,std::vector<std::pair<NodeId,std::string>> & nodeTags)424 void MemoryManager::DumpDrawingGpuMemory(DfxString& log, const Drawing::GPUContext* gpuContext,
425 std::vector<std::pair<NodeId, std::string>>& nodeTags)
426 {
427 if (!gpuContext) {
428 log.AppendFormat("No valid gpu cache instance.\n");
429 return;
430 }
431 /* GPU */
432 #if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
433 std::string gpuInfo = "pid:" + std::to_string(getpid()) + ", threadId:" + std::to_string(gettid());
434 #ifdef ROSEN_OHOS
435 char threadName[16]; // thread name is restricted to 16 bytes
436 auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
437 if (result == 0) {
438 gpuInfo = gpuInfo + ", threadName: " + threadName;
439 }
440 #endif
441 // total
442 DumpGpuCache(log, gpuContext, nullptr, gpuInfo);
443 // Get memory of window by tag
444 DumpAllGpuInfo(log, gpuContext, nodeTags);
445 for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
446 std::string tagTypeName = RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype));
447 Drawing::GPUResourceTag tag(0, 0, 0, tagtype, tagTypeName);
448 DumpGpuCache(log, gpuContext, &tag, tagTypeName);
449 }
450 // cache limit
451 size_t cacheLimit = 0;
452 size_t cacheUsed = 0;
453 gpuContext->GetResourceCacheLimits(nullptr, &cacheLimit);
454 gpuContext->GetResourceCacheUsage(nullptr, &cacheUsed);
455 log.AppendFormat("\ngpu limit = %zu ( used = %zu ):\n", cacheLimit, cacheUsed);
456
457 /* ShaderCache */
458 log.AppendFormat("\n---------------\nShader Caches:\n");
459 #ifdef NEW_RENDER_CONTEXT
460 log.AppendFormat(MemoryHandler::QuerryShader().c_str());
461 #else
462 std::shared_ptr<RenderContext> rendercontext = std::make_shared<RenderContext>();
463 log.AppendFormat(rendercontext->GetShaderCacheSize().c_str());
464 #endif
465 // gpu stat
466 DumpGpuStats(log, gpuContext);
467 #endif
468 }
469
DumpGpuStats(DfxString & log,const Drawing::GPUContext * gpuContext)470 void MemoryManager::DumpGpuStats(DfxString& log, const Drawing::GPUContext* gpuContext)
471 {
472 log.AppendFormat("\n---------------\ndumpGpuStats:\n");
473 std::string stat;
474 gpuContext->DumpGpuStats(stat);
475
476 int statIndex = 0;
477 int statLength = stat.length();
478 while (statIndex < statLength) {
479 std::string statSubStr;
480 if (statLength - statIndex > DUPM_STRING_BUF_SIZE) {
481 statSubStr = stat.substr(statIndex, DUPM_STRING_BUF_SIZE);
482 statIndex += DUPM_STRING_BUF_SIZE;
483 } else {
484 statSubStr = stat.substr(statIndex, statLength - statIndex);
485 statIndex = statLength;
486 }
487 log.AppendFormat("%s", statSubStr.c_str());
488 }
489 log.AppendFormat("\ndumpGpuStats end\n---------------\n");
490 }
491
DumpMallocStat(std::string & log)492 void MemoryManager::DumpMallocStat(std::string& log)
493 {
494 malloc_stats_print(
495 [](void* fp, const char* str) {
496 if (!fp) {
497 RS_LOGE("DumpMallocStat fp is nullptr");
498 return;
499 }
500 std::string* sp = static_cast<std::string*>(fp);
501 if (str) {
502 // cause log only support 2096 len. we need to only output critical log
503 // and only put total log in RSLOG
504 // get allocated string
505 if (strncmp(str, "Allocated", strlen("Allocated")) == 0) {
506 sp->append(str);
507 }
508 RS_LOGW("[mallocstat]:%{public}s", str);
509 }
510 },
511 &log, nullptr);
512 }
513
DumpMemorySnapshot(DfxString & log)514 void MemoryManager::DumpMemorySnapshot(DfxString& log)
515 {
516 log.AppendFormat("\n---------------\nmemorySnapshots:\n");
517 std::unordered_map<pid_t, MemorySnapshotInfo> memorySnapshotInfo;
518 MemorySnapshot::Instance().GetMemorySnapshot(memorySnapshotInfo);
519 for (auto& [pid, snapshotInfo] : memorySnapshotInfo) {
520 std::string infoStr = "pid: " + std::to_string(pid) +
521 ", cpu: " + std::to_string(snapshotInfo.cpuMemory) +
522 ", gpu: " + std::to_string(snapshotInfo.gpuMemory);
523 log.AppendFormat("%s\n", infoStr.c_str());
524 }
525 }
526
ParseMemoryLimit(const cJSON * json,const char * name)527 uint64_t ParseMemoryLimit(const cJSON* json, const char* name)
528 {
529 cJSON* jsonItem = cJSON_GetObjectItem(json, name);
530 if (jsonItem != nullptr && cJSON_IsNumber(jsonItem)) {
531 return static_cast<uint64_t>(jsonItem->valueint) * MEMUNIT_RATE * MEMUNIT_RATE;
532 }
533 return UINT64_MAX;
534 }
535
// Loads per-process and total memory thresholds from the kernel leak config
// file and registers them (with MemoryOverflow as the violation callback)
// on MemorySnapshot. Any missing or malformed entry leaves the corresponding
// limit at its UINT64_MAX ("no limit") default.
void MemoryManager::InitMemoryLimit()
{
    // Read the whole JSON config file into a string.
    std::ifstream configFile;
    configFile.open(KERNEL_CONFIG_PATH);
    std::stringstream filterParamsStream;
    filterParamsStream << configFile.rdbuf();
    configFile.close();
    std::string paramsString = filterParamsStream.str();

    cJSON* root = cJSON_Parse(paramsString.c_str());
    if (root == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not parse config to json");
        return;
    }
    // Thresholds live under KernelLeak -> <version type> -> rs_watchpoint.
    cJSON* kernelLeak = cJSON_GetObjectItem(root, "KernelLeak");
    if (kernelLeak == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find kernelLeak");
        cJSON_Delete(root);
        return;
    }
    cJSON* version = cJSON_GetObjectItem(kernelLeak, RSSystemProperties::GetVersionType().c_str());
    if (version == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find version");
        cJSON_Delete(root);
        return;
    }
    cJSON* rsWatchPoint = cJSON_GetObjectItem(version, "rs_watchpoint");
    if (rsWatchPoint == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find rsWatchPoint");
        cJSON_Delete(root);
        return;
    }
    // warning threshold for total memory of a single process
    memoryWarning_ = ParseMemoryLimit(rsWatchPoint, "process_warning_threshold");
    // error threshold for cpu memory of a single process
    uint64_t cpuMemoryControl = ParseMemoryLimit(rsWatchPoint, "process_cpu_control_threshold");
    // error threshold for gpu memory of a single process
    gpuMemoryControl_ = ParseMemoryLimit(rsWatchPoint, "process_gpu_control_threshold");
    // threshold for the total memory of all processes in renderservice
    uint64_t totalMemoryWarning = ParseMemoryLimit(rsWatchPoint, "total_threshold");
    cJSON_Delete(root);

    MemorySnapshot::Instance().InitMemoryLimit(MemoryOverflow, memoryWarning_, cpuMemoryControl, totalMemoryWarning);
}
580
SetGpuMemoryLimit(Drawing::GPUContext * gpuContext)581 void MemoryManager::SetGpuMemoryLimit(Drawing::GPUContext* gpuContext)
582 {
583 if (gpuContext == nullptr || gpuMemoryControl_ == UINT64_MAX) {
584 RS_LOGW("MemoryManager::SetGpuMemoryLimit gpuContext is nullptr or gpuMemoryControl_ is uninitialized");
585 return;
586 }
587 gpuContext->InitGpuMemoryLimit(MemoryOverflow, gpuMemoryControl_);
588 }
589
// Periodic memory watchdog, invoked once per frame; does real work only
// every FRAME_NUMBER frames. Pulls the per-pid GPU memory updates from the
// context and posts the heavy part (snapshot update + HiSysEvent reporting)
// to the background thread so the render loop is not blocked.
void MemoryManager::MemoryOverCheck(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    frameCount_++;
    if (!gpuContext || frameCount_ < FRAME_NUMBER) {
        return;
    }
    frameCount_ = 0;
    std::unordered_map<pid_t, size_t> gpuMemory;
    gpuContext->GetUpdatedMemoryMap(gpuMemory);

    auto task = [gpuMemory = std::move(gpuMemory)]() {
        std::unordered_map<pid_t, MemorySnapshotInfo> infoMap;
        bool isTotalOver = false;
        MemorySnapshot::Instance().UpdateGpuMemoryInfo(gpuMemory, infoMap, isTotalOver);
        auto now = std::chrono::steady_clock::now().time_since_epoch();
        uint64_t currentTime = std::chrono::duration_cast<std::chrono::milliseconds>(now).count();
        // total memory overflow of all processes in renderservice
        if (isTotalOver && currentTime > totalMemoryReportTime_) {
            TotalMemoryOverReport(infoMap);
            totalMemoryReportTime_ = currentTime + MEMORY_REPORT_INTERVAL;
        }

        std::string bundleName;
        bool needReport = false;
        for (const auto& [pid, memoryInfo] : infoMap) {
            if (memoryInfo.TotalMemory() <= memoryWarning_) {
                continue;
            }
            needReport = false;
            {
                // pidInfo_ caches each process's bundle name and the earliest
                // time it may be reported again, rate-limiting warnings to one
                // per MEMORY_REPORT_INTERVAL per process.
                std::lock_guard<std::mutex> lock(mutex_);
                auto it = pidInfo_.find(pid);
                if (it == pidInfo_.end()) {
                    int32_t uid;
                    auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
                    appMgrClient.GetBundleNameByPid(pid, bundleName, uid);
                    pidInfo_.emplace(pid, std::make_pair(bundleName, currentTime + MEMORY_REPORT_INTERVAL));
                    needReport = true;
                } else if (currentTime > it->second.second) {
                    it->second.second = currentTime + MEMORY_REPORT_INTERVAL;
                    bundleName = it->second.first;
                    needReport = true;
                }
            }
            if (needReport) {
                // Report outside the lock to keep the critical section short.
                MemoryOverReport(pid, memoryInfo, bundleName, "RENDER_MEMORY_OVER_WARNING");
            }
        }
    };
    RSBackgroundThread::Instance().PostTask(task);
#endif
}
643
// Asks the resource scheduler to kill `pid`; on a successful kill, records a
// PROCESS_KILL fault event. Compiles to a no-op unless RES_SCHED_ENABLE.
static void KillProcessByPid(const pid_t pid, const std::string& processName, const std::string& reason)
{
#ifdef RES_SCHED_ENABLE
    std::unordered_map<std::string, std::string> killInfo;
    killInfo["pid"] = std::to_string(pid);
    killInfo["processName"] = processName;
    killInfo["killReason"] = reason;
    if (pid > 0) {
        int32_t eventWriteStatus = -1;
        int32_t killStatus = ResourceSchedule::ResSchedClient::GetInstance().KillProcess(killInfo);
        if (killStatus == 0) {
            eventWriteStatus = HiSysEventWrite(HiviewDFX::HiSysEvent::Domain::FRAMEWORK, "PROCESS_KILL",
                HiviewDFX::HiSysEvent::EventType::FAULT, "PID", pid, "PROCESS_NAME", processName,
                "MSG", reason, "FOREGROUND", false);
        }
        // To prevent the print from being filtered, use RS_LOGE.
        RS_LOGE("KillProcessByPid, pid: %{public}d, process name: %{public}s, "
            "killStatus: %{public}d, eventWriteStatus: %{public}d, reason: %{public}s",
            static_cast<int32_t>(pid), processName.c_str(), killStatus, eventWriteStatus, reason.c_str());
    }
#endif
}
666
// Callback invoked when a process crosses a memory control threshold.
// Dumps a memory snapshot to the log, emits a RENDER_MEMORY_OVER_ERROR
// event, and kills the offending process. When `isGpu` is true,
// `overflowMemory` replaces the snapshot's gpu figure.
void MemoryManager::MemoryOverflow(pid_t pid, size_t overflowMemory, bool isGpu)
{
    MemorySnapshotInfo info;
    MemorySnapshot::Instance().GetMemorySnapshotInfoByPid(pid, info);
    if (isGpu) {
        info.gpuMemory = overflowMemory;
    }
    int32_t uid; // out-param for GetBundleNameByPid; value itself unused here
    std::string bundleName;
    auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
    appMgrClient.GetBundleNameByPid(pid, bundleName, uid);
    // Dump the snapshot on the main thread, relaying it to the log line by line.
    RSMainThread::Instance()->PostTask([]() {
        RS_TRACE_NAME_FMT("RSMem Dump Task");
        std::unordered_set<std::u16string> argSets;
        std::string dumpString = "";
        std::string type = MEM_SNAPSHOT;
        RSMainThread::Instance()->DumpMem(argSets, dumpString, type, 0);
        RS_LOGI("=======================RSMem Dump Info=======================");
        std::istringstream stream(dumpString);
        std::string line;
        while (std::getline(stream, line)) {
            RS_LOGI("%{public}s", line.c_str());
        }
        RS_LOGI("=============================================================");
    });
    std::string reason = "RENDER_MEMORY_OVER_ERROR: cpu[" + std::to_string(info.cpuMemory)
        + "], gpu[" + std::to_string(info.gpuMemory) + "], total["
        + std::to_string(info.TotalMemory()) + "]";
    MemoryOverReport(pid, info, bundleName, "RENDER_MEMORY_OVER_ERROR");
    KillProcessByPid(pid, bundleName, reason);
    RS_LOGE("RSMemoryOverflow pid[%{public}d] cpu[%{public}zu] gpu[%{public}zu]", pid, info.cpuMemory, info.gpuMemory);
}
699
// Writes a GRAPHIC-domain statistic event named `reportName` (either
// RENDER_MEMORY_OVER_WARNING or RENDER_MEMORY_OVER_ERROR) carrying the
// process's cpu/gpu/total memory figures, then logs the write result.
void MemoryManager::MemoryOverReport(const pid_t pid, const MemorySnapshotInfo& info, const std::string& bundleName,
    const std::string& reportName)
{
    int ret = HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::GRAPHIC, reportName,
        OHOS::HiviewDFX::HiSysEvent::EventType::STATISTIC, "PID", pid,
        "BUNDLE_NAME", bundleName,
        "CPU_MEMORY", info.cpuMemory,
        "GPU_MEMORY", info.gpuMemory,
        "TOTAL_MEMORY", info.TotalMemory());
    RS_LOGW("RSMemoryOverReport pid[%{public}d] bundleName[%{public}s] cpu[%{public}zu] "
        "gpu[%{public}zu] total[%{public}zu] ret[%{public}d]",
        pid, bundleName.c_str(), info.cpuMemory, info.gpuMemory, info.TotalMemory(), ret);
}
713
TotalMemoryOverReport(const std::unordered_map<pid_t,MemorySnapshotInfo> & infoMap)714 void MemoryManager::TotalMemoryOverReport(const std::unordered_map<pid_t, MemorySnapshotInfo>& infoMap)
715 {
716 std::ostringstream oss;
717 for (const auto& info : infoMap) {
718 oss << info.first << '_' << info.second.TotalMemory() << ' ';
719 }
720 HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::GRAPHIC, "RENDER_MEMORY_OVER_TOTAL_ERROR",
721 OHOS::HiviewDFX::HiSysEvent::EventType::STATISTIC, "MEMORY_MSG", oss.str());
722 }
723
ErasePidInfo(const std::set<pid_t> & exitedPidSet)724 void MemoryManager::ErasePidInfo(const std::set<pid_t>& exitedPidSet)
725 {
726 std::lock_guard<std::mutex> lock(mutex_);
727 for (auto pid : exitedPidSet) {
728 pidInfo_.erase(pid);
729 }
730 }
731
VmaDefragment(Drawing::GPUContext * gpuContext)732 void MemoryManager::VmaDefragment(Drawing::GPUContext* gpuContext)
733 {
734 #if defined(RS_ENABLE_VK)
735 if (!gpuContext) {
736 RS_LOGE("VmaDefragment fail, gpuContext is nullptr");
737 return;
738 }
739 RS_TRACE_NAME_FMT("VmaDefragment");
740 gpuContext->VmaDefragment();
741 #endif
742 }
743 } // namespace OHOS::Rosen