1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "CachedAppOptimizer"
18 //#define LOG_NDEBUG 0
19 #define ATRACE_TAG ATRACE_TAG_ACTIVITY_MANAGER
20 #define ATRACE_COMPACTION_TRACK "Compaction"
21
22 #include <android-base/file.h>
23 #include <android-base/logging.h>
24 #include <android-base/stringprintf.h>
25 #include <android-base/unique_fd.h>
26 #include <android_runtime/AndroidRuntime.h>
27 #include <binder/IPCThreadState.h>
28 #include <cutils/compiler.h>
29 #include <dirent.h>
30 #include <jni.h>
31 #include <linux/errno.h>
32 #include <linux/time.h>
33 #include <log/log.h>
34 #include <meminfo/procmeminfo.h>
35 #include <meminfo/sysmeminfo.h>
36 #include <nativehelper/JNIHelp.h>
37 #include <processgroup/processgroup.h>
38 #include <stddef.h>
39 #include <stdio.h>
40 #include <sys/mman.h>
41 #include <sys/pidfd.h>
42 #include <sys/stat.h>
43 #include <sys/syscall.h>
44 #include <sys/sysinfo.h>
45 #include <sys/types.h>
46 #include <unistd.h>
47 #include <utils/Timers.h>
48 #include <utils/Trace.h>
49
50 #include <algorithm>
51
52 using android::base::StringPrintf;
53 using android::base::WriteStringToFile;
54 using android::meminfo::ProcMemInfo;
55 using namespace android::meminfo;
56
// Flags understood by compactProcessOrFallback selecting which VMA
// categories (file-backed and/or anonymous) get compacted.
#define COMPACT_ACTION_FILE_FLAG 1
#define COMPACT_ACTION_ANON_FLAG 2

// Maps a VMA to the madvise behavior to apply to it, or -1 to skip it.
using VmaToAdviseFunc = std::function<int(const Vma&)>;
using android::base::unique_fd;

// Bit flags combined into the return value of getBinderFreezeInfo.
#define SYNC_RECEIVED_WHILE_FROZEN (1)
#define ASYNC_RECEIVED_WHILE_FROZEN (2)
#define TXNS_PENDING_WHILE_FROZEN (4)

// INT_MAX rounded down to a page boundary; mirrors the kernel's limit on
// bytes processed by read/write-style syscalls (including process_madvise).
#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)

// Defines the maximum amount of VMAs we can send per process_madvise syscall.
// Currently this is set to UIO_MAXIOV which is the maximum segments allowed by
// iovec implementation used by process_madvise syscall
#define MAX_VMAS_PER_BATCH UIO_MAXIOV

// Maximum bytes that we can send per process_madvise syscall once this limit
// is reached we split the remaining VMAs into another syscall. The MAX_RW_COUNT
// limit is imposed by iovec implementation. However, if you want to use a smaller
// limit, it has to be a page aligned value.
#define MAX_BYTES_PER_BATCH MAX_RW_COUNT

// Selected a high enough number to avoid clashing with linux errno codes
#define ERROR_COMPACTION_CANCELLED -1000
82
83 namespace android {
84
// Signal happening in separate thread that would bail out compaction
// before starting next VMA batch. Set to true by cancelCompaction(),
// cleared at the start of compactProcess(), and polled by compactMemory()
// before each process_madvise call.
static std::atomic<bool> cancelRunningCompaction;
88
// A VmaBatch represents a set of VMAs that can be processed
// as VMAs are processed by client code it is expected that the
// VMAs get consumed which means they are discarded as they are
// processed so that the first element always is the next element
// to be sent
struct VmaBatch {
    // Points at the first not-yet-consumed iovec; advanced by consumeBytes().
    struct iovec* vmas;
    // total amount of VMAs to reach the end of iovec
    size_t totalVmas;
    // total amount of bytes that are remaining within iovec
    uint64_t totalBytes;
};
101
102 // Advances the iterator by the specified amount of bytes.
103 // This is used to remove already processed or no longer
104 // needed parts of the batch.
105 // Returns total bytes consumed
consumeBytes(VmaBatch & batch,uint64_t bytesToConsume)106 uint64_t consumeBytes(VmaBatch& batch, uint64_t bytesToConsume) {
107 if (CC_UNLIKELY(bytesToConsume) < 0) {
108 LOG(ERROR) << "Cannot consume negative bytes for VMA batch !";
109 return 0;
110 }
111
112 if (CC_UNLIKELY(bytesToConsume > batch.totalBytes)) {
113 // Avoid consuming more bytes than available
114 bytesToConsume = batch.totalBytes;
115 }
116
117 uint64_t bytesConsumed = 0;
118 while (bytesConsumed < bytesToConsume) {
119 if (CC_UNLIKELY(batch.totalVmas == 0)) {
120 // No more vmas to consume
121 break;
122 }
123 if (CC_UNLIKELY(bytesConsumed + batch.vmas[0].iov_len > bytesToConsume)) {
124 // This vma can't be fully consumed, do it partially.
125 uint64_t bytesLeftToConsume = bytesToConsume - bytesConsumed;
126 bytesConsumed += bytesLeftToConsume;
127 batch.vmas[0].iov_base = (void*)((uint64_t)batch.vmas[0].iov_base + bytesLeftToConsume);
128 batch.vmas[0].iov_len -= bytesLeftToConsume;
129 batch.totalBytes -= bytesLeftToConsume;
130 return bytesConsumed;
131 }
132 // This vma can be fully consumed
133 bytesConsumed += batch.vmas[0].iov_len;
134 batch.totalBytes -= batch.vmas[0].iov_len;
135 --batch.totalVmas;
136 ++batch.vmas;
137 }
138
139 return bytesConsumed;
140 }
141
142 // given a source of vmas this class will act as a factory
143 // of VmaBatch objects and it will allow generating batches
144 // until there are no more left in the source vector.
145 // Note: the class does not actually modify the given
146 // vmas vector, instead it iterates on it until the end.
147 class VmaBatchCreator {
148 const std::vector<Vma>* sourceVmas;
149 const int totalVmasInSource;
150 // This is the destination array where batched VMAs will be stored
151 // it gets encapsulated into a VmaBatch which is the object
152 // meant to be used by client code.
153 struct iovec* destVmas;
154
155 // Parameters to keep track of the iterator on the source vmas
156 int currentIndex_;
157 uint64_t currentOffset_;
158
159 public:
VmaBatchCreator(const std::vector<Vma> * vmasToBatch,struct iovec * destVmasVec,int vmasInSource)160 VmaBatchCreator(const std::vector<Vma>* vmasToBatch, struct iovec* destVmasVec,
161 int vmasInSource)
162 : sourceVmas(vmasToBatch),
163 totalVmasInSource(vmasInSource),
164 destVmas(destVmasVec),
165 currentIndex_(0),
166 currentOffset_(0) {}
167
currentIndex()168 int currentIndex() { return currentIndex_; }
currentOffset()169 uint64_t currentOffset() { return currentOffset_; }
170
171 // Generates a batch and moves the iterator on the source vmas
172 // past the last VMA in the batch.
173 // Returns true on success, false on failure
createNextBatch(VmaBatch & batch)174 bool createNextBatch(VmaBatch& batch) {
175 if (currentIndex_ >= MAX_VMAS_PER_BATCH && currentIndex_ >= sourceVmas->size()) {
176 return false;
177 }
178
179 const std::vector<Vma>& vmas = *sourceVmas;
180 batch.vmas = destVmas;
181 uint64_t totalBytesInBatch = 0;
182 int indexInBatch = 0;
183
184 // Add VMAs to the batch up until we consumed all the VMAs or
185 // reached any imposed limit of VMAs per batch.
186 while (indexInBatch < MAX_VMAS_PER_BATCH && currentIndex_ < totalVmasInSource) {
187 uint64_t vmaStart = vmas[currentIndex_].start + currentOffset_;
188 uint64_t vmaSize = vmas[currentIndex_].end - vmaStart;
189 uint64_t bytesAvailableInBatch = MAX_BYTES_PER_BATCH - totalBytesInBatch;
190
191 batch.vmas[indexInBatch].iov_base = (void*)vmaStart;
192
193 if (vmaSize > bytesAvailableInBatch) {
194 // VMA would exceed the max available bytes in batch
195 // clamp with available bytes and finish batch.
196 vmaSize = bytesAvailableInBatch;
197 currentOffset_ += bytesAvailableInBatch;
198 }
199
200 batch.vmas[indexInBatch].iov_len = vmaSize;
201 totalBytesInBatch += vmaSize;
202
203 ++indexInBatch;
204 if (totalBytesInBatch >= MAX_BYTES_PER_BATCH) {
205 // Reached max bytes quota so this marks
206 // the end of the batch
207 if (CC_UNLIKELY(vmaSize == (vmas[currentIndex_].end - vmaStart))) {
208 // we reached max bytes exactly at the end of the vma
209 // so advance to next one
210 currentOffset_ = 0;
211 ++currentIndex_;
212 }
213 break;
214 }
215 // Fully finished current VMA, move to next one
216 currentOffset_ = 0;
217 ++currentIndex_;
218 }
219 batch.totalVmas = indexInBatch;
220 batch.totalBytes = totalBytesInBatch;
221 if (batch.totalVmas == 0 || batch.totalBytes == 0) {
222 // This is an empty batch, mark as failed creating.
223 return false;
224 }
225 return true;
226 }
227 };
228
// Madvise a set of VMAs given in a batch for a specific process
// The total number of bytes successfully madvised will be set on
// outBytesProcessed.
// Returns 0 on success and standard linux -errno code returned by
// process_madvise on failure
int madviseVmasFromBatch(unique_fd& pidfd, VmaBatch& batch, int madviseType,
                         uint64_t* outBytesProcessed) {
    if (batch.totalVmas == 0 || batch.totalBytes == 0) {
        // No VMAs in Batch, skip.
        *outBytesProcessed = 0;
        return 0;
    }

    ATRACE_BEGIN(StringPrintf("Madvise %d: %zu VMAs.", madviseType, batch.totalVmas).c_str());
    int64_t bytesProcessedInSend =
            process_madvise(pidfd, batch.vmas, batch.totalVmas, madviseType, 0);
    ATRACE_END();
    if (CC_UNLIKELY(bytesProcessedInSend == -1)) {
        bytesProcessedInSend = 0;
        if (errno != EINVAL) {
            // Forward irrecoverable errors and bail out compaction
            *outBytesProcessed = 0;
            return -errno;
        }
        // EINVAL is treated as a per-VMA failure: fall through with
        // bytesProcessedInSend == 0 so the offending VMA is skipped below.
    }
    if (bytesProcessedInSend == 0) {
        // When we find a VMA with error, fully consume it as it
        // is extremely expensive to iterate on its pages one by one
        bytesProcessedInSend = batch.vmas[0].iov_len;
    } else if (bytesProcessedInSend < batch.totalBytes) {
        // Partially processed the bytes requested
        // skip last page which is where it failed.
        bytesProcessedInSend += PAGE_SIZE;
    }
    // Drop the processed (or deliberately skipped) bytes from the front of
    // the batch so the caller can retry with the remainder.
    bytesProcessedInSend = consumeBytes(batch, bytesProcessedInSend);

    *outBytesProcessed = bytesProcessedInSend;
    return 0;
}
268
269 // Legacy method for compacting processes, any new code should
270 // use compactProcess instead.
compactProcessProcfs(int pid,const std::string & compactionType)271 static inline void compactProcessProcfs(int pid, const std::string& compactionType) {
272 std::string reclaim_path = StringPrintf("/proc/%d/reclaim", pid);
273 WriteStringToFile(compactionType, reclaim_path);
274 }
275
// Compacts a set of VMAs for pid using an madviseType accepted by process_madvise syscall
// Returns the total bytes that where madvised.
//
// If any VMA fails compaction due to -EINVAL it will be skipped and continue.
// However, if it fails for any other reason, it will bail out and forward the error
static int64_t compactMemory(const std::vector<Vma>& vmas, int pid, int madviseType,
                             int totalVmas) {
    if (totalVmas == 0) {
        return 0;
    }

    unique_fd pidfd(pidfd_open(pid, 0));
    if (pidfd < 0) {
        // Skip compaction if failed to open pidfd with any error
        return -errno;
    }

    // Stack-allocated scratch space reused for every batch handed to
    // process_madvise.
    struct iovec destVmas[MAX_VMAS_PER_BATCH];

    VmaBatch batch;
    VmaBatchCreator batcher(&vmas, destVmas, totalVmas);

    int64_t totalBytesProcessed = 0;
    while (batcher.createNextBatch(batch)) {
        uint64_t bytesProcessedInSend;
        ScopedTrace batchTrace(ATRACE_TAG, "VMA Batch");
        // A batch can take several process_madvise calls to drain (e.g. a
        // send stops early at a failing VMA); loop until the batch is empty.
        do {
            if (CC_UNLIKELY(cancelRunningCompaction.load())) {
                // There could be a significant delay between when a compaction
                // is requested and when it is handled during this time our
                // OOM adjust could have improved.
                LOG(DEBUG) << "Cancelled running compaction for " << pid;
                ATRACE_INSTANT_FOR_TRACK(ATRACE_COMPACTION_TRACK,
                                         StringPrintf("Cancelled compaction for %d", pid).c_str());
                return ERROR_COMPACTION_CANCELLED;
            }
            int error = madviseVmasFromBatch(pidfd, batch, madviseType, &bytesProcessedInSend);
            if (error < 0) {
                // Returns standard linux errno code
                return error;
            }
            if (CC_UNLIKELY(bytesProcessedInSend == 0)) {
                // This means there was a problem consuming bytes,
                // bail out since no forward progress can be made with this batch
                break;
            }
            totalBytesProcessed += bytesProcessedInSend;
        } while (batch.totalBytes > 0 && batch.totalVmas > 0);
    }

    return totalBytesProcessed;
}
328
getFilePageAdvice(const Vma & vma)329 static int getFilePageAdvice(const Vma& vma) {
330 if (vma.inode > 0 && !vma.is_shared) {
331 return MADV_COLD;
332 }
333 return -1;
334 }
getAnonPageAdvice(const Vma & vma)335 static int getAnonPageAdvice(const Vma& vma) {
336 if (vma.inode == 0 && !vma.is_shared) {
337 return MADV_PAGEOUT;
338 }
339 return -1;
340 }
getAnyPageAdvice(const Vma & vma)341 static int getAnyPageAdvice(const Vma& vma) {
342 if (vma.inode == 0 && !vma.is_shared) {
343 return MADV_PAGEOUT;
344 }
345 return MADV_COLD;
346 }
347
// Perform a full process compaction using process_madvise syscall
// using the madvise behavior defined by vmaToAdviseFunc per VMA.
//
// Currently supported behaviors are MADV_COLD and MADV_PAGEOUT.
//
// Returns the total number of bytes compacted on success. On error
// returns process_madvise errno code or if compaction was cancelled
// it returns ERROR_COMPACTION_CANCELLED.
//
// Not thread safe. We reuse vectors so we assume this is called only
// on one thread at most.
static int64_t compactProcess(int pid, VmaToAdviseFunc vmaToAdviseFunc) {
    // Clear any cancellation request left over from a previous run.
    cancelRunningCompaction.store(false);
    // Buffer reused across calls when reading the maps file (see the
    // thread-safety note above).
    static std::string mapsBuffer;
    ATRACE_BEGIN("CollectVmas");
    ProcMemInfo meminfo(pid);
    // Preallocated and reused across calls; only the first
    // coldVmaIndex/pageoutVmaIndex entries are valid for the current call.
    static std::vector<Vma> pageoutVmas(2000), coldVmas(2000);
    int coldVmaIndex = 0;
    int pageoutVmaIndex = 0;
    // Sorts each VMA into the cold or pageout list according to the advice
    // returned by vmaToAdviseFunc; any other advice (e.g. -1) drops the VMA.
    auto vmaCollectorCb = [&vmaToAdviseFunc, &pageoutVmaIndex, &coldVmaIndex](const Vma& vma) {
        int advice = vmaToAdviseFunc(vma);
        switch (advice) {
            case MADV_COLD:
                // Overwrite an existing slot when possible to avoid growth.
                if (coldVmaIndex < coldVmas.size()) {
                    coldVmas[coldVmaIndex] = vma;
                } else {
                    coldVmas.push_back(vma);
                }
                ++coldVmaIndex;
                break;
            case MADV_PAGEOUT:
                if (pageoutVmaIndex < pageoutVmas.size()) {
                    pageoutVmas[pageoutVmaIndex] = vma;
                } else {
                    pageoutVmas.push_back(vma);
                }
                ++pageoutVmaIndex;
                break;
        }
    };
    meminfo.ForEachVmaFromMaps(vmaCollectorCb, mapsBuffer);
    ATRACE_END();
#ifdef DEBUG_COMPACTION
    ALOGE("Total VMAs sent for compaction anon=%d file=%d", pageoutVmaIndex,
          coldVmaIndex);
#endif

    int64_t pageoutBytes = compactMemory(pageoutVmas, pid, MADV_PAGEOUT, pageoutVmaIndex);
    if (pageoutBytes < 0) {
        // Error, just forward it.
        cancelRunningCompaction.store(false);
        return pageoutBytes;
    }

    int64_t coldBytes = compactMemory(coldVmas, pid, MADV_COLD, coldVmaIndex);
    if (coldBytes < 0) {
        // Error, just forward it.
        cancelRunningCompaction.store(false);
        return coldBytes;
    }

    return pageoutBytes + coldBytes;
}
411
412 // Compact process using process_madvise syscall or fallback to procfs in
413 // case syscall does not exist.
compactProcessOrFallback(int pid,int compactionFlags)414 static void compactProcessOrFallback(int pid, int compactionFlags) {
415 if ((compactionFlags & (COMPACT_ACTION_ANON_FLAG | COMPACT_ACTION_FILE_FLAG)) == 0) return;
416
417 bool compactAnon = compactionFlags & COMPACT_ACTION_ANON_FLAG;
418 bool compactFile = compactionFlags & COMPACT_ACTION_FILE_FLAG;
419
420 // Set when the system does not support process_madvise syscall to avoid
421 // gathering VMAs in subsequent calls prior to falling back to procfs
422 static bool shouldForceProcFs = false;
423 std::string compactionType;
424 VmaToAdviseFunc vmaToAdviseFunc;
425
426 if (compactAnon) {
427 if (compactFile) {
428 compactionType = "all";
429 vmaToAdviseFunc = getAnyPageAdvice;
430 } else {
431 compactionType = "anon";
432 vmaToAdviseFunc = getAnonPageAdvice;
433 }
434 } else {
435 compactionType = "file";
436 vmaToAdviseFunc = getFilePageAdvice;
437 }
438
439 if (shouldForceProcFs || compactProcess(pid, vmaToAdviseFunc) == -ENOSYS) {
440 shouldForceProcFs = true;
441 compactProcessProcfs(pid, compactionType);
442 }
443 }
444
445 // This performs per-process reclaim on all processes belonging to non-app UIDs.
446 // For the most part, these are non-zygote processes like Treble HALs, but it
447 // also includes zygote-derived processes that run in system UIDs, like bluetooth
448 // or potentially some mainline modules. The only process that should definitely
449 // not be compacted is system_server, since compacting system_server around the
450 // time of BOOT_COMPLETE could result in perceptible issues.
com_android_server_am_CachedAppOptimizer_compactSystem(JNIEnv *,jobject)451 static void com_android_server_am_CachedAppOptimizer_compactSystem(JNIEnv *, jobject) {
452 std::unique_ptr<DIR, decltype(&closedir)> proc(opendir("/proc"), closedir);
453 struct dirent* current;
454 while ((current = readdir(proc.get()))) {
455 if (current->d_type != DT_DIR) {
456 continue;
457 }
458
459 // don't compact system_server, rely on persistent compaction during screen off
460 // in order to avoid mmap_sem-related stalls
461 if (atoi(current->d_name) == getpid()) {
462 continue;
463 }
464
465 std::string status_name = StringPrintf("/proc/%s/status", current->d_name);
466 struct stat status_info;
467
468 if (stat(status_name.c_str(), &status_info) != 0) {
469 // must be some other directory that isn't a pid
470 continue;
471 }
472
473 // android.os.Process.FIRST_APPLICATION_UID
474 if (status_info.st_uid >= 10000) {
475 continue;
476 }
477
478 int pid = atoi(current->d_name);
479
480 compactProcessOrFallback(pid, COMPACT_ACTION_ANON_FLAG | COMPACT_ACTION_FILE_FLAG);
481 }
482 }
483
com_android_server_am_CachedAppOptimizer_cancelCompaction(JNIEnv *,jobject)484 static void com_android_server_am_CachedAppOptimizer_cancelCompaction(JNIEnv*, jobject) {
485 cancelRunningCompaction.store(true);
486 ATRACE_INSTANT_FOR_TRACK(ATRACE_COMPACTION_TRACK, "Cancel compaction");
487 }
488
com_android_server_am_CachedAppOptimizer_threadCpuTimeNs(JNIEnv *,jobject)489 static jlong com_android_server_am_CachedAppOptimizer_threadCpuTimeNs(JNIEnv*, jobject) {
490 int64_t currentCpuTime = systemTime(CLOCK_THREAD_CPUTIME_ID);
491
492 return currentCpuTime;
493 }
494
com_android_server_am_CachedAppOptimizer_getFreeSwapPercent(JNIEnv *,jobject)495 static jdouble com_android_server_am_CachedAppOptimizer_getFreeSwapPercent(JNIEnv*, jobject) {
496 struct sysinfo memoryInfo;
497 int error = sysinfo(&memoryInfo);
498 if(error == -1) {
499 LOG(ERROR) << "Could not check free swap space";
500 return 0;
501 }
502 return (double)memoryInfo.freeswap / (double)memoryInfo.totalswap;
503 }
504
com_android_server_am_CachedAppOptimizer_getUsedZramMemory()505 static jlong com_android_server_am_CachedAppOptimizer_getUsedZramMemory() {
506 android::meminfo::SysMemInfo sysmeminfo;
507 return sysmeminfo.mem_zram_kb();
508 }
509
com_android_server_am_CachedAppOptimizer_getMemoryFreedCompaction()510 static jlong com_android_server_am_CachedAppOptimizer_getMemoryFreedCompaction() {
511 android::meminfo::SysMemInfo sysmeminfo;
512 return sysmeminfo.mem_compacted_kb("/sys/block/zram0/");
513 }
514
com_android_server_am_CachedAppOptimizer_compactProcess(JNIEnv *,jobject,jint pid,jint compactionFlags)515 static void com_android_server_am_CachedAppOptimizer_compactProcess(JNIEnv*, jobject, jint pid,
516 jint compactionFlags) {
517 compactProcessOrFallback(pid, compactionFlags);
518 }
519
com_android_server_am_CachedAppOptimizer_freezeBinder(JNIEnv * env,jobject clazz,jint pid,jboolean freeze,jint timeout_ms)520 static jint com_android_server_am_CachedAppOptimizer_freezeBinder(JNIEnv* env, jobject clazz,
521 jint pid, jboolean freeze,
522 jint timeout_ms) {
523 jint retVal = IPCThreadState::freeze(pid, freeze, timeout_ms);
524 if (retVal != 0 && retVal != -EAGAIN) {
525 jniThrowException(env, "java/lang/RuntimeException", "Unable to freeze/unfreeze binder");
526 }
527
528 return retVal;
529 }
530
com_android_server_am_CachedAppOptimizer_getBinderFreezeInfo(JNIEnv * env,jobject clazz,jint pid)531 static jint com_android_server_am_CachedAppOptimizer_getBinderFreezeInfo(JNIEnv *env,
532 jobject clazz, jint pid) {
533 uint32_t syncReceived = 0, asyncReceived = 0;
534
535 int error = IPCThreadState::getProcessFreezeInfo(pid, &syncReceived, &asyncReceived);
536
537 if (error < 0) {
538 jniThrowException(env, "java/lang/RuntimeException", strerror(error));
539 }
540
541 jint retVal = 0;
542
543 // bit 0 of sync_recv goes to bit 0 of retVal
544 retVal |= syncReceived & SYNC_RECEIVED_WHILE_FROZEN;
545 // bit 0 of async_recv goes to bit 1 of retVal
546 retVal |= (asyncReceived << 1) & ASYNC_RECEIVED_WHILE_FROZEN;
547 // bit 1 of sync_recv goes to bit 2 of retVal
548 retVal |= (syncReceived << 1) & TXNS_PENDING_WHILE_FROZEN;
549
550 return retVal;
551 }
552
com_android_server_am_CachedAppOptimizer_getFreezerCheckPath(JNIEnv * env,jobject clazz)553 static jstring com_android_server_am_CachedAppOptimizer_getFreezerCheckPath(JNIEnv* env,
554 jobject clazz) {
555 std::string path;
556
557 if (!getAttributePathForTask("FreezerState", getpid(), &path)) {
558 path = "";
559 }
560
561 return env->NewStringUTF(path.c_str());
562 }
563
com_android_server_am_CachedAppOptimizer_isFreezerProfileValid(JNIEnv * env)564 static jboolean com_android_server_am_CachedAppOptimizer_isFreezerProfileValid(JNIEnv* env) {
565 int uid = getuid();
566 int pid = getpid();
567
568 return isProfileValidForProcess("Frozen", uid, pid) &&
569 isProfileValidForProcess("Unfrozen", uid, pid);
570 }
571
// JNI method table mapping the native method declarations of
// com.android.server.am.CachedAppOptimizer to the functions above.
static const JNINativeMethod sMethods[] = {
        /* name, signature, funcPtr */
        {"cancelCompaction", "()V",
         (void*)com_android_server_am_CachedAppOptimizer_cancelCompaction},
        {"threadCpuTimeNs", "()J", (void*)com_android_server_am_CachedAppOptimizer_threadCpuTimeNs},
        {"getFreeSwapPercent", "()D",
         (void*)com_android_server_am_CachedAppOptimizer_getFreeSwapPercent},
        {"getUsedZramMemory", "()J",
         (void*)com_android_server_am_CachedAppOptimizer_getUsedZramMemory},
        {"getMemoryFreedCompaction", "()J",
         (void*)com_android_server_am_CachedAppOptimizer_getMemoryFreedCompaction},
        {"compactSystem", "()V", (void*)com_android_server_am_CachedAppOptimizer_compactSystem},
        {"compactProcess", "(II)V", (void*)com_android_server_am_CachedAppOptimizer_compactProcess},
        {"freezeBinder", "(IZI)I", (void*)com_android_server_am_CachedAppOptimizer_freezeBinder},
        {"getBinderFreezeInfo", "(I)I",
         (void*)com_android_server_am_CachedAppOptimizer_getBinderFreezeInfo},
        {"getFreezerCheckPath", "()Ljava/lang/String;",
         (void*)com_android_server_am_CachedAppOptimizer_getFreezerCheckPath},
        {"isFreezerProfileValid", "()Z",
         (void*)com_android_server_am_CachedAppOptimizer_isFreezerProfileValid}};
592
register_android_server_am_CachedAppOptimizer(JNIEnv * env)593 int register_android_server_am_CachedAppOptimizer(JNIEnv* env)
594 {
595 return jniRegisterNativeMethods(env, "com/android/server/am/CachedAppOptimizer",
596 sMethods, NELEM(sMethods));
597 }
598
599 }
600