/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stddef.h> /* NULL */
#include <sys/mman.h> /* mmap */
#include <sched.h> /* sched_yield() */
#include <limits.h>

#include "hilog/log_c.h"
#include "pm_util.h"
#include "ux_page_table_c.h"

#undef LOG_TAG
#define LOG_TAG "PurgeableMemC: UPT"

#if defined(USE_UXPT) && (USE_UXPT > 0) /* (USE_UXPT > 0) means uxpt is enabled */

/*
 * uint64_t is used as uxpte_t to avoid confusion between 32-bit and 64-bit systems.
 * Type uxpte_t may be changed to uint32_t in the future, so a typedef is used.
 */
typedef uint64_t uxpte_t;

typedef struct UserExtendPageTable {
    uint64_t dataAddr;
    size_t dataSize;
    uxpte_t *uxpte;
} UxPageTableStruct;

static bool g_supportUxpt = false;

/*
 * ---------------------------------------------------------------------------
 * |             virtual page number             |                           |
 * |---------------------------------------------| vaddr offset in virt page |
 * | uxpte page number    | offset in uxpte page |                           |
 * ---------------------------------------------------------------------------
 * |                      | UXPTE_PER_PAGE_SHIFT |        PAGE_SHIFT         |
 */
static const size_t UXPTE_SIZE_SHIFT = 3;
static const size_t UXPTE_PER_PAGE_SHIFT = PAGE_SHIFT - UXPTE_SIZE_SHIFT;
static const size_t UXPTE_PER_PAGE = 1 << UXPTE_PER_PAGE_SHIFT;

/* get virtual page number from virtual address */
static inline uint64_t VirtPageNo(uint64_t vaddr)
{
    return vaddr >> PAGE_SHIFT;
}

/* page number in user page table of uxpte for virtual address */
static inline uint64_t UxptePageNo(uint64_t vaddr)
{
    return VirtPageNo(vaddr) >> UXPTE_PER_PAGE_SHIFT;
}

/* uxpte offset in uxpte page for virtual address */
static inline uint64_t UxpteOffset(uint64_t vaddr)
{
    return VirtPageNo(vaddr) & (UXPTE_PER_PAGE - 1);
}

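/*
 * Worked example (illustration only, assuming PAGE_SHIFT == 12, i.e. 4 KiB pages):
 * with 8-byte uxptes, UXPTE_PER_PAGE_SHIFT == 9, so one uxpte page covers
 * UXPTE_PER_PAGE == 512 data pages (2 MiB of virtual address space).
 * For vaddr = 0x40201000:
 *   VirtPageNo(vaddr)  == 0x40201000 >> 12 == 0x40201
 *   UxptePageNo(vaddr) == 0x40201 >> 9     == 0x201
 *   UxpteOffset(vaddr) == 0x40201 & 0x1FF  == 0x1
 */
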
static const size_t UXPTE_PRESENT_BIT = 1;
static const size_t UXPTE_PRESENT_MASK = (1 << UXPTE_PRESENT_BIT) - 1;
static const size_t UXPTE_REFCNT_ONE = 1 << UXPTE_PRESENT_BIT;
static const uxpte_t UXPTE_UNDER_RECLAIM = (uxpte_t)(-UXPTE_REFCNT_ONE);
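
/*
 * Encoding note (a sketch derived from the constants above): bit 0 of a uxpte
 * is the present bit and the higher bits form a reference count, so
 * UxpteGet()/UxptePut() move the count in steps of UXPTE_REFCNT_ONE (2).
 * For example, a uxpte value of 0x5 reads as "present, refcount 2", while
 * UXPTE_UNDER_RECLAIM ((uxpte_t)-2) marks an entry the kernel is reclaiming.
 */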

static inline bool IsUxptePresent(uxpte_t pte)
{
    return pte & (uxpte_t)UXPTE_PRESENT_MASK;
}

static inline bool IsUxpteUnderReclaim(uxpte_t pte)
{
    return pte == UXPTE_UNDER_RECLAIM;
}

static size_t GetUxPageSize(uint64_t dataAddr, size_t dataSize)
{
    if (dataAddr + dataSize < dataAddr || dataAddr + dataSize < dataSize || dataAddr + dataSize < 1) {
        HILOG_ERROR(LOG_CORE, "%{public}s: Addition overflow!", __func__);
        return 0;
    }
    uint64_t pageNoEnd = UxptePageNo(dataAddr + dataSize - 1);
    uint64_t pageNoStart = UxptePageNo(dataAddr);
    if (pageNoEnd < pageNoStart) {
        HILOG_ERROR(LOG_CORE, "pageNoEnd < pageNoStart");
        return 0;
    }
    if (pageNoEnd - pageNoStart + 1 > SIZE_MAX / PAGE_SIZE) {
        HILOG_ERROR(LOG_CORE, "pageNoEnd - pageNoStart + 1 > SIZE_MAX / PAGE_SIZE");
        return 0;
    }
    return (pageNoEnd - pageNoStart + 1) * PAGE_SIZE;
}

static inline uint64_t RoundUp(uint64_t val, size_t align)
{
    if (val + align < val || val + align < align) {
        HILOG_ERROR(LOG_CORE, "%{public}s: Addition overflow!", __func__);
        return val;
    }
    if (align == 0) {
        return val;
    }
    return ((val + align - 1) / align) * align;
}

static inline uint64_t RoundDown(uint64_t val, size_t align)
{
    if (align == 0) {
        return val;
    }
    return val & (~(align - 1));
}

enum UxpteOp {
    UPT_GET = 0,
    UPT_PUT = 1,
    UPT_CLEAR = 2,
    UPT_IS_PRESENT = 3,
};

static void __attribute__((constructor)) CheckUxpt(void);
static void UxpteAdd(uxpte_t *pte, size_t incNum);
static void UxpteSub(uxpte_t *pte, size_t decNum);

static void GetUxpteAt(UxPageTableStruct *upt, uint64_t addr);
static void PutUxpteAt(UxPageTableStruct *upt, uint64_t addr);
static bool IsPresentAt(UxPageTableStruct *upt, uint64_t addr);
static PMState UxpteOps(UxPageTableStruct *upt, uint64_t addr, size_t len, enum UxpteOp op);

static uxpte_t *MapUxptePages(uint64_t dataAddr, size_t dataSize);
static int UnmapUxptePages(uxpte_t *ptes, size_t size);

static void __attribute__((constructor)) CheckUxpt(void)
{
    int prot = PROT_READ | PROT_WRITE;
    int type = MAP_ANONYMOUS | MAP_PURGEABLE;
    size_t dataSize = PAGE_SIZE;
    /* try to mmap a purgeable page */
    void *dataPtr = mmap(NULL, dataSize, prot, type, -1, 0);
    if (dataPtr == MAP_FAILED) {
        HILOG_ERROR(LOG_CORE, "%{public}s: not support MAP_PURG", __func__);
        g_supportUxpt = false;
        return;
    }
    /* try to mmap a uxpt page */
    type = MAP_ANONYMOUS | MAP_USEREXPTE;
    size_t uptSize = GetUxPageSize((uint64_t)dataPtr, dataSize);
    void *ptes = mmap(NULL, uptSize, prot, type, -1, UxptePageNo((uint64_t)dataPtr) * PAGE_SIZE);
    if (ptes != MAP_FAILED) {
        g_supportUxpt = true;
        /* free uxpt */
        if (munmap(ptes, uptSize) != 0) {
            HILOG_ERROR(LOG_CORE, "%{public}s: unmap uxpt fail", __func__);
        }
    } else { /* MAP_FAILED */
        g_supportUxpt = false;
        HILOG_ERROR(LOG_CORE, "%{public}s: not support uxpt", __func__);
    }
    ptes = NULL;
    /* free data */
    if (munmap(dataPtr, dataSize) != 0) {
        HILOG_ERROR(LOG_CORE, "%{public}s: unmap purg data fail", __func__);
    }
    dataPtr = NULL;
    HILOG_INFO(LOG_CORE, "%{public}s: supportUxpt=%{public}s", __func__, (g_supportUxpt ? "1" : "0"));
    return;
}

bool UxpteIsEnabled(void)
{
    return g_supportUxpt;
}

size_t UxPageTableSize(void)
{
    return sizeof(UxPageTableStruct);
}

PMState InitUxPageTable(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (!g_supportUxpt) {
        HILOG_DEBUG(LOG_CORE, "%{public}s: not support uxpt", __func__);
        return PM_OK;
    }
    if (upt == NULL) {
        HILOG_ERROR(LOG_CORE, "%{public}s: upt is NULL!", __func__);
        return PM_MMAP_UXPT_FAIL;
    }
    upt->dataAddr = addr;
    upt->dataSize = len;
    upt->uxpte = MapUxptePages(upt->dataAddr, upt->dataSize);
    if (!(upt->uxpte)) {
        return PM_MMAP_UXPT_FAIL;
    }
    UxpteClear(upt, addr, len);
    return PM_OK;
}

PMState DeinitUxPageTable(UxPageTableStruct *upt)
{
    if (!g_supportUxpt) {
        HILOG_DEBUG(LOG_CORE, "%{public}s: not support uxpt", __func__);
        return PM_OK;
    }
    if (upt == NULL) {
        HILOG_ERROR(LOG_CORE, "%{public}s: upt is NULL!", __func__);
        return PM_MMAP_UXPT_FAIL;
    }
    size_t size = GetUxPageSize(upt->dataAddr, upt->dataSize);
    int unmapRet = 0;
    if (upt->uxpte) {
        unmapRet = UnmapUxptePages(upt->uxpte, size);
        if (unmapRet != 0) {
            HILOG_ERROR(LOG_CORE, "%{public}s: unmap uxpt fail", __func__);
            return PM_UNMAP_UXPT_FAIL;
        }
        upt->uxpte = NULL;
    }
    upt->dataAddr = 0;
    upt->dataSize = 0;
    return PM_OK;
}

void UxpteGet(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (!g_supportUxpt) {
        return;
    }
    UxpteOps(upt, addr, len, UPT_GET);
}

void UxptePut(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (!g_supportUxpt) {
        return;
    }
    UxpteOps(upt, addr, len, UPT_PUT);
}

void UxpteClear(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (!g_supportUxpt) {
        return;
    }
    UxpteOps(upt, addr, len, UPT_CLEAR);
}

bool UxpteIsPresent(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (!g_supportUxpt) {
        return true;
    }
    PMState ret = UxpteOps(upt, addr, len, UPT_IS_PRESENT);
    return ret == PM_OK;
}
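
/*
 * Illustrative usage sketch (not part of this file's logic): a purgeable data
 * buffer mapped with MAP_PURGEABLE is typically paired with one
 * UxPageTableStruct, and accesses are bracketed by UxpteGet()/UxptePut() so
 * the kernel will not reclaim the pages while they are pinned.  The buffer
 * allocation, error handling, and call order below are assumptions made for
 * illustration, not requirements imposed by this file.
 *
 *   void *data = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_ANONYMOUS | MAP_PURGEABLE, -1, 0);
 *   UxPageTableStruct *upt = malloc(UxPageTableSize());
 *   if (data != MAP_FAILED && upt != NULL &&
 *       InitUxPageTable(upt, (uint64_t)data, len) == PM_OK) {
 *       UxpteGet(upt, (uint64_t)data, len);        // pin: refcount +1 per page
 *       if (UxpteIsPresent(upt, (uint64_t)data, len)) {
 *           // ... read or write the buffer ...
 *       }
 *       UxptePut(upt, (uint64_t)data, len);        // unpin: refcount -1 per page
 *       DeinitUxPageTable(upt);
 *   }
 */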

static inline uxpte_t UxpteLoad(const uxpte_t *uxpte)
{
    __sync_synchronize();
    return *uxpte;
}

static inline bool UxpteCAS_(uxpte_t *uxpte, uxpte_t old, uxpte_t newVal)
{
    return __sync_bool_compare_and_swap(uxpte, old, newVal);
}

/* atomically add incNum to *pte with a CAS loop; overflow is skipped, and
 * sched_yield() is called when the entry is marked as under reclaim */
static void UxpteAdd(uxpte_t *pte, size_t incNum)
{
    uxpte_t old = 0;
    uxpte_t newVal = 0;
    do {
        old = UxpteLoad(pte);
        if (old + incNum < old || old + incNum < incNum) {
            break;
        }
        newVal = old + incNum;
        if (ULONG_MAX - old < incNum) {
            return;
        }
        if (IsUxpteUnderReclaim(old)) {
            sched_yield();
            continue;
        }
    } while (!UxpteCAS_(pte, old, newVal));
}

/* atomically subtract decNum from *pte with a CAS loop */
static void UxpteSub(uxpte_t *pte, size_t decNum)
{
    uxpte_t old;
    do {
        old = UxpteLoad(pte);
    } while (!UxpteCAS_(pte, old, old - decNum));
}

/* atomically reset *pte to zero, logging an error if it was not already zero */
static void UxpteClear_(uxpte_t *pte)
{
    uxpte_t old = UxpteLoad(pte);
    if ((unsigned long long)old == 0) {
        return; /* has already been cleared to zero */
    }
    HILOG_ERROR(LOG_CORE, "%{public}s: upte(0x%{public}llx) != 0", __func__, (unsigned long long)old);
    do {
        old = UxpteLoad(pte);
    } while (!UxpteCAS_(pte, old, 0));
}

static inline size_t GetIndexInUxpte(uint64_t startAddr, uint64_t currAddr)
{
    return UxpteOffset(startAddr) + (VirtPageNo(currAddr) - VirtPageNo(startAddr));
}

static void GetUxpteAt(UxPageTableStruct *upt, uint64_t addr)
{
    if (upt == NULL) {
        HILOG_ERROR(LOG_CORE, "%{public}s: upt is NULL!", __func__);
        return;
    }
    size_t index = GetIndexInUxpte(upt->dataAddr, addr);
    UxpteAdd(&(upt->uxpte[index]), UXPTE_REFCNT_ONE);

    HILOG_DEBUG(LOG_CORE, "%{public}s: addr(0x%{public}llx) upte=0x%{public}llx",
        __func__, (unsigned long long)addr, (unsigned long long)(upt->uxpte[index]));
}

static void PutUxpteAt(UxPageTableStruct *upt, uint64_t addr)
{
    if (upt == NULL) {
        HILOG_ERROR(LOG_CORE, "%{public}s: upt is NULL!", __func__);
        return;
    }
    size_t index = GetIndexInUxpte(upt->dataAddr, addr);
    UxpteSub(&(upt->uxpte[index]), UXPTE_REFCNT_ONE);

    HILOG_DEBUG(LOG_CORE, "%{public}s: addr(0x%{public}llx) upte=0x%{public}llx",
        __func__, (unsigned long long)addr, (unsigned long long)(upt->uxpte[index]));
}

static void ClearUxpteAt(UxPageTableStruct *upt, uint64_t addr)
{
    if (upt == NULL) {
        HILOG_ERROR(LOG_CORE, "%{public}s: upt is NULL!", __func__);
        return;
    }
    size_t index = GetIndexInUxpte(upt->dataAddr, addr);
    UxpteClear_(&(upt->uxpte[index]));
}

static bool IsPresentAt(UxPageTableStruct *upt, uint64_t addr)
{
    size_t index = GetIndexInUxpte(upt->dataAddr, addr);

    HILOG_DEBUG(LOG_CORE, "%{public}s: addr(0x%{public}llx) upte=0x%{public}llx PRESENT_MASK=0x%{public}zx",
        __func__, (unsigned long long)addr, (unsigned long long)(upt->uxpte[index]), UXPTE_PRESENT_MASK);

    return IsUxptePresent(upt->uxpte[index]);
}

static PMState UxpteOps(UxPageTableStruct *upt, uint64_t addr, size_t len, enum UxpteOp op)
{
    if (upt == NULL) {
        return PM_BUILDER_NULL;
    }
    uint64_t start = RoundDown(addr, PAGE_SIZE);
    uint64_t end = RoundUp(addr + len, PAGE_SIZE);
    if (start < upt->dataAddr || end > (upt->dataAddr + upt->dataSize)) {
        HILOG_ERROR(LOG_CORE, "%{public}s: addr(0x%{public}llx) start(0x%{public}llx) < dataAddr(0x%{public}llx)"
            " || end(0x%{public}llx) > dataAddr+dataSize(0x%{public}llx) out of bound",
            __func__, (unsigned long long)addr, (unsigned long long)start, (unsigned long long)(upt->dataAddr),
            (unsigned long long)end, (unsigned long long)(upt->dataAddr + upt->dataSize));

        return PM_UXPT_OUT_RANGE;
    }

    for (uint64_t off = start; off < end; off += PAGE_SIZE) {
        switch (op) {
            case UPT_GET: {
                GetUxpteAt(upt, off);
                break;
            }
            case UPT_PUT: {
                PutUxpteAt(upt, off);
                break;
            }
            case UPT_CLEAR: {
                ClearUxpteAt(upt, off);
                break;
            }
            case UPT_IS_PRESENT: {
                if (!IsPresentAt(upt, off)) {
                    HILOG_ERROR(LOG_CORE, "%{public}s: addr(0x%{public}llx) not present", __func__,
                        (unsigned long long)addr);
                    return PM_UXPT_NO_PRESENT;
                }
                break;
            }
            default:
                break;
        }
    }

    return PM_OK;
}

static uxpte_t *MapUxptePages(uint64_t dataAddr, size_t dataSize)
{
    int prot = PROT_READ | PROT_WRITE;
    int type = MAP_ANONYMOUS | MAP_USEREXPTE;
    size_t size = GetUxPageSize(dataAddr, dataSize);
    uxpte_t *ptes = (uxpte_t *)mmap(NULL, size, prot, type, -1, UxptePageNo(dataAddr) * PAGE_SIZE);
    if (ptes == MAP_FAILED) {
        HILOG_ERROR(LOG_CORE, "%{public}s: fail, return NULL", __func__);
        ptes = NULL;
    }

    return ptes;
}

static int UnmapUxptePages(uxpte_t *ptes, size_t size)
{
    return munmap(ptes, size);
}

#else /* !(defined(USE_UXPT) && (USE_UXPT > 0)), which means uxpt is not used */

typedef struct UserExtendPageTable {
    /* empty when uxpt is not used */
} UxPageTableStruct;

bool UxpteIsEnabled(void)
{
    return false;
}

size_t UxPageTableSize(void)
{
    return 0;
}

PMState InitUxPageTable(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    return PM_OK;
}

PMState DeinitUxPageTable(UxPageTableStruct *upt)
{
    return PM_OK;
}

void UxpteGet(UxPageTableStruct *upt, uint64_t addr, size_t len) {}

void UxptePut(UxPageTableStruct *upt, uint64_t addr, size_t len) {}

bool UxpteIsPresent(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    return true;
}

#endif /* USE_UXPT > 0 */