/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 *
 * HDF is dual licensed: you can use it either under the terms of
 * the GPL, or the BSD license, at your option.
 * See the LICENSE file in the root of this repository for complete details.
 */

#include <securec.h>
#include "camera_buffer_manager_adapter.h"
#include "osal_mem.h"
#include "contig_dma.h"

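/*
 * Per-plane buffer state for contiguously allocated DMA memory. An instance
 * of this struct is what the MemOps callbacks below pass around as the
 * opaque bufPriv pointer.
 */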
struct ContigBuffer {
    struct device *dev;
    void *vaddr;
    unsigned long size;
    void *cookie;
    dma_addr_t dmaAddr;
    unsigned long dmaAttrs;
    enum dma_data_direction dmaDir;
    struct sg_table *dmaSgt;
    struct frame_vector *vec;

    /* MMAP related */
    struct VmareaHandler handler;
    refcount_t refCount;
    struct sg_table *sgtBase;

    /* DMABUF related */
    struct dma_buf_attachment *dbAttach;
};

/* scatterlist table functions */
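/*
 * Return the length of the leading physically contiguous run of the mapped
 * scatterlist, stopping at the first gap between DMA addresses.
 */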
static unsigned long GetContiguousSize(struct sg_table *sgt)
{
    struct scatterlist *slist = NULL;
    dma_addr_t expected = sg_dma_address(sgt->sgl);
    uint32_t i;
    unsigned long size = 0;

    for_each_sgtable_dma_sg(sgt, slist, i) {
        if (sg_dma_address(slist) != expected) {
            break;
        }
        expected += sg_dma_len(slist);
        size += sg_dma_len(slist);
    }
    return size;
}

/* callbacks for MMAP buffers */
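/*
 * Drop one reference on an MMAP buffer. The last reference frees the
 * kernel-side sg table, the DMA allocation, and the device reference taken
 * in ContigMmapAlloc().
 */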
static void ContigMmapFree(void *bufPriv)
{
    struct ContigBuffer *buf = bufPriv;
    if (buf == NULL) {
        return;
    }
    if (refcount_dec_and_test(&buf->refCount) == 0) {
        return;
    }

    if (buf->sgtBase != NULL) {
        sg_free_table(buf->sgtBase);
        OsalMemFree(buf->sgtBase);
        buf->sgtBase = NULL;
    }
    dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dmaAddr, buf->dmaAttrs);
    put_device(buf->dev);
    OsalMemFree(buf);
}

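/*
 * Allocate a physically contiguous DMA buffer for one plane, preferring the
 * per-plane allocation device when one is configured. The buffer starts
 * with a single reference; ContigMmapFree() is the matching release path.
 */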
static void *ContigMmapAlloc(struct BufferQueue *queue, uint32_t planeNum, unsigned long size)
{
    struct BufferQueueImp *queueImp = container_of(queue, struct BufferQueueImp, queue);
    struct ContigBuffer *buf = NULL;
    struct device *dev;

    if (queueImp->allocDev[planeNum] != NULL) {
        dev = queueImp->allocDev[planeNum];
    } else {
        dev = queueImp->dev;
    }

    if (dev == NULL) {
        return ERR_PTR(-EINVAL);
    }

    buf = OsalMemCalloc(sizeof(*buf));
    if (buf == NULL) {
        return ERR_PTR(-ENOMEM);
    }

    buf->dmaAttrs = queueImp->dmaAttrs;
    buf->cookie = dma_alloc_attrs(dev, size, &buf->dmaAddr, GFP_KERNEL | queueImp->gfpFlags, buf->dmaAttrs);
    if (buf->cookie == NULL) {
        OsalMemFree(buf);
        return ERR_PTR(-ENOMEM);
    }

    if ((buf->dmaAttrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
        buf->vaddr = buf->cookie;
    }

    /* Prevent the device from being released while the buffer is used */
    buf->dev = get_device(dev);
    buf->size = size;
    buf->dmaDir = queueImp->dmaDir;

    buf->handler.refCount = &buf->refCount;
    buf->handler.free = ContigMmapFree;
    buf->handler.arg = buf;

    refcount_set(&buf->refCount, 1);

    return buf;
}

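/*
 * Map an MMAP buffer into user space; vm_ops->open() takes an extra
 * reference that is dropped again when the mapping goes away.
 */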
static int32_t ContigMmap(void *bufPriv, void *vm)
{
    struct ContigBuffer *buf = bufPriv;
    struct vm_area_struct *vma = (struct vm_area_struct *)vm;
    int32_t ret;

    if (buf == NULL) {
        return -EINVAL;
    }

    ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dmaAddr, buf->size, buf->dmaAttrs);
    if (ret != 0) {
        return ret;
    }

    vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
    vma->vm_private_data = &buf->handler;
    vma->vm_ops = GetVmOps();

    vma->vm_ops->open(vma);

    return 0;
}

/* callbacks for USERPTR buffers */
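/*
 * Pin a user-space range for DMA. Page-backed memory is mapped through a
 * scatterlist; if the frame vector only holds raw PFNs (typically a
 * VM_PFNMAP region), the range must already be physically contiguous and
 * is mapped as a resource instead. Either way the result must be a single
 * contiguous DMA range covering the whole buffer.
 */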
static void *ContigAllocUserPtr(struct BufferQueue *queue,
    uint32_t planeNum, unsigned long vaddr, unsigned long size)
{
    struct BufferQueueImp *queueImp = container_of(queue, struct BufferQueueImp, queue);
    struct ContigBuffer *buf = NULL;
    struct frame_vector *vec = NULL;
    struct device *dev = NULL;
    uint32_t offset;
    int32_t numPages;
    int32_t i;
    int32_t ret;
    struct sg_table *sgt = NULL;
    unsigned long contigSize;
    unsigned long dmaAlign = dma_get_cache_alignment();

    if (queueImp->allocDev[planeNum] != NULL) {
        dev = queueImp->allocDev[planeNum];
    } else {
        dev = queueImp->dev;
    }
    /* Only cache aligned DMA transfers are reliable */
    if (IS_ALIGNED(vaddr | size, dmaAlign) == 0 || size == 0 || dev == NULL) {
        return ERR_PTR(-EINVAL);
    }
    buf = OsalMemCalloc(sizeof(*buf));
    if (buf == NULL) {
        return ERR_PTR(-ENOMEM);
    }
    buf->dev = dev;
    buf->dmaDir = queueImp->dmaDir;
    offset = lower_32_bits(offset_in_page(vaddr));
    vec = CreateFrameVec(vaddr, size);
    if (IS_ERR(vec)) {
        ret = PTR_ERR(vec);
        goto FAIL_BUF;
    }
    buf->vec = vec;
    numPages = frame_vector_count(vec);
    ret = frame_vector_to_pages(vec);
    if (ret < 0) {
        unsigned long *nums = frame_vector_pfns(vec);
        for (i = 1; i < numPages; i++) {
            if (nums[i - 1] + 1 != nums[i]) {
                goto FAIL_PFNVEC;
            }
        }
        buf->dmaAddr = dma_map_resource(buf->dev, __pfn_to_phys(nums[0]), size, buf->dmaDir, 0);
        if (dma_mapping_error(buf->dev, buf->dmaAddr) != 0) {
            ret = -ENOMEM;
            goto FAIL_PFNVEC;
        }
        goto OUT;
    }
    sgt = OsalMemCalloc(sizeof(*sgt));
    if (sgt == NULL) {
        ret = -ENOMEM;
        goto FAIL_PFNVEC;
    }
    ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), numPages, offset, size, GFP_KERNEL);
    if (ret != 0) {
        goto FAIL_SGT;
    }
    if (dma_map_sgtable(buf->dev, sgt, buf->dmaDir, DMA_ATTR_SKIP_CPU_SYNC) != 0) {
        ret = -EIO;
        goto FAIL_SGT_INIT;
    }
    contigSize = GetContiguousSize(sgt);
    if (contigSize < size) {
        ret = -EFAULT;
        goto FAIL_MAP_SG;
    }
    buf->dmaAddr = sg_dma_address(sgt->sgl);
    buf->dmaSgt = sgt;
OUT:
    buf->size = size;
    return buf;
FAIL_MAP_SG:
    dma_unmap_sgtable(buf->dev, sgt, buf->dmaDir, DMA_ATTR_SKIP_CPU_SYNC);
FAIL_SGT_INIT:
    sg_free_table(sgt);
FAIL_SGT:
    OsalMemFree(sgt);
FAIL_PFNVEC:
    DestroyFrameVec(vec);
FAIL_BUF:
    OsalMemFree(buf);
    return ERR_PTR(ret);
}

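/*
 * Undo ContigAllocUserPtr(): tear down the DMA mapping, mark the pages
 * dirty when the device may have written to them, and unpin the user pages.
 */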
static void ContigFreeUserPtr(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct ContigBuffer *buf = bufPriv;
    struct sg_table *sgt = buf->dmaSgt;
    struct page **pages = NULL;

    if (sgt != NULL) {
        dma_unmap_sgtable(buf->dev, sgt, buf->dmaDir, DMA_ATTR_SKIP_CPU_SYNC);
        pages = frame_vector_pages(buf->vec);
        /* sgt should exist only if vector contains pages... */
        if (IS_ERR(pages)) {
            return;
        }
        if (buf->dmaDir == DMA_FROM_DEVICE || buf->dmaDir == DMA_BIDIRECTIONAL) {
            int32_t frameVecCnt = frame_vector_count(buf->vec);
            for (int32_t i = 0; i < frameVecCnt; i++) {
                set_page_dirty_lock(pages[i]);
            }
        }
        sg_free_table(sgt);
        OsalMemFree(sgt);
    } else {
        dma_unmap_resource(buf->dev, buf->dmaAddr, buf->size, buf->dmaDir, 0);
    }
    DestroyFrameVec(buf->vec);
    OsalMemFree(buf);
}

/* callbacks for DMABUF buffers */
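/*
 * Map the attached dmabuf for DMA and verify that the exporter provided a
 * contiguous chunk large enough for the requested buffer size.
 */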
static int ContigMapDmaBuf(void *bufPriv)
{
    if (bufPriv == NULL) {
        return -EINVAL;
    }
    struct ContigBuffer *buf = bufPriv;
    struct sg_table *sgt = NULL;
    unsigned long contigSize;

    if (buf->dbAttach == NULL) {
        return -EINVAL;
    }

    if (buf->dmaSgt != NULL) {
        return 0;
    }

    /* get the associated scatterlist for this buffer */
    sgt = dma_buf_map_attachment(buf->dbAttach, buf->dmaDir);
    if (IS_ERR(sgt)) {
        return -EINVAL;
    }

    /* check that the dmabuf holds a large enough contiguous chunk */
    contigSize = GetContiguousSize(sgt);
    if (contigSize < buf->size) {
        dma_buf_unmap_attachment(buf->dbAttach, sgt, buf->dmaDir);
        return -EFAULT;
    }

    buf->dmaAddr = sg_dma_address(sgt->sgl);
    buf->dmaSgt = sgt;
    buf->vaddr = NULL;

    return 0;
}

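/*
 * Release the DMA mapping (and any kernel vmap) created in
 * ContigMapDmaBuf() while keeping the dmabuf attachment itself alive.
 */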
static void ContigUnmapDmaBuf(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct ContigBuffer *buf = bufPriv;
    struct sg_table *sgt = buf->dmaSgt;

    if (buf->dbAttach == NULL) {
        return;
    }

    if (sgt == NULL) {
        return;
    }

    if (buf->vaddr != NULL) {
        dma_buf_vunmap(buf->dbAttach->dmabuf, buf->vaddr);
        buf->vaddr = NULL;
    }
    dma_buf_unmap_attachment(buf->dbAttach, sgt, buf->dmaDir);

    buf->dmaAddr = 0;
    buf->dmaSgt = NULL;
}

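/*
 * Attach an imported dmabuf for one plane. Only the attachment is created
 * here; the actual DMA mapping is deferred to ContigMapDmaBuf().
 */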
static void *ContigAttachDmaBuf(struct BufferQueue *queue, uint32_t planeNum, void *dmaBuf, unsigned long size)
{
    struct BufferQueueImp *queueImp = container_of(queue, struct BufferQueueImp, queue);
    struct ContigBuffer *buf = NULL;
    struct dma_buf_attachment *dba = NULL;
    struct device *dev = NULL;
    struct dma_buf *dbuf = (struct dma_buf *)dmaBuf;

    if (queueImp->allocDev[planeNum] != NULL) {
        dev = queueImp->allocDev[planeNum];
    } else {
        dev = queueImp->dev;
    }
    if (dbuf == NULL || dev == NULL) {
        return ERR_PTR(-EINVAL);
    }
    if (dbuf->size < size) {
        return ERR_PTR(-EFAULT);
    }

    buf = OsalMemCalloc(sizeof(*buf));
    if (buf == NULL) {
        return ERR_PTR(-ENOMEM);
    }
    if (memset_s(buf, sizeof(struct ContigBuffer), 0, sizeof(struct ContigBuffer)) != EOK) {
        HDF_LOGE("ContigAttachDmaBuf: [memset_s] fail!");
        OsalMemFree(buf);
        return ERR_PTR(-EINVAL);
    }
    buf->dev = dev;
    /* create attachment for the dmabuf with the user device */
    dba = dma_buf_attach(dbuf, buf->dev);
    if (IS_ERR(dba)) {
        OsalMemFree(buf);
        return dba;
    }

    buf->dmaDir = queueImp->dmaDir;
    buf->size = size;
    buf->dbAttach = dba;

    return buf;
}

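/*
 * Detach the dmabuf and free the buffer state; a mapping that is somehow
 * still live is torn down first.
 */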
static void ContigDetachDmaBuf(void *bufPriv)
{
    struct ContigBuffer *buf = bufPriv;
    if (buf == NULL) {
        return;
    }

    /* a correctly behaving queue never detaches a buffer that is still mapped */
    if (buf->dmaAddr != 0) {
        ContigUnmapDmaBuf(buf);
    }

    /* detach this attachment */
    dma_buf_detach(buf->dbAttach->dmabuf, buf->dbAttach);
    OsalMemFree(buf);
}

/* callbacks for all buffers */
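/*
 * The "cookie" handed to drivers is the buffer's DMA address; for dmabuf
 * imports a kernel virtual address is created lazily on first request.
 */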
static void *ContigGetCookie(void *bufPriv)
{
    struct ContigBuffer *buf = bufPriv;
    if (buf == NULL) {
        return ERR_PTR(-EINVAL);
    }

    return &buf->dmaAddr;
}

static void *ContigGetVaddr(void *bufPriv)
{
    struct ContigBuffer *buf = bufPriv;
    if (buf == NULL) {
        return ERR_PTR(-EINVAL);
    }

    if ((buf->vaddr == NULL) && (buf->dbAttach != NULL)) {
        buf->vaddr = dma_buf_vmap(buf->dbAttach->dmabuf);
    }

    return buf->vaddr;
}

static unsigned int ContigNumUsers(void *bufPriv)
{
    struct ContigBuffer *buf = bufPriv;
    if (buf == NULL) {
        return 0;
    }

    return refcount_read(&buf->refCount);
}

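/* Hand the buffer to the device: sync CPU caches before DMA starts */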
static void ContigPrepareMem(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct ContigBuffer *buf = bufPriv;
    struct sg_table *sgt = buf->dmaSgt;

    if (sgt == NULL) {
        return;
    }

    dma_sync_sgtable_for_device(buf->dev, sgt, buf->dmaDir);
}

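/* Hand the buffer back to the CPU: make device writes visible after DMA */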
static void ContigFinishMem(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct ContigBuffer *buf = bufPriv;
    struct sg_table *sgt = buf->dmaSgt;

    if (sgt == NULL) {
        return;
    }

    dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dmaDir);
}

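/*
 * MemOps table wiring the contiguous-DMA callbacks into the buffer queue
 * framework for the MMAP, USERPTR and DMABUF memory types.
 */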
struct MemOps g_dmaContigOps = {
    .mmapAlloc      = ContigMmapAlloc,
    .mmapFree       = ContigMmapFree,
    .mmap           = ContigMmap,
    .allocUserPtr   = ContigAllocUserPtr,
    .freeUserPtr    = ContigFreeUserPtr,
    .mapDmaBuf      = ContigMapDmaBuf,
    .unmapDmaBuf    = ContigUnmapDmaBuf,
    .attachDmaBuf   = ContigAttachDmaBuf,
    .detachDmaBuf   = ContigDetachDmaBuf,
    .getCookie      = ContigGetCookie,
    .getVaddr       = ContigGetVaddr,
    .numUsers       = ContigNumUsers,
    .syncForDevice  = ContigPrepareMem,
    .syncForUser    = ContigFinishMem,
};

struct MemOps *GetDmaContigOps(void)
{
    return &g_dmaContigOps;
}
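
/*
 * Minimal usage sketch (illustrative only, not part of this adapter): a
 * queue implementation selecting contiguous DMA would fetch this ops table
 * and allocate planes through it. The memOps field name below is an
 * assumption for illustration; only GetDmaContigOps() and the MemOps
 * callbacks are defined in this file.
 *
 *     struct BufferQueueImp *queueImp = ...;
 *     queueImp->memOps = GetDmaContigOps();    // hypothetical field
 *     void *plane = queueImp->memOps->mmapAlloc(&queueImp->queue, 0, size);
 *     if (IS_ERR(plane)) {
 *         return PTR_ERR(plane);
 *     }
 */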