/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <liburing.h>

#include <algorithm>  // std::min
#include <cerrno>     // EINTR, errno
#include <cstring>    // strerror

#include "liburing/io_uring.h"
#include "usb.h"

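// Queues one asynchronous read on the ring's submission queue. The SQE is
// flagged IOSQE_IO_LINK so it is chained to the next queued request and
// IOSQE_ASYNC so the kernel issues it from async context. Returns 0 on
// success, -1 if no SQE slot is available.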
static int prep_async_read(struct io_uring* ring, int fd, void* data, size_t len, int64_t offset) {
    if (io_uring_sq_space_left(ring) <= 0) {
        LOG(ERROR) << "Submission queue ran out of space.";
        return -1;
    }
    auto sqe = io_uring_get_sqe(ring);
    if (sqe == nullptr) {
        return -1;
    }
    io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK | IOSQE_ASYNC);
    io_uring_prep_read(sqe, fd, data, len, offset);
    return 0;
}

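// Queues one asynchronous write on the ring's submission queue; the write
// counterpart of prep_async_read(), taking a const buffer and using
// io_uring_prep_write(). Returns 0 on success, -1 if no SQE slot is available.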
static int prep_async_write(struct io_uring* ring, int fd, const void* data, size_t len,
                            int64_t offset) {
    if (io_uring_sq_space_left(ring) <= 0) {
        LOG(ERROR) << "Submission queue ran out of space.";
        return -1;
    }
    auto sqe = io_uring_get_sqe(ring);
    if (sqe == nullptr) {
        return -1;
    }
    io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK | IOSQE_ASYNC);
    io_uring_prep_write(sqe, fd, data, len, offset);
    return 0;
}

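// Compile-time dispatch to prep_async_read() or prep_async_write(), selected
// by the |read| template parameter.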
template <bool read, typename T>
int prep_async_io(struct io_uring* ring, int fd, T* data, size_t len, int64_t offset) {
    if constexpr (read) {
        return prep_async_read(ring, fd, data, len, offset);
    } else {
        return prep_async_write(ring, fd, data, len, offset);
    }
}

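// Integer division that rounds up to the next whole value.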
template <typename T>
static constexpr T DivRoundup(T x, T y) {
    return (x + y - 1) / y;
}

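// Implemented elsewhere (hence the extern declaration); returns the maximum
// packet size of the endpoint behind |ffs_fd|. Used below only for error
// logging.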
extern int getMaxPacketSize(int ffs_fd);

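// Performs one transfer of |len| bytes on the FunctionFS endpoint via io_uring.
// The buffer is split into chunks of at most h->io_size bytes, one linked SQE
// is queued per chunk, all SQEs are submitted with a single io_uring_submit()
// call, and one CQE is then reaped per request. Returns the total number of
// bytes transferred, or -1 on failure.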
template <bool read, typename T>
static int usb_ffs_do_aio(usb_handle* h, T* const data, const int len) {
    const aio_block* aiob = read ? &h->read_aiob : &h->write_aiob;
    const int num_requests = DivRoundup<int>(len, h->io_size);
    auto cur_data = data;
    const auto packet_size = getMaxPacketSize(aiob->fd);

    // Split the transfer into chunks of at most h->io_size bytes, queuing one
    // linked SQE per chunk.
    for (int bytes_remain = len; bytes_remain > 0;) {
        const int buf_len = std::min(bytes_remain, static_cast<int>(h->io_size));
        const auto ret = prep_async_io<read>(&h->ring, aiob->fd, cur_data, buf_len, 0);
        if (ret < 0) {
            PLOG(ERROR) << "Failed to queue io_uring request";
            return -1;
        }

        bytes_remain -= buf_len;
        cur_data = reinterpret_cast<T*>(reinterpret_cast<size_t>(cur_data) + buf_len);
    }
    // Submit all queued SQEs to the kernel in a single syscall.
    const int ret = io_uring_submit(&h->ring);
    if (ret <= 0 || ret != num_requests) {
        PLOG(ERROR) << "io_uring: failed to submit SQE entries to kernel, submitted " << ret
                    << " of " << num_requests;
        return -1;
    }
    int res = 0;
    bool success = true;
    // Reap one CQE per submitted request and accumulate the transferred bytes.
    for (int i = 0; i < num_requests; ++i) {
        struct io_uring_cqe* cqe{};
        // io_uring_wait_cqe() returns 0 or a negative errno value and never
        // sets errno itself, so retry on -EINTR explicitly.
        int wait_ret;
        do {
            wait_ret = io_uring_wait_cqe(&h->ring, &cqe);
        } while (wait_ret == -EINTR);
        if (wait_ret < 0 || cqe == nullptr) {
            LOG(ERROR) << "Failed to get CQE from kernel: " << strerror(-wait_ret);
            success = false;
            continue;
        }
        res += cqe->res;
        if (cqe->res < 0) {
            LOG(ERROR) << "io_uring " << (read ? "read" : "write") << " request failed: i = " << i
                       << ", num_requests = " << num_requests << ", res = " << cqe->res << " ("
                       << strerror(-cqe->res) << "), request size: " << len
                       << ", io_size: " << h->io_size << ", max packet size: " << packet_size
                       << ", fd: " << aiob->fd;
            success = false;
            errno = -cqe->res;
        }
        io_uring_cqe_seen(&h->ring, cqe);
    }
    if (!success) {
        return -1;
    }
    return res;
}

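// usb_handle read hook; the allow_partial argument is unused.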
static int usb_ffs_io_uring_read(usb_handle* h, void* data, int len, bool /* allow_partial */) {
    return usb_ffs_do_aio<true>(h, data, len);
}

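// usb_handle write hook.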
static int usb_ffs_io_uring_write(usb_handle* h, const void* data, int len) {
    return usb_ffs_do_aio<false>(h, data, len);
}

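// Tears down the io_uring instance created by init_io_uring_ffs().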
void exit_io_uring_ffs(usb_handle* h) {
    io_uring_queue_exit(&h->ring);
}

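// Sets up an io_uring of the requested depth and installs the io_uring-backed
// read/write hooks on |h|. Returns false if ring setup fails.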
bool init_io_uring_ffs(usb_handle* h, size_t queue_depth) {
    // io_uring_queue_init() returns 0 on success or a negative errno value.
    const auto err = io_uring_queue_init(queue_depth, &h->ring, 0);
    if (err) {
        LOG(ERROR) << "Failed to initialize io_uring of depth " << queue_depth << ": "
                   << strerror(-err);
        return false;
    }
    h->write = usb_ffs_io_uring_write;
    h->read = usb_ffs_io_uring_read;
    return true;
}