/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <asyncio/AsyncIO.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "PosixAsyncIO.h"
#include "MtpDescriptors.h"
#include "MtpFfsHandle.h"
#include "mtp.h"

namespace {

constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;

constexpr unsigned FFS_NUM_EVENTS = 5;

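// Each io_buffer is split into AIO_BUFS_MAX requests of AIO_BUF_LEN bytes,
// so one submission moves at most 128 * 16 KiB = 2 MiB.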
constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;

constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
// Note: a poll() timeout of 0 ms would return immediately (no sleep) and cause
// high CPU usage, so wait up to 500 ms per iteration instead.
constexpr int32_t POLL_TIMEOUT_MS = 500;

struct timespec ZERO_TIMEOUT = { 0, 0 };

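// Header of the MTP GET_DEVICE_STATUS response: a 16-bit length followed by a
// 16-bit response code, optionally followed by additional parameters.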
struct mtp_device_status {
    uint16_t  wLength;
    uint16_t  wCode;
};

} // anonymous namespace

namespace android {

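// Ask FunctionFS for the endpoint's descriptor and return its wMaxPacketSize.
// If the ioctl fails, fall back to the high-speed bulk maximum.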
int MtpFfsHandle::getPacketSize(int ffs_fd) {
    struct usb_endpoint_descriptor desc;
    if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
        PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
        return MAX_PACKET_SIZE_HS;
    } else {
        return desc.wMaxPacketSize;
    }
}

MtpFfsHandle::MtpFfsHandle(int controlFd) {
    mControl.reset(controlFd);
    mBatchCancel = android::base::GetBoolProperty("sys.usb.mtp.batchcancel", false);
}

MtpFfsHandle::~MtpFfsHandle() {}

void MtpFfsHandle::closeEndpoints() {
    mIntr.reset();
    mBulkIn.reset();
    mBulkOut.reset();
}

bool MtpFfsHandle::openEndpoints(bool ptp) {
    if (mBulkIn < 0) {
        mBulkIn.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN, O_RDWR)));
        if (mBulkIn < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN) << ": cannot open bulk in ep";
            return false;
        }
    }

    if (mBulkOut < 0) {
        mBulkOut.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT, O_RDWR)));
        if (mBulkOut < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT) << ": cannot open bulk out ep";
            return false;
        }
    }

    if (mIntr < 0) {
        mIntr.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR, O_RDWR)));
        if (mIntr < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR) << ": cannot open intr ep";
            return false;
        }
    }
    return true;
}

void MtpFfsHandle::advise(int fd) {
    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
                POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) != 0)
            PLOG(ERROR) << "Failed to madvise";
    }
    if (posix_fadvise(fd, 0, 0,
                POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) != 0)
        PLOG(ERROR) << "Failed to fadvise";
}

bool MtpFfsHandle::writeDescriptors(bool ptp) {
    return ::android::writeDescriptors(mControl, ptp);
}

void MtpFfsHandle::closeConfig() {
    mControl.reset();
}

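// Perform a synchronous bulk transfer on top of the AIO machinery: the data is
// split across mIobuf[0], submitted to the kernel, and waited on before
// returning. When zero_packet is set and the length is a multiple of the
// endpoint's max packet size, a zero-length packet terminates the transfer.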
int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
    struct io_event ioevs[AIO_BUFS_MAX];
    size_t total = 0;

    while (total < len) {
        size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
        int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
        for (int i = 0; i < num_bufs; i++) {
            mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
        }
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
        total += ret;
        if (static_cast<size_t>(ret) < this_len) break;
    }

    int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
    if (len % packet_size == 0 && zero_packet) {
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
    }

    for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
        mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
    }
    return total;
}

int MtpFfsHandle::read(void* data, size_t len) {
    // Zero packets are handled by receiveFile()
    return doAsync(data, len, true, false);
}

int MtpFfsHandle::write(const void* data, size_t len) {
    return doAsync(const_cast<void*>(data), len, false, true);
}

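// Drain pending FunctionFS events from the control endpoint. BIND/ENABLE are
// benign, UNBIND/DISABLE shut the session down (ESHUTDOWN), and SETUP packets
// are forwarded to handleControlRequest().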
int MtpFfsHandle::handleEvent() {

    std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
    usb_functionfs_event *event = events.data();
    int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
                events.size() * sizeof(usb_functionfs_event)));
    if (nbytes == -1) {
        return -1;
    }
    int ret = 0;
    for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
        switch (event->type) {
        case FUNCTIONFS_BIND:
        case FUNCTIONFS_ENABLE:
            ret = 0;
            errno = 0;
            break;
        case FUNCTIONFS_UNBIND:
        case FUNCTIONFS_DISABLE:
            errno = ESHUTDOWN;
            ret = -1;
            break;
        case FUNCTIONFS_SETUP:
            if (handleControlRequest(&event->u.setup) == -1)
                ret = -1;
            break;
        case FUNCTIONFS_SUSPEND:
        case FUNCTIONFS_RESUME:
            break;
        default:
            LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
        }
    }
    return ret;
}

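// Handle a class-specific control request delivered via FUNCTIONFS_SETUP.
// MTP_REQ_CANCEL and MTP_REQ_RESET abort the current transfer with ECANCELED,
// while MTP_REQ_GET_DEVICE_STATUS reports either OK or, after a cancel, the
// transaction-cancelled code plus the two stalled endpoint numbers.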
int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
    uint8_t type = setup->bRequestType;
    uint8_t code = setup->bRequest;
    uint16_t length = setup->wLength;
    uint16_t index = setup->wIndex;
    uint16_t value = setup->wValue;
    std::vector<char> buf;
    buf.resize(length);

    if (!(type & USB_DIR_IN)) {
        if (::read(mControl, buf.data(), length) != length) {
            PLOG(ERROR) << "Mtp error ctrlreq read data";
        }
    }

    if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
        switch(code) {
        case MTP_REQ_RESET:
        case MTP_REQ_CANCEL:
            errno = ECANCELED;
            return -1;
        //    break;
        case MTP_REQ_GET_DEVICE_STATUS:
        {
            if (length < sizeof(struct mtp_device_status) + 4) {
                errno = EINVAL;
                return -1;
            }
            struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
            st->wLength = htole16(sizeof(*st)); // base status is wLength + wCode = 4 bytes
            if (mCanceled) {
                st->wLength += 4;
                st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
                uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
                endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
                endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
                mCanceled = false;
            } else {
                st->wCode = MTP_RESPONSE_OK;
            }
            length = st->wLength;
            break;
        }
        default:
            LOG(ERROR) << "Unrecognized Mtp class request! " << code;
        }
    } else {
        LOG(ERROR) << "Unrecognized request type " << type;
    }

    if (type & USB_DIR_IN) {
        if (::write(mControl, buf.data(), length) != length) {
            PLOG(ERROR) << "Mtp error ctrlreq write data";
        }
    }
    return 0;
}

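// Open the endpoints, size the two 2 MiB I/O buffers, create the kernel AIO
// context and the eventfd used for completion notification, and register the
// control fd and eventfd for polling. Typically writeDescriptors() precedes
// start(); close() tears everything down again.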
int MtpFfsHandle::start(bool ptp) {
    if (!openEndpoints(ptp))
        return -1;

    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
        mIobuf[i].iocb.resize(AIO_BUFS_MAX);
        mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
        mIobuf[i].buf.resize(AIO_BUFS_MAX);
        for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
            mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
            mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
        }
    }

    memset(&mCtx, 0, sizeof(mCtx));
    if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
        PLOG(ERROR) << "unable to setup aio";
        return -1;
    }
    mEventFd.reset(eventfd(0, EFD_NONBLOCK));
    mPollFds[0].fd = mControl;
    mPollFds[0].events = POLLIN;
    mPollFds[1].fd = mEventFd;
    mPollFds[1].events = POLLIN;

    mCanceled = false;
    return 0;
}

void MtpFfsHandle::close() {
    // Join all child threads before destruction
    int count = mChildThreads.size();
    for (int i = 0; i < count; i++) {
        mChildThreads[i].join();
    }
    mChildThreads.clear();

    io_destroy(mCtx);
    closeEndpoints();
    closeConfig();
}

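// Wait until at least min_events AIO completions have been collected. poll()
// watches both the control fd (for cancel/disable events) and the eventfd that
// the kernel signals on completion; io_getevents is then called with a zero
// timeout so it never blocks. Returns the total bytes transferred, or -1 with
// errno set if any event or request failed.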
int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
        int *counter) {
    int num_events = 0;
    int ret = 0;
    int error = 0;

    while (num_events < min_events) {
        if (poll(mPollFds, 2, POLL_TIMEOUT_MS) == -1) {
            PLOG(ERROR) << "Mtp error during poll()";
            return -1;
        }
        if (mPollFds[0].revents & POLLIN) {
            mPollFds[0].revents = 0;
            if (handleEvent() == -1) {
                error = errno;
            }
        }
        if (mPollFds[1].revents & POLLIN) {
            mPollFds[1].revents = 0;
            uint64_t ev_cnt = 0;

            if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
                PLOG(ERROR) << "Mtp unable to read eventfd";
                error = errno;
                continue;
            }

            // It's possible that io_getevents will return more events than the eventFd reported,
            // since events may appear in the time between the calls. In this case, the eventFd will
            // show up as readable next iteration, but there will be fewer or no events to actually
            // wait for. Thus we never want io_getevents to block.
            int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
            if (this_events == -1) {
                PLOG(ERROR) << "Mtp error getting events";
                error = errno;
            }
            // Add up the total amount of data and find errors on the way.
            for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
                if (events[j].res < 0) {
                    errno = -events[j].res;
                    PLOG(ERROR) << "Mtp got error event at " << j << " and " << buf->actual << " total";
                    error = errno;
                }
                ret += events[j].res;
            }
            num_events += this_events;
            if (counter)
                *counter += this_events;
        }
        if (error) {
            errno = error;
            ret = -1;
            break;
        }
    }
    return ret;
}

void MtpFfsHandle::cancelTransaction() {
    // Device cancels by stalling both bulk endpoints.
    if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk in";
    if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk out";
    mCanceled = true;
    errno = ECANCELED;
}

int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
        unsigned end, bool is_batch_cancel) {
    // Some manpages for io_cancel are out of date and incorrect.
    // io_cancel will return -EINPROGRESS on success and does
    // not place the event in the given memory. We have to use
    // io_getevents to wait for all the events we cancelled.
    int ret = 0;
    unsigned num_events = 0;
    int save_errno = errno;
    errno = 0;

    for (unsigned j = start; j < end; j++) {
        if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
            PLOG(ERROR) << "Mtp couldn't cancel request " << j;
        } else {
            num_events++;
        }
        if (is_batch_cancel && num_events == 1) {
            num_events = end - start;
            break;
        }
    }
    if (num_events != end - start) {
        ret = -1;
        errno = EIO;
    }
    int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
    if (static_cast<unsigned>(evs) != num_events) {
        PLOG(ERROR) << "Mtp couldn't cancel all requests, got " << evs;
        ret = -1;
    }

    uint64_t ev_cnt = 0;
    if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
        PLOG(ERROR) << "Mtp Unable to read event fd";

    if (ret == 0) {
        // Restore errno since it probably got overridden with EINPROGRESS.
        errno = save_errno;
    }
    return ret;
}

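// Prepare and submit up to AIO_BUFS_MAX iocbs covering `length` bytes of the
// io_buffer, AIO_BUF_LEN bytes each, with completions signalled through the
// eventfd. buf->actual records how many requests were actually submitted.
// Returns that count, or -1 on a submission error.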
int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
    int ret = 0;
    buf->actual = AIO_BUFS_MAX;
    for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
        unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
        io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
        buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
        buf->iocb[j]->aio_resfd = mEventFd;

        // Not enough data, so table is truncated.
        if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
            buf->actual = j + 1;
            break;
        }
    }

    ret = io_submit(mCtx, buf->actual, buf->iocb.data());
    if (ret != static_cast<int>(buf->actual)) {
        PLOG(ERROR) << "Mtp io_submit got " << ret << " expected " << buf->actual;
        if (ret != -1) {
            errno = EIO;
        }
        ret = -1;
    }
    return ret;
}

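// Receive a file from the host. USB reads and disk writes are double-buffered
// through mIobuf[0]/mIobuf[1]: while one chunk is being read from the bulk-out
// endpoint with kernel AIO, the previous chunk is written to disk with POSIX
// AIO. Files reported as 0xFFFFFFFF (>= 4 GiB) are received until a short
// packet marks the end of the data phase.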
int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
    // When receiving files, the incoming length is given in 32 bits.
    // A >=4G file is given as 0xFFFFFFFF
    uint32_t file_length = mfr.length;
    uint64_t offset = mfr.offset;

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    aio.aio_buf = nullptr;
    struct aiocb *aiol[] = {&aio};

    int ret = -1;
    unsigned i = 0;
    size_t length;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool has_write = false;
    bool error = false;
    bool write_error = false;
    int packet_size = getPacketSize(mBulkOut);
    bool short_packet = false;
    advise(mfr.fd);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        // Queue an asynchronous read from USB.
        if (file_length > 0) {
            length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
            if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
                error = true;
        }

        // Get the return status of the last write request.
        if (has_write) {
            aio_suspend(aiol, 1, nullptr);
            int written = aio_return(&aio);
            if (static_cast<size_t>(written) < aio.aio_nbytes) {
                errno = written == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error writing to disk";
                write_error = true;
            }
            has_write = false;
        }

        if (error) {
            return -1;
        }

        // Get the result of the read request, and queue a write to disk.
        if (file_length > 0) {
            unsigned num_events = 0;
            ret = 0;
            unsigned short_i = mIobuf[i].actual;
            while (num_events < short_i) {
                // Get all events up to the short read, if there is one.
                // We must wait for each event since data transfer could end at any time.
                int this_events = 0;
                int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
                num_events += this_events;

                if (event_ret == -1) {
                    cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual,
                            mBatchCancel);
                    return -1;
                }
                ret += event_ret;
                for (int j = 0; j < this_events; j++) {
                    // struct io_event contains a pointer to the associated struct iocb as a __u64.
                    if (static_cast<__u64>(ioevs[j].res) <
                            reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
                        // We've found a short event. Store the index since
                        // events won't necessarily arrive in the order they are queued.
                        short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
                            / sizeof(struct iocb) + 1;
                        short_packet = true;
                    }
                }
            }
            if (short_packet) {
                if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual,
                        mBatchCancel)) {
                    write_error = true;
                }
            }
            if (file_length == MAX_MTP_FILE_SIZE) {
                // For larger files, receive until a short packet is received.
                if (static_cast<size_t>(ret) < length) {
                    file_length = 0;
                }
            } else if (ret < static_cast<int>(length)) {
                // If file is less than 4G and we get a short packet, it's an error.
                errno = EIO;
                LOG(ERROR) << "Mtp got unexpected short packet";
                return -1;
            } else {
                file_length -= ret;
            }

            if (write_error) {
                cancelTransaction();
                return -1;
            }

            // Enqueue a new write request
            aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
            aio_write(&aio);

            offset += ret;
            i = (i + 1) % NUM_IO_BUFS;
            has_write = true;
        }
    }
    if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
        // Receive an empty packet if size is a multiple of the endpoint size
        // and we didn't already get an empty packet from the header or large file.
        if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
            return -1;
        }
    }
    return 0;
}

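// Send a file to the host. The first bulk-in packet carries the MTP data
// header packed together with the beginning of the file, since some hosts do
// not accept a header sent in its own packet. The rest of the file is then
// pipelined: a POSIX AIO disk read fills one buffer while the other buffer is
// written to USB with kernel AIO. A trailing zero-length packet is sent if the
// transfer ends on a packet boundary.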
int MtpFfsHandle::sendFile(mtp_file_range mfr) {
    uint64_t file_length = mfr.length;
    uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
            file_length + sizeof(mtp_data_header));
    uint64_t offset = mfr.offset;
    int packet_size = getPacketSize(mBulkIn);

    // If file_length is larger than a size_t, truncating would produce the wrong comparison.
    // Instead, promote the left side to 64 bits, then truncate the small result.
    int init_read_len = std::min(
            static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);

    advise(mfr.fd);

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    struct aiocb *aiol[] = {&aio};
    int ret = 0;
    int length, num_read;
    unsigned i = 0;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool error = false;
    bool has_write = false;

    // Send the header data
    mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
    if (header == NULL) {
        return -1;
    }
    header->length = htole32(given_length);
    header->type = htole16(2); // data packet
    header->command = htole16(mfr.command);
    header->transaction_id = htole32(mfr.transaction_id);

    // Some hosts don't support header/data separation even though MTP allows it
    // Handle by filling first packet with initial file data
    if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
                    sizeof(mtp_data_header), init_read_len, offset))
            != init_read_len) return -1;
    if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
                false, false /* zlps are handled below */) == -1)
        return -1;
    file_length -= init_read_len;
    offset += init_read_len;
    ret = init_read_len + sizeof(mtp_data_header);

    // Break down the file into pieces that fit in buffers
    while(file_length > 0 || has_write) {
        if (file_length > 0) {
            // Queue up a read from disk.
            length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
            aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
            aio_read(&aio);
        }

        if (has_write) {
            // Wait for usb write. Cancel unwritten portion if there's an error.
            int num_events = 0;
            if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
                        &num_events) != ret) {
                error = true;
                cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
                        mIobuf[(i-1)%NUM_IO_BUFS].actual, false);
            }
            has_write = false;
        }

        if (file_length > 0) {
            // Wait for the previous read to finish
            aio_suspend(aiol, 1, nullptr);
            num_read = aio_return(&aio);
            if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
                errno = num_read == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error reading from disk";
                cancelTransaction();
                return -1;
            }

            file_length -= num_read;
            offset += num_read;

            if (error) {
                return -1;
            }

            // Queue up a write to usb.
            if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
                return -1;
            }
            has_write = true;
            ret = num_read;
        }

        i = (i + 1) % NUM_IO_BUFS;
    }

    if (ret % packet_size == 0) {
        // If the last packet wasn't short, send a final empty packet
        if (write(mIobuf[0].bufs.data(), 0) != 0) {
            return -1;
        }
    }
    return 0;
}

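// Queue an MTP event for asynchronous delivery on the interrupt endpoint,
// mirroring the old f_mtp kernel driver. The event payload is copied because
// the write happens on a separate thread, which is joined in close().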
int MtpFfsHandle::sendEvent(mtp_event me) {
    // Mimic the behavior of f_mtp by sending the event async.
    // Events aren't critical to the connection, so we don't need to check the return value.
    char *temp = new char[me.length];
    memcpy(temp, me.data, me.length);
    me.data = temp;

    std::thread t([this, me]() { return this->doSendEvent(me); });

    // Store the thread object for later joining
    mChildThreads.emplace_back(std::move(t));
    return 0;
}

void MtpFfsHandle::doSendEvent(mtp_event me) {
    unsigned length = me.length;
    int ret = ::write(mIntr, me.data, length);
    if (static_cast<unsigned>(ret) != length)
        PLOG(ERROR) << "Mtp error sending event thread!";
    delete[] reinterpret_cast<char*>(me.data);
}

} // namespace android