1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <assert.h>
17 #include <cutils/properties.h>
18 #include <errno.h>
19 #include <fcntl.h>
20 #include <inttypes.h>
21 #include <libgen.h>
22 #include <linux/fs.h>
23 #include <stdbool.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/syscall.h>
28 #include <sys/types.h>
29 #include <unistd.h>
30
31 #include "checkpoint_handling.h"
32 #include "ipc.h"
33 #include "log.h"
34 #include "storage.h"
35 #include "watchdog.h"
36
37 #define FD_TBL_SIZE 64
38 #define MAX_READ_SIZE 4096
39
40 #define ALTERNATE_DATA_DIR "alternate/"
41
/* Maximum file size for filesystem backed storage (i.e. not block dev backed storage) */
static uint64_t max_file_size = 0x10000000000;

/* Per-fd (and whole-fs) sync bookkeeping used by storage_sync_checkpoint(). */
enum sync_state {
    SS_UNUSED = -1,            /* table slot not in use */
    SS_CLEAN = 0,              /* no writes pending sync */
    SS_DIRTY = 1,              /* written since last fsync/sync */
    SS_CLEAN_NEED_SYMLINK = 2, /* clean; symlink creation deferred until first write */
};

/* Root directory for secure-storage backing files; set once in storage_init(). */
static const char *ssdir_name;

/* List head for storage mapping, elements added at init, and never removed */
static struct storage_mapping_node* storage_mapping_head;
56
57 #ifdef VENDOR_FS_READY_PROPERTY
58
59 /*
60 * Properties set to 1 after we have opened a file under ssdir_name. The backing
61 * files for both TD and TDP are currently located under /data/vendor/ss and can
62 * only be opened once userdata is mounted. This storageproxyd service is
63 * restarted when userdata is available, which causes the Trusty storage service
64 * to reconnect and attempt to open the backing files for TD and TDP. Once we
65 * set this property, other users can expect that the Trusty storage service
66 * ports will be available (although they may block if still being initialized),
67 * and connections will not be reset after this point (assuming the
68 * storageproxyd service stays running).
69 *
70 * fs_ready - secure storage is read-only (due to checkpointing after upgrade)
71 * fs_ready_rw - secure storage is readable and writable
72 */
73 #define FS_READY_PROPERTY "ro.vendor.trusty.storage.fs_ready"
74 #define FS_READY_RW_PROPERTY "ro.vendor.trusty.storage.fs_ready_rw"
75
/* has FS_READY_PROPERTY been set? */
static bool fs_ready_set = false;
/* has FS_READY_RW_PROPERTY been set? (only once checkpointing is inactive) */
static bool fs_ready_rw_set = false;
79
/*
 * Set the given Android system property to "1", logging the outcome.
 * Returns true iff the property was set successfully.
 */
static bool property_set_helper(const char* prop) {
    int rc = property_set(prop, "1");
    bool ok = (rc == 0);

    if (ok) {
        ALOGI("Set property %s\n", prop);
    } else {
        ALOGE("Could not set property %s, rc: %d\n", prop, rc);
    }

    return ok;
}
90
91 #endif // #ifdef VENDOR_FS_READY_PROPERTY
92
/* Whole-filesystem dirtiness: set when an untracked fd is written/created. */
static enum sync_state fs_state;
/* Per-fd dirtiness, indexed directly by fd number (fds >= FD_TBL_SIZE are untracked). */
static enum sync_state fd_state[FD_TBL_SIZE];

/* True when an active DSU/GSI image forces the alternate data directory. */
static bool alternate_mode;

/* Scratch response buffer for storage_file_read(); header plus payload. */
static struct {
    struct storage_file_read_resp hdr;
    uint8_t data[MAX_READ_SIZE];
} read_rsp;
102
insert_fd(int open_flags,int fd,struct storage_mapping_node * node)103 static uint32_t insert_fd(int open_flags, int fd, struct storage_mapping_node* node) {
104 uint32_t handle = fd;
105
106 if (handle < FD_TBL_SIZE) {
107 fd_state[fd] = SS_CLEAN; /* fd clean */
108 if (open_flags & O_TRUNC) {
109 assert(node == NULL);
110 fd_state[fd] = SS_DIRTY; /* set fd dirty */
111 }
112
113 if (node != NULL) {
114 fd_state[fd] = SS_CLEAN_NEED_SYMLINK;
115 }
116 } else {
117 ALOGW("%s: untracked fd %u\n", __func__, fd);
118 if (open_flags & (O_TRUNC | O_CREAT)) {
119 fs_state = SS_DIRTY;
120 }
121 }
122
123 if (node != NULL) {
124 node->fd = fd;
125 }
126
127 return handle;
128 }
129
clear_fd_symlink_status(uint32_t handle,struct storage_mapping_node * entry)130 static void clear_fd_symlink_status(uint32_t handle, struct storage_mapping_node* entry) {
131 /* Always clear FD, in case fd is not in FD_TBL */
132 entry->fd = -1;
133
134 if (handle >= FD_TBL_SIZE) {
135 ALOGE("%s: untracked fd=%u\n", __func__, handle);
136 return;
137 }
138
139 if (fd_state[handle] == SS_CLEAN_NEED_SYMLINK) {
140 fd_state[handle] = SS_CLEAN;
141 }
142 }
143
get_pending_symlink_mapping(uint32_t handle)144 static struct storage_mapping_node* get_pending_symlink_mapping(uint32_t handle) {
145 /* Fast lookup failure, is it in FD TBL */
146 if (handle < FD_TBL_SIZE && fd_state[handle] != SS_CLEAN_NEED_SYMLINK) {
147 return NULL;
148 }
149
150 /* Go find our mapping */
151 struct storage_mapping_node* curr = storage_mapping_head;
152 for (; curr != NULL; curr = curr->next) {
153 if (curr->fd == handle) {
154 return curr;
155 }
156 }
157
158 /* Safety check: state inconsistent if we get here with handle inside table range */
159 assert(handle >= FD_TBL_SIZE);
160
161 return NULL;
162 };
163
possibly_symlink_and_clear_mapping(uint32_t handle)164 static int possibly_symlink_and_clear_mapping(uint32_t handle) {
165 struct storage_mapping_node* entry = get_pending_symlink_mapping(handle);
166 if (entry == NULL) {
167 /* No mappings pending */
168 return 0;
169 }
170
171 /* Create full path */
172 char* path = NULL;
173 int rc = asprintf(&path, "%s/%s", ssdir_name, entry->file_name);
174 if (rc < 0) {
175 ALOGE("%s: asprintf failed\n", __func__);
176 return -1;
177 }
178
179 /* Try and setup the symlinking */
180 ALOGI("Creating symlink %s->%s\n", path, entry->backing_storage);
181 rc = symlink(entry->backing_storage, path);
182 if (rc < 0) {
183 ALOGE("%s: error symlinking %s->%s (%s)\n", __func__, path, entry->backing_storage,
184 strerror(errno));
185 free(path);
186 return rc;
187 }
188 free(path);
189
190 clear_fd_symlink_status(handle, entry);
191
192 return rc;
193 }
194
/* True when the handle still has a deferred symlink waiting on first write. */
static bool is_pending_symlink(uint32_t handle) {
    return get_pending_symlink_mapping(handle) != NULL;
}
199
lookup_fd(uint32_t handle,bool dirty)200 static int lookup_fd(uint32_t handle, bool dirty)
201 {
202 if (dirty) {
203 if (handle < FD_TBL_SIZE) {
204 fd_state[handle] = SS_DIRTY;
205 } else {
206 fs_state = SS_DIRTY;
207 }
208 }
209 return handle;
210 }
211
remove_fd(uint32_t handle)212 static int remove_fd(uint32_t handle)
213 {
214 /* Cleanup fd in symlink mapping if it exists */
215 struct storage_mapping_node* entry = get_pending_symlink_mapping(handle);
216 if (entry != NULL) {
217 entry->fd = -1;
218 }
219
220 if (handle < FD_TBL_SIZE) {
221 fd_state[handle] = SS_UNUSED; /* set to uninstalled */
222 }
223 return handle;
224 }
225
translate_errno(int error)226 static enum storage_err translate_errno(int error)
227 {
228 enum storage_err result;
229 switch (error) {
230 case 0:
231 result = STORAGE_NO_ERROR;
232 break;
233 case EBADF:
234 case EINVAL:
235 case ENOTDIR:
236 case EISDIR:
237 case ENAMETOOLONG:
238 result = STORAGE_ERR_NOT_VALID;
239 break;
240 case ENOENT:
241 result = STORAGE_ERR_NOT_FOUND;
242 break;
243 case EEXIST:
244 result = STORAGE_ERR_EXIST;
245 break;
246 case EPERM:
247 case EACCES:
248 result = STORAGE_ERR_ACCESS;
249 break;
250 default:
251 result = STORAGE_ERR_GENERIC;
252 break;
253 }
254
255 return result;
256 }
257
/*
 * pwrite() the whole buffer at the given offset, retrying on EINTR and on
 * short writes. Returns 0 on success or the negative pwrite() result.
 */
static ssize_t write_with_retry(int fd, const void *buf_, size_t size, off_t offset)
{
    const uint8_t *cursor = buf_;

    while (size > 0) {
        ssize_t written = TEMP_FAILURE_RETRY(pwrite(fd, cursor, size, offset));
        if (written < 0) {
            return written;
        }
        cursor += written;
        offset += written;
        size -= written;
    }
    return 0;
}
273
/*
 * pread() up to `size` bytes at the given offset, retrying on EINTR and on
 * short reads. Returns the number of bytes read (may be short at EOF) or a
 * negative pread() result on error.
 */
static ssize_t read_with_retry(int fd, void *buf_, size_t size, off_t offset)
{
    uint8_t *cursor = buf_;
    size_t total = 0;

    while (size > 0) {
        ssize_t n = TEMP_FAILURE_RETRY(pread(fd, cursor, size, offset));
        if (n < 0) {
            return n;
        }
        if (n == 0) {
            break; /* EOF */
        }
        cursor += n;
        offset += n;
        size -= n;
        total += n;
    }
    return total;
}
293
storage_file_delete(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)294 int storage_file_delete(struct storage_msg* msg, const void* r, size_t req_len,
295 struct watcher* watcher) {
296 char *path = NULL;
297 const struct storage_file_delete_req *req = r;
298
299 if (req_len < sizeof(*req)) {
300 ALOGE("%s: invalid request length (%zd < %zd)\n",
301 __func__, req_len, sizeof(*req));
302 msg->result = STORAGE_ERR_NOT_VALID;
303 goto err_response;
304 }
305
306 size_t fname_len = strlen(req->name);
307 if (fname_len != req_len - sizeof(*req)) {
308 ALOGE("%s: invalid filename length (%zd != %zd)\n",
309 __func__, fname_len, req_len - sizeof(*req));
310 msg->result = STORAGE_ERR_NOT_VALID;
311 goto err_response;
312 }
313
314 int rc = asprintf(&path, "%s/%s", ssdir_name, req->name);
315 if (rc < 0) {
316 ALOGE("%s: asprintf failed\n", __func__);
317 msg->result = STORAGE_ERR_GENERIC;
318 goto err_response;
319 }
320
321 watch_progress(watcher, "unlinking file");
322 rc = unlink(path);
323 if (rc < 0) {
324 rc = errno;
325 if (errno == ENOENT) {
326 ALOGV("%s: error (%d) unlinking file '%s'\n",
327 __func__, rc, path);
328 } else {
329 ALOGE("%s: error (%d) unlinking file '%s'\n",
330 __func__, rc, path);
331 }
332 msg->result = translate_errno(rc);
333 goto err_response;
334 }
335
336 ALOGV("%s: \"%s\"\n", __func__, path);
337 msg->result = STORAGE_NO_ERROR;
338
339 err_response:
340 if (path)
341 free(path);
342 return ipc_respond(msg, NULL, 0);
343 }
344
/*
 * fsync() the parent directory of `path` so a just-created/renamed entry is
 * durable. Best-effort: failures are logged, not propagated.
 *
 * Fix: POSIX dirname() may modify its argument in place, and `path` is a
 * const pointer to the caller's buffer (which callers keep using). Operate
 * on a private copy instead of passing the caller's buffer directly.
 */
static void sync_parent(const char* path, struct watcher* watcher) {
    watch_progress(watcher, "syncing parent");

    char* path_copy = strdup(path);
    if (path_copy == NULL) {
        ALOGE("%s: failed to duplicate path \"%s\"\n", __func__, path);
        watch_progress(watcher, "done syncing parent");
        return;
    }

    char* parent_path = dirname(path_copy);
    int parent_fd = TEMP_FAILURE_RETRY(open(parent_path, O_RDONLY));
    if (parent_fd >= 0) {
        fsync(parent_fd);
        close(parent_fd);
    } else {
        ALOGE("%s: failed to open parent directory \"%s\" for sync: %s\n", __func__, parent_path,
              strerror(errno));
    }
    free(path_copy);
    watch_progress(watcher, "done syncing parent");
}
359
get_storage_mapping_entry(const char * source)360 static struct storage_mapping_node* get_storage_mapping_entry(const char* source) {
361 struct storage_mapping_node* curr = storage_mapping_head;
362 for (; curr != NULL; curr = curr->next) {
363 if (!strcmp(source, curr->file_name)) {
364 ALOGI("Found backing file %s for %s\n", curr->backing_storage, source);
365 return curr;
366 }
367 }
368 return NULL;
369 }
370
is_backing_storage_mapped(const char * source)371 static bool is_backing_storage_mapped(const char* source) {
372 const struct storage_mapping_node* curr = storage_mapping_head;
373 for (; curr != NULL; curr = curr->next) {
374 if (!strcmp(source, curr->backing_storage)) {
375 ALOGI("Backed storage mapping exists for %s\n", curr->backing_storage);
376 return true;
377 }
378 }
379 return false;
380 }
381
382 /* Attempts to open a backed file, if mapped, without creating the symlink. Symlink will be created
383 * later on the first write. This allows us to continue reporting zero read sizes until the first
384 * write. */
open_possibly_mapped_file(const char * short_path,const char * full_path,int open_flags,struct storage_mapping_node ** entry)385 static int open_possibly_mapped_file(const char* short_path, const char* full_path, int open_flags,
386 struct storage_mapping_node** entry) {
387 /* See if mapping exists, report upstream if there is no mapping. */
388 struct storage_mapping_node* mapping_entry = get_storage_mapping_entry(short_path);
389 if (mapping_entry == NULL) {
390 return TEMP_FAILURE_RETRY(open(full_path, open_flags, S_IRUSR | S_IWUSR));
391 }
392
393 /* Check for existence of root path, we don't allow mappings during early boot */
394 struct stat buf = {0};
395 if (stat(ssdir_name, &buf) != 0) {
396 ALOGW("Root path not accessible yet, refuse to open mappings for now.\n");
397 return -1;
398 }
399
400 /* We don't support exclusive opening of mapped files */
401 if (open_flags & O_EXCL) {
402 ALOGE("Requesting exclusive open on backed storage isn't supported: %s\n", full_path);
403 return -1;
404 }
405
406 /* Try and open mapping file */
407 open_flags &= ~(O_CREAT | O_EXCL);
408 ALOGI("%s Attempting to open mapped file: %s\n", __func__, mapping_entry->backing_storage);
409 int fd =
410 TEMP_FAILURE_RETRY(open(mapping_entry->backing_storage, open_flags, S_IRUSR | S_IWUSR));
411 if (fd < 0) {
412 ALOGE("%s Failed to open mapping file: %s\n", __func__, mapping_entry->backing_storage);
413 return -1;
414 }
415
416 /* Let caller know which entry we used for opening */
417 *entry = mapping_entry;
418 return fd;
419 }
420
storage_file_open(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)421 int storage_file_open(struct storage_msg* msg, const void* r, size_t req_len,
422 struct watcher* watcher) {
423 char* path = NULL;
424 const struct storage_file_open_req *req = r;
425 struct storage_file_open_resp resp = {0};
426 struct storage_mapping_node* mapping_entry = NULL;
427
428 if (req_len < sizeof(*req)) {
429 ALOGE("%s: invalid request length (%zd < %zd)\n",
430 __func__, req_len, sizeof(*req));
431 msg->result = STORAGE_ERR_NOT_VALID;
432 goto err_response;
433 }
434
435 size_t fname_len = strlen(req->name);
436 if (fname_len != req_len - sizeof(*req)) {
437 ALOGE("%s: invalid filename length (%zd != %zd)\n",
438 __func__, fname_len, req_len - sizeof(*req));
439 msg->result = STORAGE_ERR_NOT_VALID;
440 goto err_response;
441 }
442
443 /*
444 * TODO(b/210501710): Expose GSI image running state to vendor
445 * storageproxyd. We want to control data file paths in vendor_init, but we
446 * don't have access to the necessary property there yet. When we have
447 * access to that property we can set the root data path read-only and only
448 * allow creation of files in alternate/. Checking paths here temporarily
449 * until that is fixed.
450 *
451 * We are just checking for "/" instead of "alternate/" because we still
452 * want to still allow access to "persist/" in alternate mode (for now, this
453 * may change in the future).
454 */
455 if (alternate_mode && !strchr(req->name, '/')) {
456 ALOGE("%s: Cannot open root data file \"%s\" in alternate mode\n", __func__, req->name);
457 msg->result = STORAGE_ERR_ACCESS;
458 goto err_response;
459 }
460
461 int rc = asprintf(&path, "%s/%s", ssdir_name, req->name);
462 if (rc < 0) {
463 ALOGE("%s: asprintf failed\n", __func__);
464 msg->result = STORAGE_ERR_GENERIC;
465 goto err_response;
466 }
467
468 int open_flags = O_RDWR;
469
470 if (req->flags & STORAGE_FILE_OPEN_TRUNCATE)
471 open_flags |= O_TRUNC;
472
473 if (req->flags & STORAGE_FILE_OPEN_CREATE) {
474 /*
475 * Create the alternate parent dir if needed & allowed.
476 *
477 * TODO(b/210501710): Expose GSI image running state to vendor
478 * storageproxyd. This directory should be created by vendor_init, once
479 * it has access to the necessary bit of information.
480 */
481 if (strstr(req->name, ALTERNATE_DATA_DIR) == req->name) {
482 char* parent_path = dirname(path);
483 rc = mkdir(parent_path, S_IRWXU);
484 if (rc == 0) {
485 sync_parent(parent_path, watcher);
486 } else if (errno != EEXIST) {
487 ALOGE("%s: Could not create parent directory \"%s\": %s\n", __func__, parent_path,
488 strerror(errno));
489 }
490 }
491
492 /* open or create */
493 if (req->flags & STORAGE_FILE_OPEN_CREATE_EXCLUSIVE) {
494 /* create exclusive */
495 open_flags |= O_CREAT | O_EXCL;
496
497 /* Look for and attempt opening a mapping, else just do normal open. */
498 rc = open_possibly_mapped_file(req->name, path, open_flags, &mapping_entry);
499 } else {
500 /* try open first */
501 rc = TEMP_FAILURE_RETRY(open(path, open_flags, S_IRUSR | S_IWUSR));
502 if (rc == -1 && errno == ENOENT) {
503 /* then try open with O_CREATE */
504 open_flags |= O_CREAT;
505
506 /* Look for and attempt opening a mapping, else just do normal open. */
507 rc = open_possibly_mapped_file(req->name, path, open_flags, &mapping_entry);
508 }
509
510 }
511 } else {
512 /* open an existing file */
513 rc = TEMP_FAILURE_RETRY(open(path, open_flags, S_IRUSR | S_IWUSR));
514 }
515
516 if (rc < 0) {
517 rc = errno;
518 if (errno == EEXIST || errno == ENOENT) {
519 ALOGV("%s: failed to open file \"%s\": %s\n",
520 __func__, path, strerror(errno));
521 } else {
522 ALOGE("%s: failed to open file \"%s\": %s\n",
523 __func__, path, strerror(errno));
524 }
525 msg->result = translate_errno(rc);
526 goto err_response;
527 }
528
529 if (open_flags & O_CREAT) {
530 sync_parent(path, watcher);
531 }
532
533 /* at this point rc contains storage file fd */
534 msg->result = STORAGE_NO_ERROR;
535 resp.handle = insert_fd(open_flags, rc, mapping_entry);
536 ALOGV("%s: \"%s\": fd = %u: handle = %d\n",
537 __func__, path, rc, resp.handle);
538
539 free(path);
540 path = NULL;
541
542 #ifdef VENDOR_FS_READY_PROPERTY
543 /* a backing file has been opened, notify any waiting init steps */
544 if (!fs_ready_set || !fs_ready_rw_set) {
545 bool is_checkpoint_active = false;
546
547 rc = is_data_checkpoint_active(&is_checkpoint_active);
548 if (rc != 0) {
549 ALOGE("is_data_checkpoint_active() failed (%d)\n", rc);
550 } else {
551 if (!fs_ready_rw_set && !is_checkpoint_active) {
552 fs_ready_rw_set = property_set_helper(FS_READY_RW_PROPERTY);
553 }
554
555 if (!fs_ready_set) {
556 fs_ready_set = property_set_helper(FS_READY_PROPERTY);
557 }
558 }
559 }
560 #endif // #ifdef VENDOR_FS_READY_PROPERTY
561
562 return ipc_respond(msg, &resp, sizeof(resp));
563
564 err_response:
565 if (path)
566 free(path);
567 return ipc_respond(msg, NULL, 0);
568 }
569
storage_file_close(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)570 int storage_file_close(struct storage_msg* msg, const void* r, size_t req_len,
571 struct watcher* watcher) {
572 const struct storage_file_close_req *req = r;
573
574 if (req_len != sizeof(*req)) {
575 ALOGE("%s: invalid request length (%zd != %zd)\n",
576 __func__, req_len, sizeof(*req));
577 msg->result = STORAGE_ERR_NOT_VALID;
578 goto err_response;
579 }
580
581 int fd = remove_fd(req->handle);
582 ALOGV("%s: handle = %u: fd = %u\n", __func__, req->handle, fd);
583
584 watch_progress(watcher, "fsyncing before file close");
585 int rc = fsync(fd);
586 watch_progress(watcher, "done fsyncing before file close");
587 if (rc < 0) {
588 rc = errno;
589 ALOGE("%s: fsync failed for fd=%u: %s\n",
590 __func__, fd, strerror(errno));
591 msg->result = translate_errno(rc);
592 goto err_response;
593 }
594
595 rc = close(fd);
596 if (rc < 0) {
597 rc = errno;
598 ALOGE("%s: close failed for fd=%u: %s\n",
599 __func__, fd, strerror(errno));
600 msg->result = translate_errno(rc);
601 goto err_response;
602 }
603
604 msg->result = STORAGE_NO_ERROR;
605
606 err_response:
607 return ipc_respond(msg, NULL, 0);
608 }
609
storage_file_write(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)610 int storage_file_write(struct storage_msg* msg, const void* r, size_t req_len,
611 struct watcher* watcher) {
612 int rc;
613 const struct storage_file_write_req *req = r;
614
615 if (req_len < sizeof(*req)) {
616 ALOGE("%s: invalid request length (%zd < %zd)\n",
617 __func__, req_len, sizeof(*req));
618 msg->result = STORAGE_ERR_NOT_VALID;
619 goto err_response;
620 }
621
622 /* Handle any delayed symlinking for this handle if any */
623 rc = possibly_symlink_and_clear_mapping(req->handle);
624 if (rc < 0) {
625 ALOGE("Failed to symlink storage\n");
626 msg->result = STORAGE_ERR_GENERIC;
627 goto err_response;
628 }
629
630 int fd = lookup_fd(req->handle, true);
631 watch_progress(watcher, "writing");
632 if (write_with_retry(fd, &req->data[0], req_len - sizeof(*req),
633 req->offset) < 0) {
634 watch_progress(watcher, "writing done w/ error");
635 rc = errno;
636 ALOGW("%s: error writing file (fd=%d): %s\n",
637 __func__, fd, strerror(errno));
638 msg->result = translate_errno(rc);
639 goto err_response;
640 }
641 watch_progress(watcher, "writing done");
642
643 if (msg->flags & STORAGE_MSG_FLAG_POST_COMMIT) {
644 rc = storage_sync_checkpoint(watcher);
645 if (rc < 0) {
646 msg->result = STORAGE_ERR_SYNC_FAILURE;
647 goto err_response;
648 }
649 }
650
651 msg->result = STORAGE_NO_ERROR;
652
653 err_response:
654 return ipc_respond(msg, NULL, 0);
655 }
656
storage_file_read(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)657 int storage_file_read(struct storage_msg* msg, const void* r, size_t req_len,
658 struct watcher* watcher) {
659 int rc;
660 const struct storage_file_read_req *req = r;
661
662 if (req_len != sizeof(*req)) {
663 ALOGE("%s: invalid request length (%zd != %zd)\n",
664 __func__, req_len, sizeof(*req));
665 msg->result = STORAGE_ERR_NOT_VALID;
666 goto err_response;
667 }
668
669 if (req->size > MAX_READ_SIZE) {
670 ALOGW("%s: request is too large (%u > %d) - refusing\n",
671 __func__, req->size, MAX_READ_SIZE);
672 msg->result = STORAGE_ERR_NOT_VALID;
673 goto err_response;
674 }
675
676 /* If this handle has a delayed symlink we should report 0 size reads until first write occurs
677 */
678 if (is_pending_symlink(req->handle)) {
679 ALOGI("Pending symlink: Forcing read result 0.\n");
680 msg->result = STORAGE_NO_ERROR;
681 return ipc_respond(msg, &read_rsp, sizeof(read_rsp.hdr));
682 }
683
684 int fd = lookup_fd(req->handle, false);
685 watch_progress(watcher, "reading");
686 ssize_t read_res = read_with_retry(fd, read_rsp.hdr.data, req->size,
687 (off_t)req->offset);
688 watch_progress(watcher, "reading done");
689 if (read_res < 0) {
690 rc = errno;
691 ALOGW("%s: error reading file (fd=%d): %s\n",
692 __func__, fd, strerror(errno));
693 msg->result = translate_errno(rc);
694 goto err_response;
695 }
696
697 msg->result = STORAGE_NO_ERROR;
698 return ipc_respond(msg, &read_rsp, read_res + sizeof(read_rsp.hdr));
699
700 err_response:
701 return ipc_respond(msg, NULL, 0);
702 }
703
storage_file_get_size(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)704 int storage_file_get_size(struct storage_msg* msg, const void* r, size_t req_len,
705 struct watcher* watcher) {
706 const struct storage_file_get_size_req *req = r;
707 struct storage_file_get_size_resp resp = {0};
708
709 if (req_len != sizeof(*req)) {
710 ALOGE("%s: invalid request length (%zd != %zd)\n",
711 __func__, req_len, sizeof(*req));
712 msg->result = STORAGE_ERR_NOT_VALID;
713 goto err_response;
714 }
715
716 struct stat stat;
717 int fd = lookup_fd(req->handle, false);
718 watch_progress(watcher, "fstat");
719 int rc = fstat(fd, &stat);
720 watch_progress(watcher, "fstat done");
721 if (rc < 0) {
722 rc = errno;
723 ALOGE("%s: error stat'ing file (fd=%d): %s\n",
724 __func__, fd, strerror(errno));
725 msg->result = translate_errno(rc);
726 goto err_response;
727 }
728
729 resp.size = stat.st_size;
730 msg->result = STORAGE_NO_ERROR;
731 return ipc_respond(msg, &resp, sizeof(resp));
732
733 err_response:
734 return ipc_respond(msg, NULL, 0);
735 }
736
storage_file_set_size(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)737 int storage_file_set_size(struct storage_msg* msg, const void* r, size_t req_len,
738 struct watcher* watcher) {
739 const struct storage_file_set_size_req *req = r;
740
741 if (req_len != sizeof(*req)) {
742 ALOGE("%s: invalid request length (%zd != %zd)\n",
743 __func__, req_len, sizeof(*req));
744 msg->result = STORAGE_ERR_NOT_VALID;
745 goto err_response;
746 }
747
748 int fd = lookup_fd(req->handle, true);
749 watch_progress(watcher, "ftruncate");
750 int rc = TEMP_FAILURE_RETRY(ftruncate(fd, req->size));
751 watch_progress(watcher, "ftruncate done");
752 if (rc < 0) {
753 rc = errno;
754 ALOGE("%s: error truncating file (fd=%d): %s\n",
755 __func__, fd, strerror(errno));
756 msg->result = translate_errno(rc);
757 goto err_response;
758 }
759
760 msg->result = STORAGE_NO_ERROR;
761
762 err_response:
763 return ipc_respond(msg, NULL, 0);
764 }
765
storage_file_get_max_size(struct storage_msg * msg,const void * r,size_t req_len,struct watcher * watcher)766 int storage_file_get_max_size(struct storage_msg* msg, const void* r, size_t req_len,
767 struct watcher* watcher) {
768 const struct storage_file_get_max_size_req* req = r;
769 struct storage_file_get_max_size_resp resp = {0};
770 uint64_t max_size = 0;
771
772 if (req_len != sizeof(*req)) {
773 ALOGE("%s: invalid request length (%zd != %zd)\n", __func__, req_len, sizeof(*req));
774 msg->result = STORAGE_ERR_NOT_VALID;
775 goto err_response;
776 }
777
778 struct stat stat;
779 int fd = lookup_fd(req->handle, false);
780 watch_progress(watcher, "fstat to get max size");
781 int rc = fstat(fd, &stat);
782 watch_progress(watcher, "fstat to get max size done");
783 if (rc < 0) {
784 ALOGE("%s: error stat'ing file (fd=%d): %s\n", __func__, fd, strerror(errno));
785 goto err_response;
786 }
787
788 if ((stat.st_mode & S_IFMT) == S_IFBLK) {
789 rc = ioctl(fd, BLKGETSIZE64, &max_size);
790 if (rc < 0) {
791 rc = errno;
792 ALOGE("%s: error calling ioctl on file (fd=%d): %s\n", __func__, fd, strerror(errno));
793 msg->result = translate_errno(rc);
794 goto err_response;
795 }
796 } else {
797 max_size = max_file_size;
798 }
799
800 resp.max_size = max_size;
801 msg->result = STORAGE_NO_ERROR;
802 return ipc_respond(msg, &resp, sizeof(resp));
803
804 err_response:
805 return ipc_respond(msg, NULL, 0);
806 }
807
determine_max_file_size(const char * max_file_size_from)808 int determine_max_file_size(const char* max_file_size_from) {
809 /* Use default if none passed in */
810 if (max_file_size_from == NULL) {
811 ALOGI("No max file source given, continuing to use default: 0x%" PRIx64 "\n",
812 max_file_size);
813 return 0;
814 }
815
816 /* Check that max_file_size_from is part of our mapping list. */
817 if (!is_backing_storage_mapped(max_file_size_from)) {
818 ALOGE("%s: file doesn't match mapped storages (filename=%s)\n", __func__,
819 max_file_size_from);
820 return -1;
821 }
822
823 ALOGI("Using %s to determine max file size.\n", max_file_size_from);
824
825 /* Error if max file size source not found, possible misconfig. */
826 struct stat buf = {0};
827 int rc = stat(max_file_size_from, &buf);
828 if (rc < 0) {
829 ALOGE("%s: error stat'ing file (filename=%s): %s\n", __func__, max_file_size_from,
830 strerror(errno));
831 return -1;
832 }
833
834 /* Currently only support block device as max file size source */
835 if ((buf.st_mode & S_IFMT) != S_IFBLK) {
836 ALOGE("Unsupported max file size source type: %d\n", buf.st_mode);
837 return -1;
838 }
839
840 ALOGI("%s is a block device, determining block device size\n", max_file_size_from);
841 uint64_t max_size = 0;
842 int fd = TEMP_FAILURE_RETRY(open(max_file_size_from, O_RDONLY | O_NONBLOCK));
843 if (fd < 0) {
844 ALOGE("%s: failed to open backing file %s for ioctl: %s\n", __func__, max_file_size_from,
845 strerror(errno));
846 return -1;
847 }
848 rc = ioctl(fd, BLKGETSIZE64, &max_size);
849 if (rc < 0) {
850 ALOGE("%s: error calling ioctl on file (fd=%d): %s\n", __func__, fd, strerror(errno));
851 close(fd);
852 return -1;
853 }
854 close(fd);
855 max_file_size = max_size;
856
857 ALOGI("Using 0x%" PRIx64 " as max file size\n", max_file_size);
858 return 0;
859 }
860
storage_init(const char * dirname,struct storage_mapping_node * mappings,const char * max_file_size_from)861 int storage_init(const char* dirname, struct storage_mapping_node* mappings,
862 const char* max_file_size_from) {
863 /* If there is an active DSU image, use the alternate fs mode. */
864 alternate_mode = is_gsi_running();
865
866 fs_state = SS_CLEAN;
867 for (uint i = 0; i < FD_TBL_SIZE; i++) {
868 fd_state[i] = SS_UNUSED; /* uninstalled */
869 }
870
871 ssdir_name = dirname;
872
873 storage_mapping_head = mappings;
874
875 /* Set the max file size based on incoming configuration */
876 int rc = determine_max_file_size(max_file_size_from);
877 if (rc < 0) {
878 return rc;
879 }
880
881 return 0;
882 }
883
/*
 * Make all pending writes durable: fsync each fd marked dirty in the table,
 * then fall back to a global sync() if any untracked fd dirtied the whole
 * filesystem. Returns 0 on success or the failing fsync() result.
 */
int storage_sync_checkpoint(struct watcher* watcher) {
    int rc;

    watch_progress(watcher, "sync fd table");
    /* sync fd table and reset it to clean state first */
    /* NOTE: the table index doubles as the raw fd number (see insert_fd) */
    for (uint fd = 0; fd < FD_TBL_SIZE; fd++) {
        if (fd_state[fd] == SS_DIRTY) {
            if (fs_state == SS_CLEAN) {
                /* need to sync individual fd */
                rc = fsync(fd);
                if (rc < 0) {
                    ALOGE("fsync for fd=%d failed: %s\n", fd, strerror(errno));
                    return rc;
                }
            }
            /* if fs_state is dirty the global sync() below covers this fd */
            fd_state[fd] = SS_CLEAN; /* set to clean */
        }
    }

    /* check if we need to sync all filesystems */
    if (fs_state == SS_DIRTY) {
        /*
         * We sync all filesystems here because we don't know what filesystem
         * needs syncing if there happen to be other filesystems symlinked under
         * the root data directory. This should not happen in the normal case
         * because our fd table is large enough to handle the few open files we
         * use.
         */
        watch_progress(watcher, "all fs sync");
        sync();
        fs_state = SS_CLEAN;
    }

    watch_progress(watcher, "done syncing");

    return 0;
}
921