2 * Copyright (C) 2008 The Android Open Source Project
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <stdatomic.h>
43 #include <sys/socket.h>
45 #include <sys/select.h>
47 #include <sys/types.h>
48 #include <netinet/in.h>
50 #define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
51 #include <sys/_system_properties.h>
52 #include <sys/system_properties.h>
54 #include "private/bionic_atomic_inline.h"
55 #include "private/bionic_futex.h"
56 #include "private/bionic_macros.h"
58 static const char property_service_socket[] = "/dev/socket/" PROP_SERVICE_NAME;
62 * Properties are stored in a hybrid trie/binary tree structure.
63 * Each property's name is delimited at '.' characters, and the tokens are put
64 * into a trie structure. Siblings at each level of the trie are stored in a
65 * binary tree. For instance, "ro.secure"="1" could be stored as follows:
67 * +-----+ children +----+ children +--------+
68 * | |-------------->| ro |-------------->| secure |
69 * +-----+ +----+ +--------+
71 * left / \ right left / | prop +===========+
72 * v v v +-------->| ro.secure |
73 * +-----+ +-----+ +-----+ +-----------+
74 * | net | | sys | | com | | 1 |
75 * +-----+ +-----+ +-----+ +===========+
78 // Represents a node in the trie.
// NOTE(review): the struct header and the name/namelen field declarations are
// elided in this excerpt; only the link fields and constructor are visible.
83 // TODO: The following fields should be declared as atomic_uint32_t.
84 // They should be assigned to with release semantics, instead of using
85 // explicit fences. Unfortunately, the read accesses are generally
86 // followed by more dependent read accesses, and the dependence
87 // is assumed to enforce memory ordering. Which it does on supported
88 // hardware. This technically should use memory_order_consume, if
89 // that worked as intended.
90 // We should also avoid rereading these fields redundantly, since not
91 // all processor implementations ensure that multiple loads from the
92 // same field are carried out in the right order.
// Offset (within the prop_area data region — see to_prop_obj) of this
// node's prop_info; presumably 0 means "no property here" — the lookup
// code tests it for truth before dereferencing.
93 volatile uint32_t prop;
// Offsets of the left/right siblings in the per-level binary tree.
95 volatile uint32_t left;
96 volatile uint32_t right;
// Offset of the root of the child level (next '.'-delimited token).
98 volatile uint32_t children;
// Copies the token name into the node's trailing name buffer and
// NUL-terminates it. The fence publishes the node before its offset is
// stored into a parent's link field by the caller.
102 prop_bt(const char *name, const uint8_t name_length) {
103 this->namelen = name_length;
104 memcpy(this->name, name, name_length);
105 this->name[name_length] = '\0';
106 ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store
107 // for subsequent pointer assignment.
111 DISALLOW_COPY_AND_ASSIGN(prop_bt);
// prop_area: header of the shared-memory properties region.
// NOTE(review): the struct header and the magic/version/bytes_used field
// declarations are elided in this excerpt.
// Global serial, bumped (with release semantics) on every add/update so
// readers can futex-wait for any change (see __system_property_wait_any).
116 atomic_uint_least32_t serial;
// Padding reserved for future header fields; zeroed at construction.
119 uint32_t reserved[28];
// Initializes an empty area: serial 0, reserved cleared, and the first
// sizeof(prop_bt) bytes of the data region pre-reserved for the root node.
122 prop_area(const uint32_t magic, const uint32_t version) :
123 magic(magic), version(version) {
124 atomic_init(&serial, 0);
125 memset(reserved, 0, sizeof(reserved));
126 // Allocate enough space for the root node.
127 bytes_used = sizeof(prop_bt);
131 DISALLOW_COPY_AND_ASSIGN(prop_area);
// prop_info: one name/value pair.
// NOTE(review): the struct header and the name field declaration are elided
// in this excerpt.
// Per-property serial: top byte holds the value length (valuelen << 24,
// see the atomic_init below); low bits are a change counter. Readers use
// it to detect torn reads (see __system_property_read).
135 atomic_uint_least32_t serial;
136 char value[PROP_VALUE_MAX];
// Copies name and value into the fixed buffers, NUL-terminating both, and
// seeds the serial with the value length in the top byte. The fence
// publishes the record before its offset is linked into the trie.
139 prop_info(const char *name, const uint8_t namelen, const char *value,
140 const uint8_t valuelen) {
141 memcpy(this->name, name, namelen);
142 this->name[namelen] = '\0';
143 atomic_init(&this->serial, valuelen << 24);
144 memcpy(this->value, value, valuelen);
145 this->value[valuelen] = '\0';
146 ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store
147 // for subsequent pointer assignment.
150 DISALLOW_COPY_AND_ASSIGN(prop_info);
// Cookie threaded through foreach_property by __system_property_find_nth:
// counts visited properties until the n-th is reached (see find_nth_fn).
// NOTE(review): the count/n/pi field declarations are elided in this excerpt.
153 struct find_nth_cookie {
158 find_nth_cookie(uint32_t n) : count(0), n(n), pi(NULL) {
// Path of the mapped properties file; overridable via
// __system_property_set_filename (primarily for testing).
162 static char property_filename[PATH_MAX] = PROP_FILENAME;
// True when a PROP_AREA_VERSION_COMPAT area was mapped; routes reads
// through the *_compat entry points.
163 static bool compat_mode = false;
// Size of the data region (mapping minus the prop_area header) and of the
// whole mapping, respectively; set by map_prop_area_rw / map_fd_ro.
164 static size_t pa_data_size;
165 static size_t pa_size;
167 // NOTE: This isn't static because system_properties_compat.c
169 prop_area *__system_property_area__ = NULL;
// Legacy fallback: recover the properties fd from the environment when the
// properties file itself does not exist (see map_prop_area).
// NOTE(review): the parsing/return lines are elided in this excerpt.
171 static int get_fd_from_env(void)
173 // This environment variable consists of two decimal integer
174 // values separated by a ",". The first value is a file descriptor
175 // and the second is the size of the system properties area. The
176 // size is currently unused.
177 char *env = getenv("ANDROID_PROPERTY_WORKSPACE");
// Creates and maps the writable properties area (server/init side): creates
// the file exclusively, sizes it to PA_SIZE, maps it read-write, and
// placement-constructs the prop_area header in the mapping.
// NOTE(review): several error-handling and cleanup lines are elided in this
// excerpt.
186 static int map_prop_area_rw()
188 /* dev is a tmpfs that we can use to carve a shared workspace
189 * out of, so let's do that...
// O_EXCL: fail if the file already exists — only one writable mapping may
// ever be created. 0444: readers get a read-only view.
191 const int fd = open(property_filename,
192 O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444);
195 if (errno == EACCES) {
196 /* for consistency with the case where the process has already
197 * mapped the page in and segfaults when trying to write to it
204 // TODO: Is this really required ? Does android run on any kernels that
205 // don't support O_CLOEXEC ?
206 const int ret = fcntl(fd, F_SETFD, FD_CLOEXEC);
212 if (ftruncate(fd, PA_SIZE) < 0) {
// Data region = whole mapping minus the prop_area header.
218 pa_data_size = pa_size - sizeof(prop_area);
221 void *const memory_area = mmap(NULL, pa_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
222 if (memory_area == MAP_FAILED) {
// Placement-new constructs the header (magic/version/serial) in place.
227 prop_area *pa = new(memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION);
229 /* plug into the lib property services */
230 __system_property_area__ = pa;
// Maps an already-open properties fd read-only after validating it:
// must be root-owned, not group/other-writable, large enough to hold the
// header, and carry the expected magic and a supported version.
// NOTE(review): error-return lines are elided in this excerpt.
236 static int map_fd_ro(const int fd) {
238 if (fstat(fd, &fd_stat) < 0) {
// Reject files an attacker could have substituted or could still write to.
242 if ((fd_stat.st_uid != 0)
243 || (fd_stat.st_gid != 0)
244 || ((fd_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0)
245 || (fd_stat.st_size < static_cast<off_t>(sizeof(prop_area))) ) {
249 pa_size = fd_stat.st_size;
250 pa_data_size = pa_size - sizeof(prop_area);
252 void* const map_result = mmap(NULL, pa_size, PROT_READ, MAP_SHARED, fd, 0);
253 if (map_result == MAP_FAILED) {
257 prop_area* pa = reinterpret_cast<prop_area*>(map_result);
258 if ((pa->magic != PROP_AREA_MAGIC) || (pa->version != PROP_AREA_VERSION &&
259 pa->version != PROP_AREA_VERSION_COMPAT)) {
// Old-format area: route subsequent reads through the compat code paths.
264 if (pa->version == PROP_AREA_VERSION_COMPAT) {
268 __system_property_area__ = pa;
// Client-side initialization: opens the properties file read-only and maps
// it via map_fd_ro. Falls back to an environment-provided fd only when the
// file is missing (ENOENT).
// NOTE(review): fd-validity checks and the cleanup/return tail are elided in
// this excerpt.
272 static int map_prop_area()
274 int fd(open(property_filename, O_RDONLY | O_NOFOLLOW | O_CLOEXEC));
276 /* For old kernels that don't support O_CLOEXEC */
277 const int ret = fcntl(fd, F_SETFD, FD_CLOEXEC);
// An fd taken from the environment is borrowed, not owned, so it must not
// be closed below — hence this flag.
284 bool close_fd = true;
285 if ((fd < 0) && (errno == ENOENT)) {
287 * For backwards compatibility, if the file doesn't
288 * exist, we use the environment to get the file descriptor.
289 * For security reasons, we only use this backup if the kernel
290 * returns ENOENT. We don't want to use the backup if the kernel
291 * returns other errors such as ENOMEM or ENFILE, since it
292 * might be possible for an external program to trigger this
295 fd = get_fd_from_env();
303 const int map_result = map_fd_ro(fd);
// Bump-allocates `size` bytes (rounded up to uint32_t alignment) from the
// prop_area data region. On success stores the data-region offset of the
// allocation into *off and returns a pointer to it. Server-only: callers
// rely on the single-threaded property service for exclusion.
// NOTE(review): the out-of-space return path is elided in this excerpt.
311 static void *allocate_obj(const size_t size, uint32_t *const off)
313 prop_area *pa = __system_property_area__;
314 const size_t aligned = BIONIC_ALIGN(size, sizeof(uint32_t));
315 if (pa->bytes_used + aligned > pa_data_size) {
319 *off = pa->bytes_used;
320 pa->bytes_used += aligned;
321 return pa->data + *off;
// Allocates and constructs a new trie node (with room for the name plus its
// NUL terminator) in the shared area; stores its offset into *off.
// NOTE(review): the allocation-failure path and return are elided in this
// excerpt.
324 static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint32_t *const off)
327 void *const offset = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
329 prop_bt* bt = new(offset) prop_bt(name, namelen);
// Allocates and constructs a new prop_info (with room for the name plus its
// NUL terminator) in the shared area; stores its offset into *off.
// NOTE(review): the allocation-failure path and return are elided in this
// excerpt.
337 static prop_info *new_prop_info(const char *name, uint8_t namelen,
338 const char *value, uint8_t valuelen, uint32_t *const off)
341 void* const offset = allocate_obj(sizeof(prop_info) + namelen + 1, &off_tmp);
343 prop_info* info = new(offset) prop_info(name, namelen, value, valuelen);
// Converts a data-region offset into a pointer within the mapped area,
// bounds-checking the offset and guarding against an unmapped area.
// NOTE(review): the NULL-return lines for the two guard cases are elided in
// this excerpt.
351 static void *to_prop_obj(const uint32_t off)
353 if (off > pa_data_size)
355 if (!__system_property_area__)
358 return (__system_property_area__->data + off);
// Returns the root trie node, which lives at offset 0 of the data region
// (reserved by the prop_area constructor).
361 static prop_bt *root_node()
363 return reinterpret_cast<prop_bt*>(to_prop_obj(0));
// Three-way comparison of two length-delimited name tokens: orders first by
// length, then lexicographically — defining the binary-tree ordering used
// by find_prop_bt.
// NOTE(review): the -1/1 return lines for the length cases are elided in
// this excerpt.
366 static int cmp_prop_name(const char *one, uint8_t one_len, const char *two,
369 if (one_len < two_len)
371 else if (one_len > two_len)
374 return strncmp(one, two, one_len);
// Searches the binary tree rooted at `bt` for the token `name` of length
// `namelen`; when absent and alloc_if_needed is set (server only), allocates
// a new node and links it under the appropriate left/right slot.
// NOTE(review): the loop structure, equality-return, and several
// return/brace lines are elided in this excerpt.
377 static prop_bt *find_prop_bt(prop_bt *const bt, const char *name,
378 uint8_t namelen, bool alloc_if_needed)
381 prop_bt* current = bt;
387 const int ret = cmp_prop_name(name, namelen, current->name, current->namelen);
394 current = reinterpret_cast<prop_bt*>(to_prop_obj(current->left));
396 if (!alloc_if_needed) {
400 // Note that there isn't a race condition here. "clients" never
401 // reach this code-path since it's only the (single threaded) server
402 // that allocates new nodes. Though "bt->left" is volatile, it can't
403 // have changed since the last value was last read.
404 uint32_t new_offset = 0;
405 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
// Linking the offset publishes the node (constructor issued the fence).
407 current->left = new_offset;
412 if (current->right) {
413 current = reinterpret_cast<prop_bt*>(to_prop_obj(current->right));
415 if (!alloc_if_needed) {
420 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
422 current->right = new_offset;
// Walks the trie one '.'-delimited token at a time to locate `name`'s
// prop_info. With alloc_if_needed set (server only), missing trie levels
// and the final prop_info are created along the way, with `value` used to
// initialize a newly created property.
// NOTE(review): the walk loop structure and several error/return lines are
// elided in this excerpt.
430 static const prop_info *find_property(prop_bt *const trie, const char *name,
431 uint8_t namelen, const char *value, uint8_t valuelen,
432 bool alloc_if_needed)
434 if (!trie) return NULL;
436 const char *remaining_name = name;
437 prop_bt* current = trie;
// Current token runs up to the next '.' (or to end of string).
439 const char *sep = strchr(remaining_name, '.');
440 const bool want_subtree = (sep != NULL);
441 const uint8_t substr_size = (want_subtree) ?
442 sep - remaining_name : strlen(remaining_name);
// Descend into (or create) the child level for this token.
448 prop_bt* root = NULL;
449 if (current->children) {
450 root = reinterpret_cast<prop_bt*>(to_prop_obj(current->children));
451 } else if (alloc_if_needed) {
452 uint32_t new_bt_offset;
453 root = new_prop_bt(remaining_name, substr_size, &new_bt_offset);
455 current->children = new_bt_offset;
463 current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed);
471 remaining_name = sep + 1;
// All tokens consumed: return the existing prop_info, or create one.
475 return reinterpret_cast<prop_info*>(to_prop_obj(current->prop));
476 } else if (alloc_if_needed) {
477 uint32_t new_info_offset;
478 prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_info_offset);
480 current->prop = new_info_offset;
// Sends a prop_msg to the property service over its UNIX-domain socket and
// waits (bounded) for the service to close the connection as its ack.
// NOTE(review): the sockaddr_un declaration, error returns, close(), and
// result plumbing are elided in this excerpt.
489 static int send_prop_msg(const prop_msg *msg)
491 const int fd = socket(AF_LOCAL, SOCK_STREAM, 0);
496 const size_t namelen = strlen(property_service_socket);
499 memset(&addr, 0, sizeof(addr));
500 strlcpy(addr.sun_path, property_service_socket, sizeof(addr.sun_path));
501 addr.sun_family = AF_LOCAL;
502 socklen_t alen = namelen + offsetof(sockaddr_un, sun_path) + 1;
503 if (TEMP_FAILURE_RETRY(connect(fd, reinterpret_cast<sockaddr*>(&addr), alen)) < 0) {
508 const int num_bytes = TEMP_FAILURE_RETRY(send(fd, msg, sizeof(prop_msg), 0));
511 if (num_bytes == sizeof(prop_msg)) {
512 // We successfully wrote to the property server but now we
513 // wait for the property server to finish its work. It
514 // acknowledges its completion by closing the socket so we
515 // poll here (on nothing), waiting for the socket to close.
516 // If you 'adb shell setprop foo bar' you'll see the POLLHUP
517 // once the socket closes. Out of paranoia we cap our poll
// events == 0: we only care about POLLHUP (always reported).
521 pollfds[0].events = 0;
522 const int poll_result = TEMP_FAILURE_RETRY(poll(pollfds, 1, 250 /* ms */));
523 if (poll_result == 1 && (pollfds[0].revents & POLLHUP) != 0) {
526 // Ignore the timeout and treat it like a success anyway.
527 // The init process is single-threaded and its property
528 // service is sometimes slow to respond (perhaps it's off
529 // starting a child process or something) and thus this
530 // times out and the caller thinks it failed, even though
531 // it's still getting around to it. So we fake it here,
532 // mostly for ctl.* properties, but we do try and wait 250
533 // ms so callers who do read-after-write can reliably see
534 // what they've written. Most of the time.
535 // TODO: fix the system properties design.
// foreach_property callback used by __system_property_find_nth: records the
// property whose visit index equals cookie->n.
// NOTE(review): the pi-assignment and count-increment lines are elided in
// this excerpt.
544 static void find_nth_fn(const prop_info *pi, void *ptr)
546 find_nth_cookie *cookie = reinterpret_cast<find_nth_cookie*>(ptr);
548 if (cookie->n == cookie->count)
// Recursively visits every prop_info reachable from the trie node at `off`
// (left subtree, own property, children, right subtree), invoking propfn on
// each. Returns nonzero on error.
// NOTE(review): error-propagation and return lines are elided in this
// excerpt.
554 static int foreach_property(const uint32_t off,
555 void (*propfn)(const prop_info *pi, void *cookie), void *cookie)
557 prop_bt *trie = reinterpret_cast<prop_bt*>(to_prop_obj(off));
562 const int err = foreach_property(trie->left, propfn, cookie);
567 prop_info *info = reinterpret_cast<prop_info*>(to_prop_obj(trie->prop));
570 propfn(info, cookie);
572 if (trie->children) {
573 const int err = foreach_property(trie->children, propfn, cookie);
578 const int err = foreach_property(trie->right, propfn, cookie);
// Client-side libc initialization: map the properties area read-only.
586 int __system_properties_init()
588 return map_prop_area();
// Overrides the path of the properties file (rejects names that do not fit
// in the static buffer).
// NOTE(review): the error and success return lines are elided in this
// excerpt.
591 int __system_property_set_filename(const char *filename)
593 size_t len = strlen(filename);
594 if (len >= sizeof(property_filename))
// Safe: length was checked against sizeof(property_filename) above.
597 strcpy(property_filename, filename);
// Server-side initialization: create and map the writable properties area.
601 int __system_property_area_init()
603 return map_prop_area_rw();
// Looks up a property by name; returns NULL when absent. Routes through the
// compat implementation when an old-format area was mapped.
606 const prop_info *__system_property_find(const char *name)
608 if (__predict_false(compat_mode)) {
609 return __system_property_find_compat(name);
611 return find_property(root_node(), name, strlen(name), NULL, 0, false);
614 // The C11 standard doesn't allow atomic loads from const fields,
615 // though C++11 does. Fudge it until standards get straightened out.
// NOTE(review): the memory_order parameter declaration line is elided in
// this excerpt.
616 static inline uint_least32_t load_const_atomic(const atomic_uint_least32_t* s,
618 atomic_uint_least32_t* non_const_s = const_cast<atomic_uint_least32_t*>(s);
619 return atomic_load_explicit(non_const_s, mo);
// Copies pi's value (and optionally its name) into the caller's buffers,
// retrying via the serial protocol until a consistent snapshot is read.
// NOTE(review): the retry-loop structure and return lines are elided in this
// excerpt.
622 int __system_property_read(const prop_info *pi, char *name, char *value)
624 if (__predict_false(compat_mode)) {
625 return __system_property_read_compat(pi, name, value);
629 uint32_t serial = __system_property_serial(pi); // acquire semantics
// Value length is encoded in the serial's top byte.
630 size_t len = SERIAL_VALUE_LEN(serial);
631 memcpy(value, pi->value, len + 1);
632 // TODO: Fix the synchronization scheme here.
633 // There is no fully supported way to implement this kind
634 // of synchronization in C++11, since the memcpy races with
635 // updates to pi, and the data being accessed is not atomic.
636 // The following fence is unintuitive, but would be the
637 // correct one if memcpy used memory_order_relaxed atomic accesses.
638 // In practice it seems unlikely that the generated code would
639 // be any different, so this should be OK.
640 atomic_thread_fence(memory_order_acquire);
// If the serial changed while we copied, the snapshot may be torn: retry.
642 load_const_atomic(&(pi->serial), memory_order_relaxed)) {
644 strcpy(name, pi->name);
// Convenience wrapper: find the property and read only its value.
// NOTE(review): the not-found path (and its return) is elided in this
// excerpt.
651 int __system_property_get(const char *name, char *value)
653 const prop_info *pi = __system_property_find(name);
656 return __system_property_read(pi, 0, value);
// Asks the property service (via its socket) to set key=value. A NULL value
// is treated as the empty string; over-long names or values are rejected.
// NOTE(review): the final error-check/return lines are elided in this
// excerpt.
663 int __system_property_set(const char *key, const char *value)
665 if (key == 0) return -1;
666 if (value == 0) value = "";
667 if (strlen(key) >= PROP_NAME_MAX) return -1;
668 if (strlen(value) >= PROP_VALUE_MAX) return -1;
671 memset(&msg, 0, sizeof msg);
672 msg.cmd = PROP_MSG_SETPROP;
673 strlcpy(msg.name, key, sizeof msg.name);
674 strlcpy(msg.value, value, sizeof msg.value);
676 const int err = send_prop_msg(&msg);
// Server-only in-place update of an existing property: marks the serial
// dirty, writes the new value, then publishes a clean serial carrying the
// new length, waking any futex waiters on both the property and area
// serials.
// NOTE(review): the dirty-bit computation on the stored serial and some
// argument lines are elided in this excerpt.
684 int __system_property_update(prop_info *pi, const char *value, unsigned int len)
686 prop_area *pa = __system_property_area__;
688 if (len >= PROP_VALUE_MAX)
691 uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_relaxed);
// Presumably stores the serial with its dirty bit set so concurrent
// readers spin/wait (see SERIAL_DIRTY in __system_property_serial) —
// the modified value being stored is elided here.
693 atomic_store_explicit(&pi->serial, serial, memory_order_relaxed);
694 // The memcpy call here also races. Again pretend it
695 // used memory_order_relaxed atomics, and use the analogous
696 // counterintuitive fence.
697 atomic_thread_fence(memory_order_release);
698 memcpy(pi->value, value, len + 1);
// New serial: value length in the top byte, incremented counter below.
699 atomic_store_explicit(
701 (len << 24) | ((serial + 1) & 0xffffff),
702 memory_order_release);
703 __futex_wake(&pi->serial, INT32_MAX);
// Bump the global area serial so waiters on "any change" wake too.
705 atomic_store_explicit(
707 atomic_load_explicit(&pa->serial, memory_order_relaxed) + 1,
708 memory_order_release);
709 __futex_wake(&pa->serial, INT32_MAX);
// Server-only creation of a new property: inserts it into the trie (with
// value), then bumps the global area serial and wakes waiters.
// NOTE(review): error returns and the insertion-failure check are elided in
// this excerpt.
714 int __system_property_add(const char *name, unsigned int namelen,
715 const char *value, unsigned int valuelen)
717 prop_area *pa = __system_property_area__;
720 if (namelen >= PROP_NAME_MAX)
722 if (valuelen >= PROP_VALUE_MAX)
727 pi = find_property(root_node(), name, namelen, value, valuelen, true);
731 // There is only a single mutator, but we want to make sure that
732 // updates are visible to a reader waiting for the update.
733 atomic_store_explicit(
735 atomic_load_explicit(&pa->serial, memory_order_relaxed) + 1,
736 memory_order_release);
737 __futex_wake(&pa->serial, INT32_MAX);
741 // Wait for non-locked serial, and retrieve it with acquire semantics.
// Spins via futex while the serial's dirty bit is set (an update is in
// progress), then returns the clean serial.
// NOTE(review): the futex expected-value argument and the return line are
// elided in this excerpt.
742 unsigned int __system_property_serial(const prop_info *pi)
744 uint32_t serial = load_const_atomic(&pi->serial, memory_order_acquire);
745 while (SERIAL_DIRTY(serial)) {
746 __futex_wait(const_cast<volatile void *>(
747 reinterpret_cast<const void *>(&pi->serial)),
749 serial = load_const_atomic(&pi->serial, memory_order_acquire);
// Blocks until the global area serial differs from `serial` (i.e. any
// property changed), returning the new serial.
// NOTE(review): the my_serial declaration, loop header, and return line are
// elided in this excerpt.
754 unsigned int __system_property_wait_any(unsigned int serial)
756 prop_area *pa = __system_property_area__;
760 __futex_wait(&pa->serial, serial, NULL);
761 my_serial = atomic_load_explicit(&pa->serial, memory_order_acquire);
762 } while (my_serial == serial);
// Returns the n-th property in trie-traversal order (or presumably NULL on
// error / out of range — the tail of the function is elided in this
// excerpt). Implemented by counting during a full foreach.
767 const prop_info *__system_property_find_nth(unsigned n)
769 find_nth_cookie cookie(n);
771 const int err = __system_property_foreach(find_nth_fn, &cookie);
// Invokes propfn(pi, cookie) for every property, starting the traversal at
// the root node (offset 0). Routes through the compat implementation when
// an old-format area was mapped.
779 int __system_property_foreach(void (*propfn)(const prop_info *pi, void *cookie),
782 if (__predict_false(compat_mode)) {
783 return __system_property_foreach_compat(propfn, cookie);
786 return foreach_property(0, propfn, cookie);