2 * Copyright (C) 2008 The Android Open Source Project
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <stdatomic.h>
42 #include <linux/xattr.h>
43 #include <netinet/in.h>
45 #include <sys/select.h>
46 #include <sys/socket.h>
48 #include <sys/types.h>
51 #include <sys/xattr.h>
53 #define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
54 #include <sys/_system_properties.h>
55 #include <sys/system_properties.h>
57 #include <async_safe/log.h>
59 #include "private/ErrnoRestorer.h"
60 #include "private/bionic_futex.h"
61 #include "private/bionic_lock.h"
62 #include "private/bionic_macros.h"
63 #include "private/bionic_sdk_version.h"
// Maximum length of a property backing-file path.
static constexpr int PROP_FILENAME_MAX = 1024;

// "PROP" in little-endian ASCII; identifies a mapped prop_area.
static constexpr uint32_t PROP_AREA_MAGIC = 0x504f5250;
static constexpr uint32_t PROP_AREA_VERSION = 0xfc6ed0ab;

// Size of each property area mapping.
static constexpr size_t PA_SIZE = 128 * 1024;

// A prop_info serial's low bit marks an in-progress (dirty) update;
// the top 8 bits carry the value length.
#define SERIAL_DIRTY(serial) ((serial)&1)
#define SERIAL_VALUE_LEN(serial) ((serial) >> 24)
75 static const char property_service_socket[] = "/dev/socket/" PROP_SERVICE_NAME;
76 static const char* kServiceVersionPropertyName = "ro.property_service.version";
79 * Properties are stored in a hybrid trie/binary tree structure.
80 * Each property's name is delimited at '.' characters, and the tokens are put
81 * into a trie structure. Siblings at each level of the trie are stored in a
82 * binary tree. For instance, "ro.secure"="1" could be stored as follows:
84 * +-----+ children +----+ children +--------+
85 * | |-------------->| ro |-------------->| secure |
86 * +-----+ +----+ +--------+
88 * left / \ right left / | prop +===========+
89 * v v v +-------->| ro.secure |
90 * +-----+ +-----+ +-----+ +-----------+
91 * | net | | sys | | com | | 1 |
92 * +-----+ +-----+ +-----+ +===========+
// Represents a node in the trie.
struct prop_bt {
  uint32_t namelen;

  // The property trie is updated only by the init process (single threaded) which provides
  // property service. And it can be read by multiple threads at the same time.
  // As the property trie is not protected by locks, we use atomic_uint_least32_t types for the
  // left, right, children "pointers" in the trie node. To make sure readers who see the
  // change of "pointers" can also notice the change of prop_bt structure contents pointed by
  // the "pointers", we always use release-consume ordering pair when accessing these "pointers".

  // prop "points" to prop_info structure if there is a property associated with the trie node.
  // Its situation is similar to the left, right, children "pointers". So we use
  // atomic_uint_least32_t and release-consume ordering to protect it as well.

  // We should also avoid rereading these fields redundantly, since not
  // all processor implementations ensure that multiple loads from the
  // same field are carried out in the right order.
  atomic_uint_least32_t prop;

  atomic_uint_least32_t left;
  atomic_uint_least32_t right;

  atomic_uint_least32_t children;

  // Trailing variable-length, NUL-terminated node name.
  char name[0];

  prop_bt(const char* name, const uint32_t name_length) {
    this->namelen = name_length;
    memcpy(this->name, name, name_length);
    this->name[name_length] = '\0';
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(prop_bt);
};
// A shared-memory region holding a trie of properties; placement-new'd
// over an mmap'ed file by map_prop_area_rw().
class prop_area {
 public:
  prop_area(const uint32_t magic, const uint32_t version) : magic_(magic), version_(version) {
    atomic_init(&serial_, 0);
    memset(reserved_, 0, sizeof(reserved_));
    // Allocate enough space for the root node.
    bytes_used_ = sizeof(prop_bt);
  }

  const prop_info* find(const char* name);
  bool add(const char* name, unsigned int namelen, const char* value, unsigned int valuelen);

  bool foreach (void (*propfn)(const prop_info* pi, void* cookie), void* cookie);

  atomic_uint_least32_t* serial() {
    return &serial_;
  }
  uint32_t magic() const {
    return magic_;
  }
  uint32_t version() const {
    return version_;
  }

 private:
  void* allocate_obj(const size_t size, uint_least32_t* const off);
  prop_bt* new_prop_bt(const char* name, uint32_t namelen, uint_least32_t* const off);
  prop_info* new_prop_info(const char* name, uint32_t namelen, const char* value, uint32_t valuelen,
                           uint_least32_t* const off);
  void* to_prop_obj(uint_least32_t off);
  prop_bt* to_prop_bt(atomic_uint_least32_t* off_p);
  prop_info* to_prop_info(atomic_uint_least32_t* off_p);

  prop_bt* root_node();

  prop_bt* find_prop_bt(prop_bt* const bt, const char* name, uint32_t namelen, bool alloc_if_needed);

  const prop_info* find_property(prop_bt* const trie, const char* name, uint32_t namelen,
                                 const char* value, uint32_t valuelen, bool alloc_if_needed);

  bool foreach_property(prop_bt* const trie, void (*propfn)(const prop_info* pi, void* cookie),
                        void* cookie);

  uint32_t bytes_used_;
  atomic_uint_least32_t serial_;
  uint32_t magic_;
  uint32_t version_;
  uint32_t reserved_[28];
  // Trailing storage: node/info objects are bump-allocated from here.
  char data_[0];

  DISALLOW_COPY_AND_ASSIGN(prop_area);
};
// A single property record stored inside a prop_area.
struct prop_info {
  // Low bit = dirty (update in progress); top 8 bits = value length.
  atomic_uint_least32_t serial;
  // we need to keep this buffer around because the property
  // value can be modified whereas name is constant.
  char value[PROP_VALUE_MAX];
  // Trailing variable-length, NUL-terminated property name.
  char name[0];

  prop_info(const char* name, uint32_t namelen, const char* value, uint32_t valuelen) {
    memcpy(this->name, name, namelen);
    this->name[namelen] = '\0';
    atomic_init(&this->serial, valuelen << 24);
    memcpy(this->value, value, valuelen);
    this->value[valuelen] = '\0';
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(prop_info);
};
204 // This is public because it was exposed in the NDK. As of 2017-01, ~60 apps reference this symbol.
205 prop_area* __system_property_area__ = nullptr;
207 static char property_filename[PROP_FILENAME_MAX] = PROP_FILENAME;
208 static size_t pa_data_size;
209 static size_t pa_size;
210 static bool initialized = false;
212 static prop_area* map_prop_area_rw(const char* filename, const char* context,
213 bool* fsetxattr_failed) {
214 /* dev is a tmpfs that we can use to carve a shared workspace
215 * out of, so let's do that...
217 const int fd = open(filename, O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444);
220 if (errno == EACCES) {
221 /* for consistency with the case where the process has already
222 * mapped the page in and segfaults when trying to write to it
230 if (fsetxattr(fd, XATTR_NAME_SELINUX, context, strlen(context) + 1, 0) != 0) {
231 async_safe_format_log(ANDROID_LOG_ERROR, "libc",
232 "fsetxattr failed to set context (%s) for \"%s\"", context, filename);
234 * fsetxattr() will fail during system properties tests due to selinux policy.
235 * We do not want to create a custom policy for the tester, so we will continue in
236 * this function but set a flag that an error has occurred.
237 * Init, which is the only daemon that should ever call this function will abort
238 * when this error occurs.
239 * Otherwise, the tester will ignore it and continue, albeit without any selinux
240 * property separation.
242 if (fsetxattr_failed) {
243 *fsetxattr_failed = true;
248 if (ftruncate(fd, PA_SIZE) < 0) {
254 pa_data_size = pa_size - sizeof(prop_area);
256 void* const memory_area = mmap(nullptr, pa_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
257 if (memory_area == MAP_FAILED) {
262 prop_area* pa = new (memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION);
268 static prop_area* map_fd_ro(const int fd) {
270 if (fstat(fd, &fd_stat) < 0) {
274 if ((fd_stat.st_uid != 0) || (fd_stat.st_gid != 0) ||
275 ((fd_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) ||
276 (fd_stat.st_size < static_cast<off_t>(sizeof(prop_area)))) {
280 pa_size = fd_stat.st_size;
281 pa_data_size = pa_size - sizeof(prop_area);
283 void* const map_result = mmap(nullptr, pa_size, PROT_READ, MAP_SHARED, fd, 0);
284 if (map_result == MAP_FAILED) {
288 prop_area* pa = reinterpret_cast<prop_area*>(map_result);
289 if ((pa->magic() != PROP_AREA_MAGIC) || (pa->version() != PROP_AREA_VERSION)) {
297 static prop_area* map_prop_area(const char* filename) {
298 int fd = open(filename, O_CLOEXEC | O_NOFOLLOW | O_RDONLY);
299 if (fd == -1) return nullptr;
301 prop_area* map_result = map_fd_ro(fd);
307 void* prop_area::allocate_obj(const size_t size, uint_least32_t* const off) {
308 const size_t aligned = BIONIC_ALIGN(size, sizeof(uint_least32_t));
309 if (bytes_used_ + aligned > pa_data_size) {
314 bytes_used_ += aligned;
318 prop_bt* prop_area::new_prop_bt(const char* name, uint32_t namelen, uint_least32_t* const off) {
319 uint_least32_t new_offset;
320 void* const p = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
322 prop_bt* bt = new (p) prop_bt(name, namelen);
330 prop_info* prop_area::new_prop_info(const char* name, uint32_t namelen, const char* value,
331 uint32_t valuelen, uint_least32_t* const off) {
332 uint_least32_t new_offset;
333 void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset);
335 prop_info* info = new (p) prop_info(name, namelen, value, valuelen);
// Translates a data_-relative offset to a pointer; nullptr if out of range.
void* prop_area::to_prop_obj(uint_least32_t off) {
  if (off > pa_data_size) return nullptr;

  return (data_ + off);
}

// Consume-loads an offset "pointer" and resolves it to a trie node.
inline prop_bt* prop_area::to_prop_bt(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_bt*>(to_prop_obj(off));
}

// Consume-loads an offset "pointer" and resolves it to a property record.
inline prop_info* prop_area::to_prop_info(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_info*>(to_prop_obj(off));
}

// The root trie node lives at offset 0 (reserved by the constructor).
inline prop_bt* prop_area::root_node() {
  return reinterpret_cast<prop_bt*>(to_prop_obj(0));
}
// Three-way comparison of two name tokens: shorter sorts first, equal
// lengths fall back to byte comparison.
static int cmp_prop_name(const char* one, uint32_t one_len, const char* two, uint32_t two_len) {
  if (one_len < two_len)
    return -1;
  else if (one_len > two_len)
    return 1;
  else
    return strncmp(one, two, one_len);
}
372 prop_bt* prop_area::find_prop_bt(prop_bt* const bt, const char* name, uint32_t namelen,
373 bool alloc_if_needed) {
374 prop_bt* current = bt;
380 const int ret = cmp_prop_name(name, namelen, current->name, current->namelen);
386 uint_least32_t left_offset = atomic_load_explicit(¤t->left, memory_order_relaxed);
387 if (left_offset != 0) {
388 current = to_prop_bt(¤t->left);
390 if (!alloc_if_needed) {
394 uint_least32_t new_offset;
395 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
397 atomic_store_explicit(¤t->left, new_offset, memory_order_release);
402 uint_least32_t right_offset = atomic_load_explicit(¤t->right, memory_order_relaxed);
403 if (right_offset != 0) {
404 current = to_prop_bt(¤t->right);
406 if (!alloc_if_needed) {
410 uint_least32_t new_offset;
411 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
413 atomic_store_explicit(¤t->right, new_offset, memory_order_release);
421 const prop_info* prop_area::find_property(prop_bt* const trie, const char* name, uint32_t namelen,
422 const char* value, uint32_t valuelen,
423 bool alloc_if_needed) {
424 if (!trie) return nullptr;
426 const char* remaining_name = name;
427 prop_bt* current = trie;
429 const char* sep = strchr(remaining_name, '.');
430 const bool want_subtree = (sep != nullptr);
431 const uint32_t substr_size = (want_subtree) ? sep - remaining_name : strlen(remaining_name);
437 prop_bt* root = nullptr;
438 uint_least32_t children_offset = atomic_load_explicit(¤t->children, memory_order_relaxed);
439 if (children_offset != 0) {
440 root = to_prop_bt(¤t->children);
441 } else if (alloc_if_needed) {
442 uint_least32_t new_offset;
443 root = new_prop_bt(remaining_name, substr_size, &new_offset);
445 atomic_store_explicit(¤t->children, new_offset, memory_order_release);
453 current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed);
458 if (!want_subtree) break;
460 remaining_name = sep + 1;
463 uint_least32_t prop_offset = atomic_load_explicit(¤t->prop, memory_order_relaxed);
464 if (prop_offset != 0) {
465 return to_prop_info(¤t->prop);
466 } else if (alloc_if_needed) {
467 uint_least32_t new_offset;
468 prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
470 atomic_store_explicit(¤t->prop, new_offset, memory_order_release);
479 class PropertyServiceConnection {
481 PropertyServiceConnection() : last_error_(0) {
482 socket_ = ::socket(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0);
488 const size_t namelen = strlen(property_service_socket);
490 memset(&addr, 0, sizeof(addr));
491 strlcpy(addr.sun_path, property_service_socket, sizeof(addr.sun_path));
492 addr.sun_family = AF_LOCAL;
493 socklen_t alen = namelen + offsetof(sockaddr_un, sun_path) + 1;
495 if (TEMP_FAILURE_RETRY(connect(socket_, reinterpret_cast<sockaddr*>(&addr), alen)) == -1) {
503 return socket_ != -1;
510 bool RecvInt32(int32_t* value) {
511 int result = TEMP_FAILURE_RETRY(recv(socket_, value, sizeof(*value), MSG_WAITALL));
512 return CheckSendRecvResult(result, sizeof(*value));
519 ~PropertyServiceConnection() {
526 bool CheckSendRecvResult(int result, int expected_len) {
529 } else if (result != expected_len) {
535 return last_error_ == 0;
541 friend class SocketWriter;
546 explicit SocketWriter(PropertyServiceConnection* connection)
547 : connection_(connection), iov_index_(0), uint_buf_index_(0)
550 SocketWriter& WriteUint32(uint32_t value) {
551 CHECK(uint_buf_index_ < kUintBufSize);
552 CHECK(iov_index_ < kIovSize);
553 uint32_t* ptr = uint_buf_ + uint_buf_index_;
554 uint_buf_[uint_buf_index_++] = value;
555 iov_[iov_index_].iov_base = ptr;
556 iov_[iov_index_].iov_len = sizeof(*ptr);
561 SocketWriter& WriteString(const char* value) {
562 uint32_t valuelen = strlen(value);
563 WriteUint32(valuelen);
568 CHECK(iov_index_ < kIovSize);
569 iov_[iov_index_].iov_base = const_cast<char*>(value);
570 iov_[iov_index_].iov_len = valuelen;
577 if (!connection_->IsValid()) {
581 if (writev(connection_->socket(), iov_, iov_index_) == -1) {
582 connection_->last_error_ = errno;
586 iov_index_ = uint_buf_index_ = 0;
591 static constexpr size_t kUintBufSize = 8;
592 static constexpr size_t kIovSize = 8;
594 PropertyServiceConnection* connection_;
595 iovec iov_[kIovSize];
597 uint32_t uint_buf_[kUintBufSize];
598 size_t uint_buf_index_;
600 DISALLOW_IMPLICIT_CONSTRUCTORS(SocketWriter);
605 char name[PROP_NAME_MAX];
606 char value[PROP_VALUE_MAX];
609 static int send_prop_msg(const prop_msg* msg) {
610 PropertyServiceConnection connection;
611 if (!connection.IsValid()) {
612 return connection.GetLastError();
616 int s = connection.socket();
618 const int num_bytes = TEMP_FAILURE_RETRY(send(s, msg, sizeof(prop_msg), 0));
619 if (num_bytes == sizeof(prop_msg)) {
620 // We successfully wrote to the property server but now we
621 // wait for the property server to finish its work. It
622 // acknowledges its completion by closing the socket so we
623 // poll here (on nothing), waiting for the socket to close.
624 // If you 'adb shell setprop foo bar' you'll see the POLLHUP
625 // once the socket closes. Out of paranoia we cap our poll
629 pollfds[0].events = 0;
630 const int poll_result = TEMP_FAILURE_RETRY(poll(pollfds, 1, 250 /* ms */));
631 if (poll_result == 1 && (pollfds[0].revents & POLLHUP) != 0) {
634 // Ignore the timeout and treat it like a success anyway.
635 // The init process is single-threaded and its property
636 // service is sometimes slow to respond (perhaps it's off
637 // starting a child process or something) and thus this
638 // times out and the caller thinks it failed, even though
639 // it's still getting around to it. So we fake it here,
640 // mostly for ctl.* properties, but we do try and wait 250
641 // ms so callers who do read-after-write can reliably see
642 // what they've written. Most of the time.
643 // TODO: fix the system properties design.
644 async_safe_format_log(ANDROID_LOG_WARN, "libc",
645 "Property service has timed out while trying to set \"%s\" to \"%s\"",
646 msg->name, msg->value);
// In-order recursive traversal (left, self, children, right) invoking
// propfn on every property record in the trie. Returns false on a
// corrupt/null subtree.
bool prop_area::foreach_property(prop_bt* const trie,
                                 void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
  if (!trie) return false;

  uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
  if (left_offset != 0) {
    const int err = foreach_property(to_prop_bt(&trie->left), propfn, cookie);
    if (err < 0) return false;
  }
  uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    prop_info* info = to_prop_info(&trie->prop);
    if (!info) return false;
    propfn(info, cookie);
  }
  uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
  if (children_offset != 0) {
    const int err = foreach_property(to_prop_bt(&trie->children), propfn, cookie);
    if (err < 0) return false;
  }
  uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
  if (right_offset != 0) {
    const int err = foreach_property(to_prop_bt(&trie->right), propfn, cookie);
    if (err < 0) return false;
  }

  return true;
}
683 const prop_info* prop_area::find(const char* name) {
684 return find_property(root_node(), name, strlen(name), nullptr, 0, false);
687 bool prop_area::add(const char* name, unsigned int namelen, const char* value,
688 unsigned int valuelen) {
689 return find_property(root_node(), name, namelen, value, valuelen, true);
692 bool prop_area::foreach (void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
693 return foreach_property(root_node(), propfn, cookie);
698 context_node(context_node* next, const char* context, prop_area* pa)
699 : next(next), context_(strdup(context)), pa_(pa), no_access_(false) {
706 bool open(bool access_rw, bool* fsetxattr_failed);
707 bool check_access_and_open();
710 const char* context() const {
730 prefix_node(struct prefix_node* next, const char* prefix, context_node* context)
731 : prefix(strdup(prefix)), prefix_len(strlen(prefix)), context(context), next(next) {
737 const size_t prefix_len;
738 context_node* context;
739 struct prefix_node* next;
742 template <typename List, typename... Args>
743 static inline void list_add(List** list, Args... args) {
744 *list = new List(*list, args...);
747 static void list_add_after_len(prefix_node** list, const char* prefix, context_node* context) {
748 size_t prefix_len = strlen(prefix);
750 auto next_list = list;
753 if ((*next_list)->prefix_len < prefix_len || (*next_list)->prefix[0] == '*') {
754 list_add(next_list, prefix, context);
757 next_list = &(*next_list)->next;
759 list_add(next_list, prefix, context);
762 template <typename List, typename Func>
763 static void list_foreach(List* list, Func func) {
770 template <typename List, typename Func>
771 static List* list_find(List* list, Func func) {
781 template <typename List>
782 static void list_free(List** list) {
784 auto old_list = *list;
785 *list = old_list->next;
790 static prefix_node* prefixes = nullptr;
791 static context_node* contexts = nullptr;
794 * pthread_mutex_lock() calls into system_properties in the case of contention.
795 * This creates a risk of dead lock if any system_properties functions
796 * use pthread locks after system_property initialization.
798 * For this reason, the below three functions use a bionic Lock and static
799 * allocation of memory for each filename.
802 bool context_node::open(bool access_rw, bool* fsetxattr_failed) {
809 char filename[PROP_FILENAME_MAX];
810 int len = async_safe_format_buffer(filename, sizeof(filename), "%s/%s", property_filename,
812 if (len < 0 || len > PROP_FILENAME_MAX) {
818 pa_ = map_prop_area_rw(filename, context_, fsetxattr_failed);
820 pa_ = map_prop_area(filename);
826 bool context_node::check_access_and_open() {
827 if (!pa_ && !no_access_) {
828 if (!check_access() || !open(false, nullptr)) {
835 void context_node::reset_access() {
836 if (!check_access()) {
844 bool context_node::check_access() {
845 char filename[PROP_FILENAME_MAX];
846 int len = async_safe_format_buffer(filename, sizeof(filename), "%s/%s", property_filename,
848 if (len < 0 || len > PROP_FILENAME_MAX) {
852 return access(filename, R_OK) == 0;
855 void context_node::unmap() {
860 munmap(pa_, pa_size);
861 if (pa_ == __system_property_area__) {
862 __system_property_area__ = nullptr;
867 static bool map_system_property_area(bool access_rw, bool* fsetxattr_failed) {
868 char filename[PROP_FILENAME_MAX];
870 async_safe_format_buffer(filename, sizeof(filename), "%s/properties_serial",
872 if (len < 0 || len > PROP_FILENAME_MAX) {
873 __system_property_area__ = nullptr;
878 __system_property_area__ =
879 map_prop_area_rw(filename, "u:object_r:properties_serial:s0", fsetxattr_failed);
881 __system_property_area__ = map_prop_area(filename);
883 return __system_property_area__;
886 static prop_area* get_prop_area_for_name(const char* name) {
887 auto entry = list_find(prefixes, [name](prefix_node* l) {
888 return l->prefix[0] == '*' || !strncmp(l->prefix, name, l->prefix_len);
894 auto cnode = entry->context;
897 * We explicitly do not check no_access_ in this case because unlike the
898 * case of foreach(), we want to generate an selinux audit for each
899 * non-permitted property access in this function.
901 cnode->open(false, nullptr);
907 * The below two functions are duplicated from label_support.c in libselinux.
908 * TODO: Find a location suitable for these functions such that both libc and
909 * libselinux can share a common source file.
913 * The read_spec_entries and read_spec_entry functions may be used to
914 * replace sscanf to read entries from spec files. The file and
915 * property services now use these.
918 /* Read an entry from a spec file (e.g. file_contexts) */
919 static inline int read_spec_entry(char** entry, char** ptr, int* len) {
921 char* tmp_buf = nullptr;
923 while (isspace(**ptr) && **ptr != '\0') (*ptr)++;
928 while (!isspace(**ptr) && **ptr != '\0') {
934 *entry = strndup(tmp_buf, *len);
935 if (!*entry) return -1;
942 * line_buf - Buffer containing the spec entries .
943 * num_args - The number of spec parameter entries to process.
944 * ... - A 'char **spec_entry' for each parameter.
945 * returns - The number of items processed.
947 * This function calls read_spec_entry() to do the actual string processing.
949 static int read_spec_entries(char* line_buf, int num_args, ...) {
950 char **spec_entry, *buf_p;
951 int len, rc, items, entry_len = 0;
954 len = strlen(line_buf);
955 if (line_buf[len - 1] == '\n')
956 line_buf[len - 1] = '\0';
958 /* Handle case if line not \n terminated by bumping
959 * the len for the check below (as the line is NUL
960 * terminated by getline(3)) */
964 while (isspace(*buf_p)) buf_p++;
966 /* Skip comment lines and empty lines. */
967 if (*buf_p == '#' || *buf_p == '\0') return 0;
969 /* Process the spec file entries */
970 va_start(ap, num_args);
973 while (items < num_args) {
974 spec_entry = va_arg(ap, char**);
976 if (len - 1 == buf_p - line_buf) {
981 rc = read_spec_entry(spec_entry, &buf_p, &entry_len);
986 if (entry_len) items++;
992 static bool initialize_properties_from_file(const char* filename) {
993 FILE* file = fopen(filename, "re");
998 char* buffer = nullptr;
1000 char* prop_prefix = nullptr;
1001 char* context = nullptr;
1003 while (getline(&buffer, &line_len, file) > 0) {
1004 int items = read_spec_entries(buffer, 2, &prop_prefix, &context);
1013 * init uses ctl.* properties as an IPC mechanism and does not write them
1014 * to a property file, therefore we do not need to create property files
1017 if (!strncmp(prop_prefix, "ctl.", 4)) {
1024 list_find(contexts, [context](context_node* l) { return !strcmp(l->context(), context); });
1026 list_add_after_len(&prefixes, prop_prefix, old_context);
1028 list_add(&contexts, context, nullptr);
1029 list_add_after_len(&prefixes, prop_prefix, contexts);
1041 static bool initialize_properties() {
1042 // If we do find /property_contexts, then this is being
1043 // run as part of the OTA updater on older release that had
1044 // /property_contexts - b/34370523
1045 if (initialize_properties_from_file("/property_contexts")) {
1049 // Use property_contexts from /system & /vendor, fall back to those from /
1050 if (access("/system/etc/selinux/plat_property_contexts", R_OK) != -1) {
1051 if (!initialize_properties_from_file("/system/etc/selinux/plat_property_contexts")) {
1054 if (!initialize_properties_from_file("/vendor/etc/selinux/nonplat_property_contexts")) {
1058 if (!initialize_properties_from_file("/plat_property_contexts")) {
1061 if (!initialize_properties_from_file("/nonplat_property_contexts")) {
// Returns true iff pathname exists and is a directory.
static bool is_dir(const char* pathname) {
  struct stat info;
  if (stat(pathname, &info) == -1) {
    return false;
  }
  return S_ISDIR(info.st_mode);
}
1077 static void free_and_unmap_contexts() {
1078 list_free(&prefixes);
1079 list_free(&contexts);
1080 if (__system_property_area__) {
1081 munmap(__system_property_area__, pa_size);
1082 __system_property_area__ = nullptr;
1086 int __system_properties_init() {
1087 // This is called from __libc_init_common, and should leave errno at 0 (http://b/37248982).
1088 ErrnoRestorer errno_restorer;
1091 list_foreach(contexts, [](context_node* l) { l->reset_access(); });
1094 if (is_dir(property_filename)) {
1095 if (!initialize_properties()) {
1098 if (!map_system_property_area(false, nullptr)) {
1099 free_and_unmap_contexts();
1103 __system_property_area__ = map_prop_area(property_filename);
1104 if (!__system_property_area__) {
1107 list_add(&contexts, "legacy_system_prop_area", __system_property_area__);
1108 list_add_after_len(&prefixes, "*", contexts);
1114 int __system_property_set_filename(const char* filename) {
1115 size_t len = strlen(filename);
1116 if (len >= sizeof(property_filename)) return -1;
1118 strcpy(property_filename, filename);
1122 int __system_property_area_init() {
1123 free_and_unmap_contexts();
1124 mkdir(property_filename, S_IRWXU | S_IXGRP | S_IXOTH);
1125 if (!initialize_properties()) {
1128 bool open_failed = false;
1129 bool fsetxattr_failed = false;
1130 list_foreach(contexts, [&fsetxattr_failed, &open_failed](context_node* l) {
1131 if (!l->open(true, &fsetxattr_failed)) {
1135 if (open_failed || !map_system_property_area(true, &fsetxattr_failed)) {
1136 free_and_unmap_contexts();
1140 return fsetxattr_failed ? -2 : 0;
// Returns the global area serial (bumped on every add/update), or -1 if
// the area is not mapped.
uint32_t __system_property_area_serial() {
  prop_area* pa = __system_property_area__;
  if (!pa) {
    return -1;
  }
  // Make sure this read fulfilled before __system_property_serial
  return atomic_load_explicit(pa->serial(), memory_order_acquire);
}
1152 const prop_info* __system_property_find(const char* name) {
1153 if (!__system_property_area__) {
1157 prop_area* pa = get_prop_area_for_name(name);
1159 async_safe_format_log(ANDROID_LOG_ERROR, "libc", "Access denied finding property \"%s\"", name);
1163 return pa->find(name);
// The C11 standard doesn't allow atomic loads from const fields,
// though C++11 does. Fudge it until standards get straightened out.
static inline uint_least32_t load_const_atomic(const atomic_uint_least32_t* s, memory_order mo) {
  atomic_uint_least32_t* non_const_s = const_cast<atomic_uint_least32_t*>(s);
  return atomic_load_explicit(non_const_s, mo);
}
// Copies a property's value (and optionally its name) out of the shared
// area, retrying until a serial check shows the copy was not torn by a
// concurrent update. Returns the value length.
int __system_property_read(const prop_info* pi, char* name, char* value) {
  while (true) {
    uint32_t serial = __system_property_serial(pi);  // acquire semantics
    size_t len = SERIAL_VALUE_LEN(serial);
    memcpy(value, pi->value, len + 1);
    // TODO: Fix the synchronization scheme here.
    // There is no fully supported way to implement this kind
    // of synchronization in C++11, since the memcpy races with
    // updates to pi, and the data being accessed is not atomic.
    // The following fence is unintuitive, but would be the
    // correct one if memcpy used memory_order_relaxed atomic accesses.
    // In practice it seems unlikely that the generated code would
    // be any different, so this should be OK.
    atomic_thread_fence(memory_order_acquire);
    if (serial == load_const_atomic(&(pi->serial), memory_order_relaxed)) {
      if (name != nullptr) {
        size_t namelen = strlcpy(name, pi->name, PROP_NAME_MAX);
        if (namelen >= PROP_NAME_MAX) {
          async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                                "The property name length for \"%s\" is >= %d;"
                                " please use __system_property_read_callback"
                                " to read this property. (the name is truncated to \"%s\")",
                                pi->name, PROP_NAME_MAX - 1, name);
        }
      }
      return len;
    }
  }
}
// Like __system_property_read but delivers name/value/serial through a
// callback, so arbitrarily long names are supported. Retries on torn reads.
void __system_property_read_callback(const prop_info* pi,
                                     void (*callback)(void* cookie, const char* name,
                                                      const char* value, uint32_t serial),
                                     void* cookie) {
  while (true) {
    uint32_t serial = __system_property_serial(pi);  // acquire semantics
    size_t len = SERIAL_VALUE_LEN(serial);
    char value_buf[len + 1];

    memcpy(value_buf, pi->value, len);
    value_buf[len] = '\0';

    // TODO: see todo in __system_property_read function
    atomic_thread_fence(memory_order_acquire);
    if (serial == load_const_atomic(&(pi->serial), memory_order_relaxed)) {
      callback(cookie, pi->name, value_buf, serial);
      return;
    }
  }
}
1226 int __system_property_get(const char* name, char* value) {
1227 const prop_info* pi = __system_property_find(name);
1230 return __system_property_read(pi, nullptr, value);
// setprop wire-protocol versions; 0 in the global means "not yet detected".
static constexpr uint32_t kProtocolVersion1 = 1;
static constexpr uint32_t kProtocolVersion2 = 2;  // current

static atomic_uint_least32_t g_propservice_protocol_version = 0;
1242 static void detect_protocol_version() {
1243 char value[PROP_VALUE_MAX];
1244 if (__system_property_get(kServiceVersionPropertyName, value) == 0) {
1245 g_propservice_protocol_version = kProtocolVersion1;
1246 async_safe_format_log(ANDROID_LOG_WARN, "libc",
1247 "Using old property service protocol (\"%s\" is not set)",
1248 kServiceVersionPropertyName);
1250 uint32_t version = static_cast<uint32_t>(atoll(value));
1251 if (version >= kProtocolVersion2) {
1252 g_propservice_protocol_version = kProtocolVersion2;
1254 async_safe_format_log(ANDROID_LOG_WARN, "libc",
1255 "Using old property service protocol (\"%s\"=\"%s\")",
1256 kServiceVersionPropertyName, value);
1257 g_propservice_protocol_version = kProtocolVersion1;
// Public entry point: asks the property service to set `key` to `value`.
// Returns 0 on success, -1 on failure (with errno set on the v2 socket
// paths). A nullptr value is treated as the empty string; values must be
// shorter than PROP_VALUE_MAX. The wire protocol is detected lazily on
// first use.
1262 int __system_property_set(const char* key, const char* value) {
1263 if (key == nullptr) return -1;
1264 if (value == nullptr) value = "";
1265 if (strlen(value) >= PROP_VALUE_MAX) return -1;
1267 if (g_propservice_protocol_version == 0) {
1268 detect_protocol_version();
// --- Legacy v1 path: fixed-size prop_msg over send_prop_msg(). ---
1271 if (g_propservice_protocol_version == kProtocolVersion1) {
1272 // Old protocol does not support long names
1273 if (strlen(key) >= PROP_NAME_MAX) return -1;
// NOTE(review): the declaration of `msg` (a prop_msg, per the uses below)
// is elided from this listing.
1276 memset(&msg, 0, sizeof msg);
1277 msg.cmd = PROP_MSG_SETPROP;
1278 strlcpy(msg.name, key, sizeof msg.name);
1279 strlcpy(msg.value, value, sizeof msg.value);
1281 return send_prop_msg(&msg);
// --- Current v2 path: length-prefixed messages over a socket. ---
1283 // Use proper protocol
1284 PropertyServiceConnection connection;
1285 if (!connection.IsValid()) {
// Preserve the connection error in errno and warn; the (elided) lines
// following each of these log calls presumably return -1.
1286 errno = connection.GetLastError();
1287 async_safe_format_log(ANDROID_LOG_WARN,
1289 "Unable to set property \"%s\" to \"%s\": connection failed; errno=%d (%s)",
// Send SETPROP2 followed by the key and value strings in one batch.
1297 SocketWriter writer(&connection);
1298 if (!writer.WriteUint32(PROP_MSG_SETPROP2).WriteString(key).WriteString(value).Send()) {
1299 errno = connection.GetLastError();
1300 async_safe_format_log(ANDROID_LOG_WARN,
1302 "Unable to set property \"%s\" to \"%s\": write failed; errno=%d (%s)",
// Wait for the service's int32 status reply (v2 is synchronous, unlike v1).
1311 if (!connection.RecvInt32(&result)) {
1312 errno = connection.GetLastError();
1313 async_safe_format_log(ANDROID_LOG_WARN,
1315 "Unable to set property \"%s\" to \"%s\": recv failed; errno=%d (%s)",
1323 if (result != PROP_SUCCESS) {
1324 async_safe_format_log(ANDROID_LOG_WARN,
1326 "Unable to set property \"%s\" to \"%s\": error code: 0x%x",
// Updates an existing property's value in place in the shared property
// area. Only the single mutator (the property service) may call this.
// The lock-free publication protocol: mark the serial dirty, copy the new
// value, then publish a new serial encoding the value length, waking any
// futex waiters on both the per-property and global serials.
1337 int __system_property_update(prop_info* pi, const char* value, unsigned int len) {
1338 if (len >= PROP_VALUE_MAX) {
1342 prop_area* pa = __system_property_area__;
1348 uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_relaxed);
// NOTE(review): the line between 1348 and 1350 is elided here; presumably
// it sets the serial's dirty bit (cf. SERIAL_DIRTY below) before this
// store — confirm against the full file.
1350 atomic_store_explicit(&pi->serial, serial, memory_order_relaxed);
1351 // The memcpy call here also races. Again pretend it
1352 // used memory_order_relaxed atomics, and use the analogous
1353 // counterintuitive fence.
1354 atomic_thread_fence(memory_order_release);
1355 strlcpy(pi->value, value, len + 1);
// Publish: new serial packs the value length in the top 8 bits and an
// incremented 24-bit counter in the low bits; release ordering makes the
// value bytes visible before the serial change. Then wake readers blocked
// in __system_property_serial()/__system_property_wait().
1357 atomic_store_explicit(&pi->serial, (len << 24) | ((serial + 1) & 0xffffff), memory_order_release);
1358 __futex_wake(&pi->serial, INT32_MAX);
// Bump the global area serial too, so __system_property_wait_any() waiters
// observe that *some* property changed.
1360 atomic_store_explicit(pa->serial(), atomic_load_explicit(pa->serial(), memory_order_relaxed) + 1,
1361 memory_order_release);
1362 __futex_wake(pa->serial(), INT32_MAX);
// Adds a brand-new property to the shared property area. Only the single
// mutator (the property service) may call this. The value must be shorter
// than PROP_VALUE_MAX, the area must be mapped, and the caller's SELinux
// context must grant access to the prop_area chosen for `name`.
// NOTE(review): the early-return bodies after each check are elided from
// this listing.
1367 int __system_property_add(const char* name, unsigned int namelen, const char* value,
1368 unsigned int valuelen) {
1369 if (valuelen >= PROP_VALUE_MAX) {
1370 if (!__system_property_area__) {
1377 if (!__system_property_area__) {
1381 prop_area* pa = get_prop_area_for_name(name);
// No prop_area for this name means SELinux policy denies the addition.
1384 async_safe_format_log(ANDROID_LOG_ERROR, "libc", "Access denied adding property \"%s\"", name);
1388 bool ret = pa->add(name, namelen, value, valuelen);
1393 // There is only a single mutator, but we want to make sure that
1394 // updates are visible to a reader waiting for the update.
// Publish the addition by bumping the global serial with release ordering,
// then wake anyone blocked in __system_property_wait_any().
1395 atomic_store_explicit(
1396 __system_property_area__->serial(),
1397 atomic_load_explicit(__system_property_area__->serial(), memory_order_relaxed) + 1,
1398 memory_order_release);
1399 __futex_wake(__system_property_area__->serial(), INT32_MAX);
1403 // Wait for non-locked serial, and retrieve it with acquire semantics.
// Returns the property's serial once it is no longer dirty (i.e. once the
// mutator in __system_property_update() has finished publishing). Blocks
// on the per-property futex while the dirty bit is set; the acquire loads
// pair with the mutator's release store so the value bytes are visible.
// NOTE(review): the final `return serial;` line is elided from this listing.
1404 uint32_t __system_property_serial(const prop_info* pi) {
1405 uint32_t serial = load_const_atomic(&pi->serial, memory_order_acquire);
1406 while (SERIAL_DIRTY(serial)) {
// Sleep until the mutator wakes us; re-check, since the futex can return
// spuriously or the serial may have been re-dirtied.
1407 __futex_wait(const_cast<_Atomic(uint_least32_t)*>(&pi->serial), serial, nullptr);
1408 serial = load_const_atomic(&pi->serial, memory_order_acquire);
// Blocks until the global property-area serial differs from `old_serial`
// (i.e. until any property changes), then returns the new global serial.
// Thin wrapper over __system_property_wait() with pi == nullptr and no
// timeout. NOTE(review): the `return new_serial;` line is elided here.
1413 uint32_t __system_property_wait_any(uint32_t old_serial) {
1414 uint32_t new_serial;
1415 __system_property_wait(nullptr, old_serial, &new_serial, nullptr);
// Blocks until the watched serial differs from `old_serial`, storing the
// new serial through `new_serial_ptr`. With pi == nullptr it watches the
// global property-area serial (any property change); otherwise it watches
// that one property's serial. `relative_timeout` may be nullptr to wait
// forever; on futex timeout the (elided) branch body presumably returns
// false — confirm against the full file.
1419 bool __system_property_wait(const prop_info* pi,
1420 uint32_t old_serial,
1421 uint32_t* new_serial_ptr,
1422 const timespec* relative_timeout) {
1423 // Are we waiting on the global serial or a specific serial?
1424 atomic_uint_least32_t* serial_ptr;
1425 if (pi == nullptr) {
// NOTE(review): `return -1` from a function declared `bool` converts to
// `true`, so an unmapped property area reports "success". Looks like a
// leftover from an int-returning signature — worth fixing upstream.
1426 if (__system_property_area__ == nullptr) return -1;
1427 serial_ptr = __system_property_area__->serial();
1429 serial_ptr = const_cast<atomic_uint_least32_t*>(&pi->serial);
1432 uint32_t new_serial;
// Sleep on the futex while the serial is unchanged; loop guards against
// spurious wakeups. Only -ETIMEDOUT is treated specially (elided branch).
1435 if ((rc = __futex_wait(serial_ptr, old_serial, relative_timeout)) != 0 && rc == -ETIMEDOUT) {
// Acquire load pairs with the mutator's release store, so the property
// data published before the serial bump is visible to the caller.
1438 new_serial = load_const_atomic(serial_ptr, memory_order_acquire);
1439 } while (new_serial == old_serial);
1441 *new_serial_ptr = new_serial;
// Returns the n-th prop_info visited by __system_property_foreach(), or
// nullptr if fewer than n+1 properties are visited. Iteration order is
// whatever foreach yields; there is no stable global ordering. Implemented
// with a local accumulator struct passed through foreach's void* cookie.
// NOTE(review): this listing elides the struct declaration line, the
// `uint32_t current;` member (initialized in the constructor below), and
// the construction of `state` — confirm against the full file.
1445 const prop_info* __system_property_find_nth(unsigned n) {
// Index being sought (fixed for the whole traversal).
1447 const uint32_t sought;
// Set to the matching prop_info when the sought index is reached.
1449 const prop_info* result;
1451 explicit find_nth(uint32_t n) : sought(n), current(0), result(nullptr) {}
// foreach callback: count each visited property; capture the n-th one.
1452 static void fn(const prop_info* pi, void* ptr) {
1453 find_nth* self = reinterpret_cast<find_nth*>(ptr);
1454 if (self->current++ == self->sought) self->result = pi;
1457 __system_property_foreach(find_nth::fn, &state);
1458 return state.result;
1461 int __system_property_foreach(void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
1462 if (!__system_property_area__) {
1466 list_foreach(contexts, [propfn, cookie](context_node* l) {
1467 if (l->check_access_and_open()) {
1468 l->pa()->foreach(propfn, cookie);