2 * Copyright (C) 2008 The Android Open Source Project
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <stdatomic.h>
42 #include <linux/xattr.h>
43 #include <netinet/in.h>
45 #include <sys/select.h>
46 #include <sys/socket.h>
48 #include <sys/types.h>
51 #include <sys/xattr.h>
53 #define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
54 #include <sys/_system_properties.h>
55 #include <sys/system_properties.h>
57 #include "private/bionic_futex.h"
58 #include "private/bionic_lock.h"
59 #include "private/bionic_macros.h"
60 #include "private/bionic_sdk_version.h"
61 #include "private/libc_logging.h"
// Limits and on-disk format constants for the shared property areas.
63 static constexpr int PROP_FILENAME_MAX = 1024;
// Magic bytes 'P','R','O','P' identifying a mapped property area file.
65 static constexpr uint32_t PROP_AREA_MAGIC = 0x504f5250;
66 static constexpr uint32_t PROP_AREA_VERSION = 0xfc6ed0ab;
// Each property area file is mapped at exactly this size.
68 static constexpr size_t PA_SIZE = 128 * 1024;
// A serial's low bit flags an in-progress update; its top 8 bits carry the value length.
70 #define SERIAL_DIRTY(serial) ((serial)&1)
71 #define SERIAL_VALUE_LEN(serial) ((serial) >> 24)
73 static const char property_service_socket[] = "/dev/socket/" PROP_SERVICE_NAME;
74 static const char* kServiceVersionPropertyName = "ro.property_service.version";
77 * Properties are stored in a hybrid trie/binary tree structure.
78 * Each property's name is delimited at '.' characters, and the tokens are put
79 * into a trie structure. Siblings at each level of the trie are stored in a
80 * binary tree. For instance, "ro.secure"="1" could be stored as follows:
82 * +-----+ children +----+ children +--------+
83 * | |-------------->| ro |-------------->| secure |
84 * +-----+ +----+ +--------+
86 * left / \ right left / | prop +===========+
87 * v v v +-------->| ro.secure |
88 * +-----+ +-----+ +-----+ +-----------+
89 * | net | | sys | | com | | 1 |
90 * +-----+ +-----+ +-----+ +===========+
93 // Represents a node in the trie.
97 // The property trie is updated only by the init process (single threaded) which provides
98 // property service. And it can be read by multiple threads at the same time.
99 // As the property trie is not protected by locks, we use atomic_uint_least32_t types for the
100 // left, right, children "pointers" in the trie node. To make sure readers who see the
101 // change of "pointers" can also notice the change of prop_bt structure contents pointed by
102 // the "pointers", we always use release-consume ordering pair when accessing these "pointers".
104 // prop "points" to prop_info structure if there is a propery associated with the trie node.
105 // Its situation is similar to the left, right, children "pointers". So we use
106 // atomic_uint_least32_t and release-consume ordering to protect it as well.
108 // We should also avoid rereading these fields redundantly, since not
109 // all processor implementations ensure that multiple loads from the
110 // same field are carried out in the right order.
// Offset (within the mapped area) of this node's prop_info; 0 means none.
111 atomic_uint_least32_t prop;
// Offsets of the sibling subtrees in the binary tree at this trie level.
113 atomic_uint_least32_t left;
114 atomic_uint_least32_t right;
// Offset of the root of the child trie level (next '.'-separated token).
116 atomic_uint_least32_t children;
// Copies the name token; the allocator must have reserved name_length + 1
// trailing bytes for the NUL-terminated copy.
120 prop_bt(const char* name, const uint32_t name_length) {
121 this->namelen = name_length;
122 memcpy(this->name, name, name_length);
123 this->name[name_length] = '\0';
127 DISALLOW_COPY_AND_ASSIGN(prop_bt);
// Header of a mapped property area file: magic/version, a global serial
// bumped on every mutation, and a bump allocator over the trailing data
// region that holds the trie nodes and prop_infos.
132 prop_area(const uint32_t magic, const uint32_t version) : magic_(magic), version_(version) {
133 atomic_init(&serial_, 0);
134 memset(reserved_, 0, sizeof(reserved_));
135 // Allocate enough space for the root node.
136 bytes_used_ = sizeof(prop_bt);
// Lookup / insert / iterate over all properties stored in this area.
139 const prop_info* find(const char* name);
140 bool add(const char* name, unsigned int namelen, const char* value, unsigned int valuelen);
142 bool foreach (void (*propfn)(const prop_info* pi, void* cookie), void* cookie);
144 atomic_uint_least32_t* serial() {
147 uint32_t magic() const {
150 uint32_t version() const {
// Internal helpers: object allocation and offset <-> pointer conversion.
155 void* allocate_obj(const size_t size, uint_least32_t* const off);
156 prop_bt* new_prop_bt(const char* name, uint32_t namelen, uint_least32_t* const off);
157 prop_info* new_prop_info(const char* name, uint32_t namelen, const char* value, uint32_t valuelen,
158 uint_least32_t* const off);
159 void* to_prop_obj(uint_least32_t off);
160 prop_bt* to_prop_bt(atomic_uint_least32_t* off_p);
161 prop_info* to_prop_info(atomic_uint_least32_t* off_p);
163 prop_bt* root_node();
165 prop_bt* find_prop_bt(prop_bt* const bt, const char* name, uint32_t namelen, bool alloc_if_needed);
167 const prop_info* find_property(prop_bt* const trie, const char* name, uint32_t namelen,
168 const char* value, uint32_t valuelen, bool alloc_if_needed);
170 bool foreach_property(prop_bt* const trie, void (*propfn)(const prop_info* pi, void* cookie),
// Bytes consumed so far in the data region by the bump allocator.
173 uint32_t bytes_used_;
174 atomic_uint_least32_t serial_;
// Padding reserved for future header growth; zeroed in the constructor.
177 uint32_t reserved_[28];
180 DISALLOW_COPY_AND_ASSIGN(prop_area);
// Serial for this property: low bit = dirty, top 8 bits = value length.
184 atomic_uint_least32_t serial;
185 // we need to keep this buffer around because the property
186 // value can be modified whereas name is constant.
187 char value[PROP_VALUE_MAX];
// Copies name and value; the serial is initialized with the value length
// in its top 8 bits and the dirty bit clear.
190 prop_info(const char* name, uint32_t namelen, const char* value, uint32_t valuelen) {
191 memcpy(this->name, name, namelen);
192 this->name[namelen] = '\0';
193 atomic_init(&this->serial, valuelen << 24);
194 memcpy(this->value, value, valuelen);
195 this->value[valuelen] = '\0';
199 DISALLOW_IMPLICIT_CONSTRUCTORS(prop_info);
// Cookie passed through foreach() to locate the n-th visited property.
202 struct find_nth_cookie {
207 explicit find_nth_cookie(uint32_t n) : count(0), n(n), pi(nullptr) {
211 // This is public because it was exposed in the NDK. As of 2017-01, ~60 apps reference this symbol.
212 prop_area* __system_property_area__ = nullptr;
// Directory (new layout) or file (legacy layout) backing the properties.
214 static char property_filename[PROP_FILENAME_MAX] = PROP_FILENAME;
// Sizes of the currently mapped area; set by the map_* helpers below.
215 static size_t pa_data_size;
216 static size_t pa_size;
217 static bool initialized = false;
// Creates and maps a fresh read-write property area file at `filename`,
// labels it with the given SELinux `context`, and placement-constructs the
// prop_area header in it.  Only init should call this.
219 static prop_area* map_prop_area_rw(const char* filename, const char* context,
220 bool* fsetxattr_failed) {
221 /* dev is a tmpfs that we can use to carve a shared workspace
222 * out of, so let's do that...
// O_EXCL: fail rather than reuse an existing (possibly attacker-placed) file.
224 const int fd = open(filename, O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444);
227 if (errno == EACCES) {
228 /* for consistency with the case where the process has already
229 * mapped the page in and segfaults when trying to write to it
237 if (fsetxattr(fd, XATTR_NAME_SELINUX, context, strlen(context) + 1, 0) != 0) {
238 __libc_format_log(ANDROID_LOG_ERROR, "libc",
239 "fsetxattr failed to set context (%s) for \"%s\"", context, filename);
241 * fsetxattr() will fail during system properties tests due to selinux policy.
242 * We do not want to create a custom policy for the tester, so we will continue in
243 * this function but set a flag that an error has occurred.
244 * Init, which is the only daemon that should ever call this function will abort
245 * when this error occurs.
246 * Otherwise, the tester will ignore it and continue, albeit without any selinux
247 * property separation.
249 if (fsetxattr_failed) {
250 *fsetxattr_failed = true;
// Size the file before mapping so writes don't fault past EOF.
255 if (ftruncate(fd, PA_SIZE) < 0) {
261 pa_data_size = pa_size - sizeof(prop_area);
263 void* const memory_area = mmap(nullptr, pa_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
264 if (memory_area == MAP_FAILED) {
// Placement-new the header (magic/version/serial) into the shared mapping.
269 prop_area* pa = new (memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION);
// Maps an already-open property area fd read-only, after sanity-checking
// ownership (root:root), permissions (not group/world writable), minimum
// size, and the magic/version header.  Returns nullptr on any failure.
275 static prop_area* map_fd_ro(const int fd) {
277 if (fstat(fd, &fd_stat) < 0) {
281 if ((fd_stat.st_uid != 0) || (fd_stat.st_gid != 0) ||
282 ((fd_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) ||
283 (fd_stat.st_size < static_cast<off_t>(sizeof(prop_area)))) {
287 pa_size = fd_stat.st_size;
288 pa_data_size = pa_size - sizeof(prop_area);
290 void* const map_result = mmap(nullptr, pa_size, PROT_READ, MAP_SHARED, fd, 0);
291 if (map_result == MAP_FAILED) {
295 prop_area* pa = reinterpret_cast<prop_area*>(map_result);
// Reject files that are not a property area of the expected format version.
296 if ((pa->magic() != PROP_AREA_MAGIC) || (pa->version() != PROP_AREA_VERSION)) {
304 static prop_area* map_prop_area(const char* filename) {
305 int fd = open(filename, O_CLOEXEC | O_NOFOLLOW | O_RDONLY);
306 if (fd == -1) return nullptr;
308 prop_area* map_result = map_fd_ro(fd);
// Bump allocator over the data_ region of the mapped area.  Rounds `size`
// up to uint_least32_t alignment, stores the allocation's offset in *off,
// and returns a pointer to it, or nullptr when the area is exhausted.
// Objects are never freed.
void* prop_area::allocate_obj(const size_t size, uint_least32_t* const off) {
  const size_t aligned = BIONIC_ALIGN(size, sizeof(uint_least32_t));
  if (bytes_used_ + aligned > pa_data_size) {
    return nullptr;
  }

  *off = bytes_used_;
  bytes_used_ += aligned;
  return data_ + *off;
}
325 prop_bt* prop_area::new_prop_bt(const char* name, uint32_t namelen, uint_least32_t* const off) {
326 uint_least32_t new_offset;
327 void* const p = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
329 prop_bt* bt = new (p) prop_bt(name, namelen);
337 prop_info* prop_area::new_prop_info(const char* name, uint32_t namelen, const char* value,
338 uint32_t valuelen, uint_least32_t* const off) {
339 uint_least32_t new_offset;
340 void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset);
342 prop_info* info = new (p) prop_info(name, namelen, value, valuelen);
350 void* prop_area::to_prop_obj(uint_least32_t off) {
351 if (off > pa_data_size) return nullptr;
353 return (data_ + off);
// Dereferences an offset "pointer" to a trie node.  The consume load pairs
// with the release store performed when the node was linked, so the node's
// contents are visible once its offset is.
inline prop_bt* prop_area::to_prop_bt(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_bt*>(to_prop_obj(off));
}
// Dereferences an offset "pointer" to a prop_info, with the same
// release-consume pairing as to_prop_bt().
inline prop_info* prop_area::to_prop_info(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_info*>(to_prop_obj(off));
}
366 inline prop_bt* prop_area::root_node() {
367 return reinterpret_cast<prop_bt*>(to_prop_obj(0));
// Ordering for trie siblings: shorter name tokens sort first, and
// equal-length tokens are ordered lexicographically.  Returns -1, 0, or a
// strcmp-style result.
static int cmp_prop_name(const char* one, uint32_t one_len, const char* two, uint32_t two_len) {
  if (one_len < two_len)
    return -1;
  else if (one_len > two_len)
    return 1;
  else
    return strncmp(one, two, one_len);
}
379 prop_bt* prop_area::find_prop_bt(prop_bt* const bt, const char* name, uint32_t namelen,
380 bool alloc_if_needed) {
381 prop_bt* current = bt;
387 const int ret = cmp_prop_name(name, namelen, current->name, current->namelen);
393 uint_least32_t left_offset = atomic_load_explicit(¤t->left, memory_order_relaxed);
394 if (left_offset != 0) {
395 current = to_prop_bt(¤t->left);
397 if (!alloc_if_needed) {
401 uint_least32_t new_offset;
402 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
404 atomic_store_explicit(¤t->left, new_offset, memory_order_release);
409 uint_least32_t right_offset = atomic_load_explicit(¤t->right, memory_order_relaxed);
410 if (right_offset != 0) {
411 current = to_prop_bt(¤t->right);
413 if (!alloc_if_needed) {
417 uint_least32_t new_offset;
418 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
420 atomic_store_explicit(¤t->right, new_offset, memory_order_release);
428 const prop_info* prop_area::find_property(prop_bt* const trie, const char* name, uint32_t namelen,
429 const char* value, uint32_t valuelen,
430 bool alloc_if_needed) {
431 if (!trie) return nullptr;
433 const char* remaining_name = name;
434 prop_bt* current = trie;
436 const char* sep = strchr(remaining_name, '.');
437 const bool want_subtree = (sep != nullptr);
438 const uint32_t substr_size = (want_subtree) ? sep - remaining_name : strlen(remaining_name);
444 prop_bt* root = nullptr;
445 uint_least32_t children_offset = atomic_load_explicit(¤t->children, memory_order_relaxed);
446 if (children_offset != 0) {
447 root = to_prop_bt(¤t->children);
448 } else if (alloc_if_needed) {
449 uint_least32_t new_offset;
450 root = new_prop_bt(remaining_name, substr_size, &new_offset);
452 atomic_store_explicit(¤t->children, new_offset, memory_order_release);
460 current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed);
465 if (!want_subtree) break;
467 remaining_name = sep + 1;
470 uint_least32_t prop_offset = atomic_load_explicit(¤t->prop, memory_order_relaxed);
471 if (prop_offset != 0) {
472 return to_prop_info(¤t->prop);
473 } else if (alloc_if_needed) {
474 uint_least32_t new_offset;
475 prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
477 atomic_store_explicit(¤t->prop, new_offset, memory_order_release);
// RAII connection to init's property service over its UNIX socket.
// Errors are captured in last_error_ rather than thrown.
486 class PropertyServiceConnection {
488 PropertyServiceConnection() : last_error_(0) {
489 socket_ = ::socket(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0);
495 const size_t namelen = strlen(property_service_socket);
497 memset(&addr, 0, sizeof(addr));
498 strlcpy(addr.sun_path, property_service_socket, sizeof(addr.sun_path));
499 addr.sun_family = AF_LOCAL;
500 socklen_t alen = namelen + offsetof(sockaddr_un, sun_path) + 1;
502 if (TEMP_FAILURE_RETRY(connect(socket_, reinterpret_cast<sockaddr*>(&addr), alen)) == -1) {
// Valid iff the socket was created and connected successfully.
510 return socket_ != -1;
// Blocking read of a 4-byte reply (MSG_WAITALL) from the service.
517 bool RecvInt32(int32_t* value) {
518 int result = TEMP_FAILURE_RETRY(recv(socket_, value, sizeof(*value), MSG_WAITALL));
519 return CheckSendRecvResult(result, sizeof(*value));
526 ~PropertyServiceConnection() {
// Records errno (on failure) or a short-transfer marker into last_error_.
533 bool CheckSendRecvResult(int result, int expected_len) {
536 } else if (result != expected_len) {
542 return last_error_ == 0;
548 friend class SocketWriter;
// Batches uint32 and string writes into an iovec array and flushes them
// with a single writev() on Send().  Fixed capacity: kUintBufSize ints,
// kIovSize iovec slots (CHECKed, not grown).
553 explicit SocketWriter(PropertyServiceConnection* connection)
554 : connection_(connection), iov_index_(0), uint_buf_index_(0)
// Stages one 32-bit value; the copy lives in uint_buf_ so the iovec
// remains valid until Send().
557 SocketWriter& WriteUint32(uint32_t value) {
558 CHECK(uint_buf_index_ < kUintBufSize);
559 CHECK(iov_index_ < kIovSize);
560 uint32_t* ptr = uint_buf_ + uint_buf_index_;
561 uint_buf_[uint_buf_index_++] = value;
562 iov_[iov_index_].iov_base = ptr;
563 iov_[iov_index_].iov_len = sizeof(*ptr);
// Stages a length-prefixed string; `value` must outlive Send().
568 SocketWriter& WriteString(const char* value) {
569 uint32_t valuelen = strlen(value);
570 WriteUint32(valuelen);
575 CHECK(iov_index_ < kIovSize);
576 iov_[iov_index_].iov_base = const_cast<char*>(value);
577 iov_[iov_index_].iov_len = valuelen;
584 if (!connection_->IsValid()) {
588 if (writev(connection_->socket(), iov_, iov_index_) == -1) {
589 connection_->last_error_ = errno;
// Reset staging state so the writer can be reused after a flush.
593 iov_index_ = uint_buf_index_ = 0;
598 static constexpr size_t kUintBufSize = 8;
599 static constexpr size_t kIovSize = 8;
601 PropertyServiceConnection* connection_;
602 iovec iov_[kIovSize];
604 uint32_t uint_buf_[kUintBufSize];
605 size_t uint_buf_index_;
607 DISALLOW_IMPLICIT_CONSTRUCTORS(SocketWriter);
// Fixed-size wire format of the legacy (protocol v1) setprop message.
612 char name[PROP_NAME_MAX];
613 char value[PROP_VALUE_MAX];
// Sends a legacy (protocol v1) prop_msg to the property service and waits
// up to 250ms for init to close the socket as its acknowledgement.
// Returns 0 on success or a negative error code.
616 static int send_prop_msg(const prop_msg* msg) {
617 PropertyServiceConnection connection;
618 if (!connection.IsValid()) {
619 return connection.GetLastError();
623 int s = connection.socket();
625 const int num_bytes = TEMP_FAILURE_RETRY(send(s, msg, sizeof(prop_msg), 0));
626 if (num_bytes == sizeof(prop_msg)) {
627 // We successfully wrote to the property server but now we
628 // wait for the property server to finish its work. It
629 // acknowledges its completion by closing the socket so we
630 // poll here (on nothing), waiting for the socket to close.
631 // If you 'adb shell setprop foo bar' you'll see the POLLHUP
632 // once the socket closes. Out of paranoia we cap our poll
636 pollfds[0].events = 0;
637 const int poll_result = TEMP_FAILURE_RETRY(poll(pollfds, 1, 250 /* ms */));
638 if (poll_result == 1 && (pollfds[0].revents & POLLHUP) != 0) {
641 // Ignore the timeout and treat it like a success anyway.
642 // The init process is single-threaded and its property
643 // service is sometimes slow to respond (perhaps it's off
644 // starting a child process or something) and thus this
645 // times out and the caller thinks it failed, even though
646 // it's still getting around to it. So we fake it here,
647 // mostly for ctl.* properties, but we do try and wait 250
648 // ms so callers who do read-after-write can reliably see
649 // what they've written. Most of the time.
650 // TODO: fix the system properties design.
651 __libc_format_log(ANDROID_LOG_WARN, "libc",
652 "Property service has timed out while trying to set \"%s\" to \"%s\"",
653 msg->name, msg->value);
661 static void find_nth_fn(const prop_info* pi, void* ptr) {
662 find_nth_cookie* cookie = reinterpret_cast<find_nth_cookie*>(ptr);
664 if (cookie->n == cookie->count) cookie->pi = pi;
// In-order recursive walk of the trie: left siblings, this node's property
// (if any), children, then right siblings.  Invokes propfn on every
// prop_info found.  Returns false if an offset fails to resolve.
bool prop_area::foreach_property(prop_bt* const trie,
                                 void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
  if (!trie) return false;

  uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
  if (left_offset != 0) {
    const int err = foreach_property(to_prop_bt(&trie->left), propfn, cookie);
    if (err < 0) return false;
  }
  uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    prop_info* info = to_prop_info(&trie->prop);
    if (!info) return false;
    propfn(info, cookie);
  }
  uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
  if (children_offset != 0) {
    const int err = foreach_property(to_prop_bt(&trie->children), propfn, cookie);
    if (err < 0) return false;
  }
  uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
  if (right_offset != 0) {
    const int err = foreach_property(to_prop_bt(&trie->right), propfn, cookie);
    if (err < 0) return false;
  }

  return true;
}
698 const prop_info* prop_area::find(const char* name) {
699 return find_property(root_node(), name, strlen(name), nullptr, 0, false);
702 bool prop_area::add(const char* name, unsigned int namelen, const char* value,
703 unsigned int valuelen) {
704 return find_property(root_node(), name, namelen, value, valuelen, true);
707 bool prop_area::foreach (void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
708 return foreach_property(root_node(), propfn, cookie);
// Singly-linked list node tying an SELinux context string to its lazily
// mapped prop_area.  The context string is duplicated and owned here.
713 context_node(context_node* next, const char* context, prop_area* pa)
714 : next(next), context_(strdup(context)), pa_(pa), no_access_(false) {
721 bool open(bool access_rw, bool* fsetxattr_failed);
722 bool check_access_and_open();
725 const char* context() const {
// Singly-linked list node mapping a property-name prefix to the
// context_node whose area stores properties with that prefix.
// The prefix string is duplicated and its length cached.
745 prefix_node(struct prefix_node* next, const char* prefix, context_node* context)
746 : prefix(strdup(prefix)), prefix_len(strlen(prefix)), context(context), next(next) {
752 const size_t prefix_len;
753 context_node* context;
754 struct prefix_node* next;
// Pushes a new node onto the front of an intrusive singly-linked list,
// forwarding `args` to the node constructor (which takes the old head as
// its first argument).
template <typename List, typename... Args>
static inline void list_add(List** list, Args... args) {
  *list = new List(*list, args...);
}
762 static void list_add_after_len(prefix_node** list, const char* prefix, context_node* context) {
763 size_t prefix_len = strlen(prefix);
765 auto next_list = list;
768 if ((*next_list)->prefix_len < prefix_len || (*next_list)->prefix[0] == '*') {
769 list_add(next_list, prefix, context);
772 next_list = &(*next_list)->next;
774 list_add(next_list, prefix, context);
// Applies `func` to every node of an intrusive singly-linked list.
template <typename List, typename Func>
static void list_foreach(List* list, Func func) {
  while (list) {
    func(list);
    list = list->next;
  }
}
// Returns the first node for which `func` is true, or nullptr.
template <typename List, typename Func>
static List* list_find(List* list, Func func) {
  while (list) {
    if (func(list)) {
      return list;
    }
    list = list->next;
  }
  return nullptr;
}
// Deletes every node of an intrusive singly-linked list and resets the
// head pointer to nullptr.
template <typename List>
static void list_free(List** list) {
  while (*list) {
    auto old_list = *list;
    *list = old_list->next;
    delete old_list;
  }
}
// Global lists built from the property_contexts files: prefix -> context
// routing, and the set of known SELinux property contexts.
805 static prefix_node* prefixes = nullptr;
806 static context_node* contexts = nullptr;
809 * pthread_mutex_lock() calls into system_properties in the case of contention.
810 * This creates a risk of dead lock if any system_properties functions
811 * use pthread locks after system_property initialization.
813 * For this reason, the below three functions use a bionic Lock and static
814 * allocation of memory for each filename.
// Maps this context's backing file ("<property_filename>/<context>"),
// read-write (creating it, init only) or read-only.  Uses a bionic Lock
// and fixed-size stack buffers per the deadlock note above.
817 bool context_node::open(bool access_rw, bool* fsetxattr_failed) {
824 char filename[PROP_FILENAME_MAX];
825 int len = __libc_format_buffer(filename, sizeof(filename), "%s/%s", property_filename, context_);
826 if (len < 0 || len > PROP_FILENAME_MAX) {
832 pa_ = map_prop_area_rw(filename, context_, fsetxattr_failed);
834 pa_ = map_prop_area(filename);
840 bool context_node::check_access_and_open() {
841 if (!pa_ && !no_access_) {
842 if (!check_access() || !open(false, nullptr)) {
849 void context_node::reset_access() {
850 if (!check_access()) {
858 bool context_node::check_access() {
859 char filename[PROP_FILENAME_MAX];
860 int len = __libc_format_buffer(filename, sizeof(filename), "%s/%s", property_filename, context_);
861 if (len < 0 || len > PROP_FILENAME_MAX) {
865 return access(filename, R_OK) == 0;
868 void context_node::unmap() {
873 munmap(pa_, pa_size);
874 if (pa_ == __system_property_area__) {
875 __system_property_area__ = nullptr;
880 static bool map_system_property_area(bool access_rw, bool* fsetxattr_failed) {
881 char filename[PROP_FILENAME_MAX];
883 __libc_format_buffer(filename, sizeof(filename), "%s/properties_serial", property_filename);
884 if (len < 0 || len > PROP_FILENAME_MAX) {
885 __system_property_area__ = nullptr;
890 __system_property_area__ =
891 map_prop_area_rw(filename, "u:object_r:properties_serial:s0", fsetxattr_failed);
893 __system_property_area__ = map_prop_area(filename);
895 return __system_property_area__;
898 static prop_area* get_prop_area_for_name(const char* name) {
899 auto entry = list_find(prefixes, [name](prefix_node* l) {
900 return l->prefix[0] == '*' || !strncmp(l->prefix, name, l->prefix_len);
906 auto cnode = entry->context;
909 * We explicitly do not check no_access_ in this case because unlike the
910 * case of foreach(), we want to generate an selinux audit for each
911 * non-permitted property access in this function.
913 cnode->open(false, nullptr);
919 * The below two functions are duplicated from label_support.c in libselinux.
920 * TODO: Find a location suitable for these functions such that both libc and
921 * libselinux can share a common source file.
925 * The read_spec_entries and read_spec_entry functions may be used to
926 * replace sscanf to read entries from spec files. The file and
927 * property services now use these.
930 /* Read an entry from a spec file (e.g. file_contexts) */
// Advances *ptr past leading whitespace, captures the next
// whitespace-delimited token into a freshly strdup'd *entry, and reports
// its length via *len.  Returns 0 on success, -1 on allocation failure.
931 static inline int read_spec_entry(char** entry, char** ptr, int* len) {
933 char* tmp_buf = nullptr;
935 while (isspace(**ptr) && **ptr != '\0') (*ptr)++;
940 while (!isspace(**ptr) && **ptr != '\0') {
// NOTE(review): the token start/len bookkeeping between these loops is
// elided here; *entry is duplicated from it below.
946 *entry = strndup(tmp_buf, *len);
947 if (!*entry) return -1;
954 * line_buf - Buffer containing the spec entries .
955 * num_args - The number of spec parameter entries to process.
956 * ... - A 'char **spec_entry' for each parameter.
957 * returns - The number of items processed.
959 * This function calls read_spec_entry() to do the actual string processing.
// Parses up to num_args whitespace-separated entries from line_buf into
// the caller's char** varargs (each strdup'd by read_spec_entry).
// Comment ('#') and empty lines yield 0.  Returns the number of entries
// captured, or a negative value on error.
961 static int read_spec_entries(char* line_buf, int num_args, ...) {
962 char **spec_entry, *buf_p;
963 int len, rc, items, entry_len = 0;
966 len = strlen(line_buf);
// Strip the trailing newline left by getline(3), if present.
967 if (line_buf[len - 1] == '\n')
968 line_buf[len - 1] = '\0';
970 /* Handle case if line not \n terminated by bumping
971 * the len for the check below (as the line is NUL
972 * terminated by getline(3)) */
976 while (isspace(*buf_p)) buf_p++;
978 /* Skip comment lines and empty lines. */
979 if (*buf_p == '#' || *buf_p == '\0') return 0;
981 /* Process the spec file entries */
982 va_start(ap, num_args)
985 while (items < num_args) {
986 spec_entry = va_arg(ap, char**);
// Stop when the cursor has reached the end of the line.
988 if (len - 1 == buf_p - line_buf) {
993 rc = read_spec_entry(spec_entry, &buf_p, &entry_len);
// Zero-length entries are not counted toward the requested total.
998 if (entry_len) items++;
// Parses a property_contexts file ("<prefix> <selinux context>" per line)
// and populates the global `prefixes` and `contexts` lists, reusing an
// existing context_node when the same context appears again.
1004 static bool initialize_properties_from_file(const char* filename) {
1005 FILE* file = fopen(filename, "re");
1010 char* buffer = nullptr;
1012 char* prop_prefix = nullptr;
1013 char* context = nullptr;
1015 while (getline(&buffer, &line_len, file) > 0) {
1016 int items = read_spec_entries(buffer, 2, &prop_prefix, &context);
1025 * init uses ctl.* properties as an IPC mechanism and does not write them
1026 * to a property file, therefore we do not need to create property files
// Skip ctl.* entries entirely (see comment above).
1029 if (!strncmp(prop_prefix, "ctl.", 4)) {
1036 list_find(contexts, [context](context_node* l) { return !strcmp(l->context(), context); });
1038 list_add_after_len(&prefixes, prop_prefix, old_context);
// Unknown context: create a context_node first, then point the prefix at it.
1040 list_add(&contexts, context, nullptr);
1041 list_add_after_len(&prefixes, prop_prefix, contexts);
// Loads property_contexts from the first available source, in priority
// order: legacy /property_contexts (OTA updater case), then the split
// /system & /vendor files, then the rootfs plat/nonplat pair.
1053 static bool initialize_properties() {
1054 // If we do find /property_contexts, then this is being
1055 // run as part of the OTA updater on older release that had
1056 // /property_contexts - b/34370523
1057 if (initialize_properties_from_file("/property_contexts")) {
1061 // Use property_contexts from /system & /vendor, fall back to those from /
1062 if (access("/system/etc/selinux/plat_property_contexts", R_OK) != -1) {
1063 if (!initialize_properties_from_file("/system/etc/selinux/plat_property_contexts")) {
1066 if (!initialize_properties_from_file("/vendor/etc/selinux/nonplat_property_contexts")) {
1070 if (!initialize_properties_from_file("/plat_property_contexts")) {
1073 if (!initialize_properties_from_file("/nonplat_property_contexts")) {
// Returns true iff `pathname` exists and is a directory (used to decide
// between the split-context layout and the legacy single-file layout).
static bool is_dir(const char* pathname) {
  struct stat info;
  if (stat(pathname, &info) == -1) {
    return false;
  }
  return S_ISDIR(info.st_mode);
}
1089 static void free_and_unmap_contexts() {
1090 list_free(&prefixes);
1091 list_free(&contexts);
1092 if (__system_property_area__) {
1093 munmap(__system_property_area__, pa_size);
1094 __system_property_area__ = nullptr;
// Reader-side initialization (every process): detects whether
// property_filename is the new directory layout (per-context areas) or the
// legacy single file, and maps accordingly.  Idempotent after first call,
// which only re-checks access on the contexts.
1098 int __system_properties_init() {
1100 list_foreach(contexts, [](context_node* l) { l->reset_access(); });
1103 if (is_dir(property_filename)) {
1104 if (!initialize_properties()) {
1107 if (!map_system_property_area(false, nullptr)) {
1108 free_and_unmap_contexts();
// Legacy layout: one prop_area file routed via a single "*" prefix.
1112 __system_property_area__ = map_prop_area(property_filename);
1113 if (!__system_property_area__) {
1116 list_add(&contexts, "legacy_system_prop_area", __system_property_area__);
1117 list_add_after_len(&prefixes, "*", contexts);
1123 int __system_property_set_filename(const char* filename) {
1124 size_t len = strlen(filename);
1125 if (len >= sizeof(property_filename)) return -1;
1127 strcpy(property_filename, filename);
// Writer-side initialization (init only): recreates the property
// directory, parses the contexts, and creates/maps every per-context area
// read-write.  Returns 0 on success, -1 on failure, -2 if only fsetxattr
// failed (callers may abort on that).
1131 int __system_property_area_init() {
1132 free_and_unmap_contexts();
1133 mkdir(property_filename, S_IRWXU | S_IXGRP | S_IXOTH);
1134 if (!initialize_properties()) {
1137 bool open_failed = false;
1138 bool fsetxattr_failed = false;
1139 list_foreach(contexts, [&fsetxattr_failed, &open_failed](context_node* l) {
1140 if (!l->open(true, &fsetxattr_failed)) {
1144 if (open_failed || !map_system_property_area(true, &fsetxattr_failed)) {
1145 free_and_unmap_contexts();
// -2 signals the selinux-label failure case described in map_prop_area_rw.
1149 return fsetxattr_failed ? -2 : 0;
// Returns the global area serial (bumped on every property mutation), or
// -1 (as uint32_t) when the area is not mapped.  Acquire ordering so a
// caller that sees a new serial also sees the update that produced it.
uint32_t __system_property_area_serial() {
  prop_area* pa = __system_property_area__;
  if (!pa) {
    return -1;
  }
  // Make sure this read fulfilled before __system_property_serial
  return atomic_load_explicit(pa->serial(), memory_order_acquire);
}
1161 const prop_info* __system_property_find(const char* name) {
1162 if (!__system_property_area__) {
1166 prop_area* pa = get_prop_area_for_name(name);
1168 __libc_format_log(ANDROID_LOG_ERROR, "libc", "Access denied finding property \"%s\"", name);
1172 return pa->find(name);
// The C11 standard doesn't allow atomic loads from const fields,
// though C++11 does. Fudge it until standards get straightened out.
static inline uint_least32_t load_const_atomic(const atomic_uint_least32_t* s, memory_order mo) {
  atomic_uint_least32_t* non_const_s = const_cast<atomic_uint_least32_t*>(s);
  return atomic_load_explicit(non_const_s, mo);
}
// Copies a property's value (and optionally name) into caller buffers,
// retrying until a consistent (serial-stable, non-dirty) snapshot is read.
// Returns the value length.  Buffers must be PROP_VALUE_MAX /
// PROP_NAME_MAX bytes respectively.
1182 int __system_property_read(const prop_info* pi, char* name, char* value) {
1184 uint32_t serial = __system_property_serial(pi); // acquire semantics
1185 size_t len = SERIAL_VALUE_LEN(serial);
1186 memcpy(value, pi->value, len + 1);
1187 // TODO: Fix the synchronization scheme here.
1188 // There is no fully supported way to implement this kind
1189 // of synchronization in C++11, since the memcpy races with
1190 // updates to pi, and the data being accessed is not atomic.
1191 // The following fence is unintuitive, but would be the
1192 // correct one if memcpy used memory_order_relaxed atomic accesses.
1193 // In practice it seems unlikely that the generated code would
1194 // would be any different, so this should be OK.
1195 atomic_thread_fence(memory_order_acquire);
// Serial unchanged across the copy => the snapshot is consistent.
1196 if (serial == load_const_atomic(&(pi->serial), memory_order_relaxed)) {
1197 if (name != nullptr) {
1198 size_t namelen = strlcpy(name, pi->name, PROP_NAME_MAX);
// Warn (but don't fail) when a long name was truncated into the buffer.
1199 if (namelen >= PROP_NAME_MAX) {
1200 __libc_format_log(ANDROID_LOG_ERROR, "libc",
1201 "The property name length for \"%s\" is >= %d;"
1202 " please use __system_property_read_callback"
1203 " to read this property. (the name is truncated to \"%s\")",
1204 pi->name, PROP_NAME_MAX - 1, name);
// Callback-based reader that supports names longer than PROP_NAME_MAX:
// takes a consistent snapshot (same serial-recheck scheme as
// __system_property_read) and hands name/value/serial to `callback`.
1212 void __system_property_read_callback(const prop_info* pi,
1213 void (*callback)(void* cookie,
1219 uint32_t serial = __system_property_serial(pi); // acquire semantics
1220 size_t len = SERIAL_VALUE_LEN(serial);
// VLA sized from the serial's embedded value length (max PROP_VALUE_MAX-1).
1221 char value_buf[len + 1];
1223 memcpy(value_buf, pi->value, len);
1224 value_buf[len] = '\0';
1226 // TODO: see todo in __system_property_read function
1227 atomic_thread_fence(memory_order_acquire);
1228 if (serial == load_const_atomic(&(pi->serial), memory_order_relaxed)) {
1229 callback(cookie, pi->name, value_buf, serial);
1235 int __system_property_get(const char* name, char* value) {
1236 const prop_info* pi = __system_property_find(name);
1239 return __system_property_read(pi, nullptr, value);
// setprop wire-protocol versions; 0 in the global means "not yet detected".
1246 static constexpr uint32_t kProtocolVersion1 = 1;
1247 static constexpr uint32_t kProtocolVersion2 = 2; // current
1249 static atomic_uint_least32_t g_propservice_protocol_version = 0;
1251 static void detect_protocol_version() {
1252 char value[PROP_VALUE_MAX];
1253 if (__system_property_get(kServiceVersionPropertyName, value) == 0) {
1254 g_propservice_protocol_version = kProtocolVersion1;
1255 __libc_format_log(ANDROID_LOG_WARN, "libc",
1256 "Using old property service protocol (\"%s\" is not set)",
1257 kServiceVersionPropertyName);
1259 uint32_t version = static_cast<uint32_t>(atoll(value));
1260 if (version >= kProtocolVersion2) {
1261 g_propservice_protocol_version = kProtocolVersion2;
1263 __libc_format_log(ANDROID_LOG_WARN, "libc",
1264 "Using old property service protocol (\"%s\"=\"%s\")",
1265 kServiceVersionPropertyName, value);
1266 g_propservice_protocol_version = kProtocolVersion1;
// Client-side setprop: validates arguments, lazily detects the service
// protocol, then either sends a legacy fixed-size prop_msg (v1) or a
// length-prefixed name/value pair and waits for an explicit status reply
// (v2).  Returns 0 on success, -1 on failure (details logged).
1271 int __system_property_set(const char* key, const char* value) {
1272 if (key == nullptr) return -1;
1273 if (value == nullptr) value = "";
1274 if (strlen(value) >= PROP_VALUE_MAX) return -1;
1276 if (g_propservice_protocol_version == 0) {
1277 detect_protocol_version();
1280 if (g_propservice_protocol_version == kProtocolVersion1) {
1281 // Old protocol does not support long names
1282 if (strlen(key) >= PROP_NAME_MAX) return -1;
1285 memset(&msg, 0, sizeof msg);
1286 msg.cmd = PROP_MSG_SETPROP;
1287 strlcpy(msg.name, key, sizeof msg.name);
1288 strlcpy(msg.value, value, sizeof msg.value);
1290 return send_prop_msg(&msg);
1292 // Use proper protocol
1293 PropertyServiceConnection connection;
1294 if (!connection.IsValid()) {
1295 errno = connection.GetLastError();
1296 __libc_format_log(ANDROID_LOG_WARN,
1298 "Unable to set property \"%s\" to \"%s\": connection failed; errno=%d (%s)",
// v2: stream cmd, key, value through a SocketWriter in one writev().
1306 SocketWriter writer(&connection);
1307 if (!writer.WriteUint32(PROP_MSG_SETPROP2).WriteString(key).WriteString(value).Send()) {
1308 errno = connection.GetLastError();
1309 __libc_format_log(ANDROID_LOG_WARN,
1311 "Unable to set property \"%s\" to \"%s\": write failed; errno=%d (%s)",
// v2 acknowledges with an explicit int32 status instead of a socket close.
1320 if (!connection.RecvInt32(&result)) {
1321 errno = connection.GetLastError();
1322 __libc_format_log(ANDROID_LOG_WARN,
1324 "Unable to set property \"%s\" to \"%s\": recv failed; errno=%d (%s)",
1332 if (result != PROP_SUCCESS) {
1333 __libc_format_log(ANDROID_LOG_WARN,
1335 "Unable to set property \"%s\" to \"%s\": error code: 0x%x",
// Updates an existing property in place.  Only the (single) property-service
// mutator calls this; readers synchronize via the serial fields.  The low bit
// of pi->serial is the "dirty" flag: it is set before the value is rewritten
// and cleared (with a bumped count and new length) afterwards, so readers can
// detect and wait out a torn read.  Returns 0 on success, -1 if |len| is too
// long or the property area is not mapped.
int __system_property_update(prop_info* pi, const char* value, unsigned int len) {
  if (len >= PROP_VALUE_MAX) {
    return -1;
  }

  prop_area* pa = __system_property_area__;

  if (!pa) {
    return -1;
  }

  // Mark the entry dirty (set low bit) before touching the value.
  uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_relaxed);
  serial |= 1;
  atomic_store_explicit(&pi->serial, serial, memory_order_relaxed);
  // The memcpy call here also races.  Again pretend it
  // used memory_order_relaxed atomics, and use the analogous
  // counterintuitive fence.
  atomic_thread_fence(memory_order_release);
  strlcpy(pi->value, value, len + 1);

  // Publish: new length in the top byte, incremented (now-even) count below,
  // released so readers that observe the serial also observe the new value.
  atomic_store_explicit(&pi->serial, (len << 24) | ((serial + 1) & 0xffffff), memory_order_release);
  __futex_wake(&pi->serial, INT32_MAX);

  // Bump the global serial too, for waiters on "any property changed".
  atomic_store_explicit(pa->serial(),
                        atomic_load_explicit(pa->serial(), memory_order_relaxed) + 1,
                        memory_order_release);
  __futex_wake(pa->serial(), INT32_MAX);

  return 0;
}
// Adds a new property to the area appropriate for |name|.  Only the property
// service calls this (single mutator).  Returns 0 on success, -1 if the name
// or value is invalid, the area is unmapped, access is denied, or the add
// itself fails (e.g. duplicate or no space).
int __system_property_add(const char* name, unsigned int namelen, const char* value,
                          unsigned int valuelen) {
  if (valuelen >= PROP_VALUE_MAX) {
    return -1;
  }

  if (namelen < 1) {
    return -1;
  }

  if (!__system_property_area__) {
    return -1;
  }

  prop_area* pa = get_prop_area_for_name(name);

  if (!pa) {
    __libc_format_log(ANDROID_LOG_ERROR, "libc", "Access denied adding property \"%s\"", name);
    return -1;
  }

  bool ret = pa->add(name, namelen, value, valuelen);
  if (!ret) {
    return -1;
  }

  // There is only a single mutator, but we want to make sure that
  // updates are visible to a reader waiting for the update.
  atomic_store_explicit(
      __system_property_area__->serial(),
      atomic_load_explicit(__system_property_area__->serial(), memory_order_relaxed) + 1,
      memory_order_release);
  __futex_wake(__system_property_area__->serial(), INT32_MAX);
  return 0;
}
// Wait for non-locked serial, and retrieve it with acquire semantics.
// If the writer has the entry marked dirty (low serial bit set), block on the
// futex until the write completes, then return the clean serial.
uint32_t __system_property_serial(const prop_info* pi) {
  uint32_t serial = load_const_atomic(&pi->serial, memory_order_acquire);
  while (SERIAL_DIRTY(serial)) {
    // Sleep until the mutator publishes a new serial; re-check on wakeup
    // (futex wakeups can be spurious).
    __futex_wait(const_cast<_Atomic(uint_least32_t)*>(&pi->serial), serial, nullptr);
    serial = load_const_atomic(&pi->serial, memory_order_acquire);
  }
  return serial;
}
1422 uint32_t __system_property_wait_any(uint32_t old_serial) {
1423 uint32_t new_serial;
1424 __system_property_wait(nullptr, old_serial, &new_serial, nullptr);
// Waits until the watched serial — the global area serial when |pi| is null,
// otherwise |pi|'s own serial — differs from |old_serial|, or the (relative)
// timeout expires.  On success writes the new serial to |*new_serial_ptr| and
// returns true; returns false on timeout or if the property area is unmapped.
bool __system_property_wait(const prop_info* pi,
                            uint32_t old_serial,
                            uint32_t* new_serial_ptr,
                            const timespec* relative_timeout) {
  // Are we waiting on the global serial or a specific serial?
  atomic_uint_least32_t* serial_ptr;
  if (pi == nullptr) {
    // Was `return -1`, which converts to `true` in a bool function and would
    // falsely report success without writing *new_serial_ptr.
    if (__system_property_area__ == nullptr) return false;
    serial_ptr = __system_property_area__->serial();
  } else {
    serial_ptr = const_cast<atomic_uint_least32_t*>(&pi->serial);
  }

  uint32_t new_serial;
  do {
    int rc;
    // Only a genuine timeout aborts the wait; EAGAIN/EINTR just re-check.
    if ((rc = __futex_wait(serial_ptr, old_serial, relative_timeout)) != 0 && rc == -ETIMEDOUT) {
      return false;
    }
    new_serial = load_const_atomic(serial_ptr, memory_order_acquire);
  } while (new_serial == old_serial);

  *new_serial_ptr = new_serial;
  return true;
}
1454 const prop_info* __system_property_find_nth(unsigned n) {
1455 if (bionic_get_application_target_sdk_version() >= __ANDROID_API_O__) {
1457 "__system_property_find_nth is not supported since Android O,"
1458 " please use __system_property_foreach instead.");
1461 find_nth_cookie cookie(n);
1463 const int err = __system_property_foreach(find_nth_fn, &cookie);
1471 int __system_property_foreach(void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
1472 if (!__system_property_area__) {
1476 list_foreach(contexts, [propfn, cookie](context_node* l) {
1477 if (l->check_access_and_open()) {
1478 l->pa()->foreach (propfn, cookie);