/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

#include "io-util.h"
#include "string-util.h"
#include "time-util.h"

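/* Flushes any pending input on fd, discarding it. Returns the number of bytes thrown away, or a negative
 * errno. Since it relies on a zero-timeout poll() it only stays non-blocking if the fd itself was set to
 * non-blocking mode (see the comment inside). */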
int flush_fd(int fd) {
        struct pollfd pollfd = {
                .fd = fd,
                .events = POLLIN,
        };
        int count = 0;

        /* Read from the specified file descriptor until POLLIN is not set anymore, throwing away everything
         * read. Note that some file descriptors (notably IP sockets) will trigger POLLIN even when no data can
         * be read (due to IP packet checksum mismatches), hence this function is only guaranteed not to block
         * if the fd was set to non-blocking mode too. */

        for (;;) {
                char buf[LINE_MAX];
                ssize_t l;
                int r;

                r = poll(&pollfd, 1, 0);
                if (r < 0) {
                        if (errno == EINTR)
                                continue;

                        return -errno;

                } else if (r == 0)
                        return count;

                l = read(fd, buf, sizeof(buf));
                if (l < 0) {
                        if (errno == EINTR)
                                continue;

                        if (errno == EAGAIN)
                                return count;

                        return -errno;
                } else if (l == 0)
                        return count;

                count += (int) l;
        }
}

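/* Reads up to nbytes bytes from fd into buf, restarting read() on EINTR. If do_poll is true, EAGAIN on an
 * O_NONBLOCK fd is handled by waiting for POLLIN without a timeout and retrying. Returns the number of bytes
 * read, which is smaller than nbytes only if EOF was reached; if an error occurs after some data was already
 * read, the partial count is returned instead of the error. */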
ssize_t loop_read(int fd, void *buf, size_t nbytes, bool do_poll) {
        uint8_t *p = buf;
        ssize_t n = 0;

        assert(fd >= 0);
        assert(buf);

        /* If called with nbytes == 0, let's call read() at least once, to validate the operation */

        if (nbytes > (size_t) SSIZE_MAX)
                return -EINVAL;

        do {
                ssize_t k;

                k = read(fd, p, nbytes);
                if (k < 0) {
                        if (errno == EINTR)
                                continue;

                        if (errno == EAGAIN && do_poll) {

                                /* We knowingly ignore any return value here,
                                 * and expect that any error/EOF is reported
                                 * via read() */

                                (void) fd_wait_for_event(fd, POLLIN, USEC_INFINITY);
                                continue;
                        }

                        return n > 0 ? n : -errno;
                }

                if (k == 0)
                        return n;

                assert((size_t) k <= nbytes);

                p += k;
                nbytes -= k;
                n += k;
        } while (nbytes > 0);

        return n;
}

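/* Like loop_read(), but insists on reading exactly nbytes bytes: returns 0 on success, -EIO if EOF was hit
 * early, or another negative errno on failure. Illustrative use (hypothetical fd and struct):
 *
 *         struct foo_header h;
 *         r = loop_read_exact(fd, &h, sizeof(h), true);
 *         if (r < 0)
 *                 return r;
 */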
int loop_read_exact(int fd, void *buf, size_t nbytes, bool do_poll) {
        ssize_t n;

        n = loop_read(fd, buf, nbytes, do_poll);
        if (n < 0)
                return (int) n;
        if ((size_t) n != nbytes)
                return -EIO;

        return 0;
}

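/* Writes all nbytes bytes from buf to fd, restarting write() on EINTR. If do_poll is true, EAGAIN on an
 * O_NONBLOCK fd is handled by waiting for POLLOUT without a timeout and retrying. Returns 0 on success or a
 * negative errno; unlike loop_read() no partial count is reported. */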
int loop_write(int fd, const void *buf, size_t nbytes, bool do_poll) {
        const uint8_t *p = buf;

        assert(fd >= 0);
        assert(buf);

        if (_unlikely_(nbytes > (size_t) SSIZE_MAX))
                return -EINVAL;

        do {
                ssize_t k;

                k = write(fd, p, nbytes);
                if (k < 0) {
                        if (errno == EINTR)
                                continue;

                        if (errno == EAGAIN && do_poll) {
                                /* We knowingly ignore any return value here,
                                 * and expect that any error/EOF is reported
                                 * via write() */

                                (void) fd_wait_for_event(fd, POLLOUT, USEC_INFINITY);
                                continue;
                        }

                        return -errno;
                }

                if (_unlikely_(nbytes > 0 && k == 0)) /* Can't really happen */
                        return -EIO;

                assert((size_t) k <= nbytes);

                p += k;
                nbytes -= k;
        } while (nbytes > 0);

        return 0;
}

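/* Checks (without blocking) whether the peer of the pipe or socket connected to fd has hung up. Returns
 * positive if POLLHUP is pending, 0 if not, or a negative errno if poll() failed. */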
int pipe_eof(int fd) {
        struct pollfd pollfd = {
                .fd = fd,
                .events = POLLIN|POLLHUP,
        };

        int r;

        r = poll(&pollfd, 1, 0);
        if (r < 0)
                return -errno;

        if (r == 0)
                return 0;

        return pollfd.revents & POLLHUP;
}

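/* Waits up to t microseconds (or forever if t is USEC_INFINITY) for one of the requested poll() events on fd.
 * Returns the revents mask seen (which may include POLLHUP/POLLERR even if not asked for), 0 on timeout, or a
 * negative errno. */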
int fd_wait_for_event(int fd, int event, usec_t t) {

        struct pollfd pollfd = {
                .fd = fd,
                .events = event,
        };

        struct timespec ts;
        int r;

        r = ppoll(&pollfd, 1, t == USEC_INFINITY ? NULL : timespec_store(&ts, t), NULL);
        if (r < 0)
                return -errno;
        if (r == 0)
                return 0;

        return pollfd.revents;
}

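/* Returns the length of the run of NUL bytes at the beginning of p, looking at no more than sz bytes. */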
static size_t nul_length(const uint8_t *p, size_t sz) {
        size_t n = 0;

        while (sz > 0) {
                if (*p != 0)
                        break;

                n++;
                p++;
                sz--;
        }

        return n;
}

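/* Writes the sz bytes at p to fd, but replaces runs of NUL bytes longer than run_length (as well as NUL runs
 * at the very beginning or end of the buffer) with lseek(), so that a sparse file with holes is created
 * instead of the zeroes being written out. The fd hence must refer to a seekable file. Returns the number of
 * bytes processed, or a negative errno. Note that a hole at the end of the buffer is only seeked over, not
 * allocated; callers that need the file to have the full size may have to ftruncate() it afterwards. */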
ssize_t sparse_write(int fd, const void *p, size_t sz, size_t run_length) {
        const uint8_t *q, *w, *e;
        ssize_t l;

        q = w = p;
        e = q + sz;
        while (q < e) {
                size_t n;

                n = nul_length(q, e - q);

                /* If there are more than the specified run length of
                 * NUL bytes, or if this is the beginning or the end
                 * of the buffer, then seek instead of write */
                if ((n > run_length) ||
                    (n > 0 && q == p) ||
                    (n > 0 && q + n >= e)) {
                        if (q > w) {
                                l = write(fd, w, q - w);
                                if (l < 0)
                                        return -errno;
                                if (l != q - w)
                                        return -EIO;
                        }

                        if (lseek(fd, n, SEEK_CUR) == (off_t) -1)
                                return -errno;

                        q += n;
                        w = q;
                } else if (n > 0)
                        q += n;
                else
                        q++;
        }

        if (q > w) {
                l = write(fd, w, q - w);
                if (l < 0)
                        return -errno;
                if (l != q - w)
                        return -EIO;
        }

        return q - (const uint8_t*) p;
}

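/* Concatenates field and value into a newly allocated string and, if that succeeds, appends it to iovec[],
 * bumping *n_iovec. Returns the allocated string (which the new iovec entry points to), or NULL on OOM. The
 * caller must ensure the array has room for one more entry and remains responsible for eventually freeing the
 * string. */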
char* set_iovec_string_field(struct iovec *iovec, size_t *n_iovec, const char *field, const char *value) {
        char *x;

        x = strjoin(field, value);
        if (x)
                iovec[(*n_iovec)++] = IOVEC_MAKE_STRING(x);
        return x;
}

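/* Same as set_iovec_string_field(), but frees the passed value in all cases, i.e. takes ownership of it. */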
char* set_iovec_string_field_free(struct iovec *iovec, size_t *n_iovec, const char *field, char *value) {
        char *x;

        x = set_iovec_string_field(iovec, n_iovec, field, value);
        free(value);
        return x;
}

struct iovec_wrapper *iovw_new(void) {
        return malloc0(sizeof(struct iovec_wrapper));
}

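/* Releases the iovec array of the wrapper and resets its counters; if free_vectors is true the individual
 * iov_base allocations are freed as well. iovw_free_free() and iovw_free() below additionally free the
 * wrapper object itself, with and without freeing the iov_base entries respectively. */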
void iovw_free_contents(struct iovec_wrapper *iovw, bool free_vectors) {
        if (free_vectors)
                for (size_t i = 0; i < iovw->count; i++)
                        free(iovw->iovec[i].iov_base);

        iovw->iovec = mfree(iovw->iovec);
        iovw->count = 0;
        iovw->size_bytes = 0;
}

struct iovec_wrapper *iovw_free_free(struct iovec_wrapper *iovw) {
        iovw_free_contents(iovw, true);

        return mfree(iovw);
}

struct iovec_wrapper *iovw_free(struct iovec_wrapper *iovw) {
        iovw_free_contents(iovw, false);

        return mfree(iovw);
}

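/* Appends a (data, len) entry to the wrapper, growing the array as necessary. Refuses with -E2BIG once
 * IOV_MAX entries have been reached, and returns -ENOMEM (via log_oom()) if the array cannot be grown. The
 * data itself is not copied, only referenced. */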
int iovw_put(struct iovec_wrapper *iovw, void *data, size_t len) {
        if (iovw->count >= IOV_MAX)
                return -E2BIG;

        if (!GREEDY_REALLOC(iovw->iovec, iovw->size_bytes, iovw->count + 1))
                return log_oom();

        iovw->iovec[iovw->count++] = IOVEC_MAKE(data, len);
        return 0;
}

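/* Concatenates field and value (e.g. "MESSAGE=" and some text) and appends the result to the wrapper, which
 * then owns the allocated string. Illustrative use (hypothetical variable names):
 *
 *         r = iovw_put_string_field(iovw, "MESSAGE=", text);
 *         if (r < 0)
 *                 return r;
 */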
int iovw_put_string_field(struct iovec_wrapper *iovw, const char *field, const char *value) {
        _cleanup_free_ char *x = NULL;
        int r;

        x = strjoin(field, value);
        if (!x)
                return log_oom();

        r = iovw_put(iovw, x, strlen(x));
        if (r >= 0)
                TAKE_PTR(x);

        return r;
}

int iovw_put_string_field_free(struct iovec_wrapper *iovw, const char *field, char *value) {
        _cleanup_free_ _unused_ char *free_ptr = value;

        return iovw_put_string_field(iovw, field, value);
}

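/* Adjusts all iov_base pointers by the offset between old and new. Useful after the memory block the entries
 * point into has been moved (for example by a realloc()) from old to new. */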
void iovw_rebase(struct iovec_wrapper *iovw, char *old, char *new) {
        size_t i;

        for (i = 0; i < iovw->count; i++)
                iovw->iovec[i].iov_base = (char *) iovw->iovec[i].iov_base - old + new;
}

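/* Returns the total number of bytes referenced by all entries of the wrapper. */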
size_t iovw_size(struct iovec_wrapper *iovw) {
        size_t n = 0, i;

        for (i = 0; i < iovw->count; i++)
                n += iovw->iovec[i].iov_len;

        return n;
}