Diffstat (limited to 'recipes-core/swupd-client/swupd-client/0001-downloads-minimize-syscalls-to-improve-performance.patch')
-rw-r--r--  recipes-core/swupd-client/swupd-client/0001-downloads-minimize-syscalls-to-improve-performance.patch | 186
1 file changed, 186 insertions(+), 0 deletions(-)
diff --git a/recipes-core/swupd-client/swupd-client/0001-downloads-minimize-syscalls-to-improve-performance.patch b/recipes-core/swupd-client/swupd-client/0001-downloads-minimize-syscalls-to-improve-performance.patch
new file mode 100644
index 0000000..59875b7
--- /dev/null
+++ b/recipes-core/swupd-client/swupd-client/0001-downloads-minimize-syscalls-to-improve-performance.patch
@@ -0,0 +1,186 @@
+From 9bc713b7ed0dba91304c5d7ed4905f5924ad8e42 Mon Sep 17 00:00:00 2001
+From: Patrick Ohly <patrick.ohly@intel.com>
+Date: Thu, 14 Apr 2016 11:03:31 +0200
+Subject: [PATCH 1/3] downloads: minimize syscalls to improve performance
+
+The previous approach was to open/fdopen/fclose the file for each
+chunk that gets passed from curl. This incurs a huge performance hit
+when close() triggers a hashing of the file content on systems where
+integrity protection via IMA is enabled.
+
+Now the file is opened only once and kept open until the download is
+complete. In addition, the unnecessary usage of C file IO is avoided.
+
+The semantics are changed as little as possible:
+- the file gets created only after the first chunk of data arrives
+- file descriptors do not leak to child processes (O_CLOEXEC)
+- data gets appended to existing files (via O_APPEND, used
+ to keep the code simple and avoid an additional lseek)
+- data gets flushed explicitly for each chunk (via fdatasync(),
+ which somewhat approximates the effect that an explicit
+ close() may have had)
+
+As an additional improvement, failures during close() are checked. To
+keep error handling as close to the previous behavior as possible, the
+completion function that performs the close() takes the current curl
+error code and replaces it if it encounters a write error.
+
+[v2 of the patch with fixes by Dmitry Rozhkov, see https://github.com/pohly/swupd-client/pull/1]
+
+Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
+---
+ include/swupd.h | 3 +++
+ src/curl.c | 63 ++++++++++++++++++++++++++++++++++++++++-----------------
+ src/download.c | 8 +++++++-
+ 3 files changed, 54 insertions(+), 20 deletions(-)
+
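Before diving into the hunks, the write path that the commit message describes can be seen in isolation. The following is a minimal, self-contained sketch of that pattern (open the file once with O_APPEND and O_CLOEXEC, retry short writes, flush each chunk with fdatasync(), and check the final close()); the function names are illustrative only and are not the helpers added by this patch:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Open the staging file once; every later chunk appends to this descriptor. */
    static int open_staging(const char *path)
    {
        return open(path, O_CREAT | O_WRONLY | O_APPEND | O_CLOEXEC, 0600);
    }

    /* Append one chunk, retrying on EINTR and short writes, then flush it. */
    static int append_chunk(int fd, const char *buf, size_t len)
    {
        while (len > 0) {
            ssize_t written = write(fd, buf, len);
            if (written < 0) {
                if (errno == EINTR)
                    continue;
                fprintf(stderr, "write: %s\n", strerror(errno));
                return -1;
            }
            buf += written;
            len -= (size_t)written;
        }
        if (fdatasync(fd)) {
            fprintf(stderr, "fdatasync: %s\n", strerror(errno));
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        int fd = open_staging("/tmp/staging-example");
        if (fd < 0)
            return 1;
        if (append_chunk(fd, "chunk\n", 6))
            return 1;
        /* Closing only once, at the end, is what avoids the per-chunk
         * re-hash under IMA that motivated the patch; the close() result
         * is still checked. */
        return close(fd) ? 1 : 0;
    }

The patch folds these same steps into swupd_download_file() and swupd_download_file_complete(), keeping the descriptor and a validity flag in struct file so that curl can invoke the write callback many times per download.
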
+diff --git a/include/swupd.h b/include/swupd.h
+index 14e65ab..3bac8b2 100644
+--- a/include/swupd.h
++++ b/include/swupd.h
+@@ -89,6 +89,7 @@ struct file {
+ int last_change;
+ struct update_stat stat;
+
++ unsigned int fd_valid : 1;
+ unsigned int is_dir : 1;
+ unsigned int is_file : 1;
+ unsigned int is_link : 1;
+@@ -109,6 +110,7 @@ struct file {
+
+ char *staging; /* output name used during download & staging */
+ CURL *curl; /* curl handle if downloading */
++ int fd; /* file written into during downloading, unset when fd_valid is false */
+ };
+
+ extern bool download_only;
+@@ -199,6 +201,7 @@ extern void swupd_curl_set_current_version(int v);
+ extern void swupd_curl_set_requested_version(int v);
+ extern double swupd_query_url_content_size(char *url);
+ extern size_t swupd_download_file(void *ptr, size_t size, size_t nmemb, void *userdata);
++extern CURLcode swupd_download_file_complete(CURLcode curl_ret, struct file *file);
+ extern int swupd_curl_get_file(const char *url, char *filename, struct file *file,
+ struct version_container *tmp_version, bool pack);
+ #define SWUPD_CURL_LOW_SPEED_LIMIT 1
+diff --git a/src/curl.c b/src/curl.c
+index b14193b..cab1ef2 100644
+--- a/src/curl.c
++++ b/src/curl.c
+@@ -165,35 +165,57 @@ size_t swupd_download_file(void *ptr, size_t size, size_t nmemb, void *userdata)
+ const char *outfile;
+ int fd;
+ FILE *f;
+- size_t written;
++ size_t written, remaining;
+
+ outfile = file->staging;
++ if (file->fd_valid) {
++ fd = file->fd;
++ } else {
++ fd = open(outfile, O_CREAT | O_RDWR | O_CLOEXEC | O_APPEND, 00600);
++ if (fd < 0) {
++ fprintf(stderr, "Cannot open file for write \\*outfile=\"%s\",strerror=\"%s\"*\\\n",
++ outfile, strerror(errno));
++ return -1;
++ }
++ file->fd = fd;
++ file->fd_valid = 1;
++ }
+
+- fd = open(outfile, O_CREAT | O_RDWR, 00600);
+- if (fd < 0) {
+- printf("Error: Cannot open %s for write: %s\n",
+- outfile, strerror(errno));
+- return -1;
++ /* handle short writes with repeated write() calls */
++ for (remaining = size * nmemb; remaining; remaining -= written) {
++ written = write(fd, ptr, size*nmemb);
++ if (written < 0) {
++ if (errno == EINTR) {
++ written = 0;
++ continue;
++ }
++ fprintf(stderr, "write error \\*outfile=\"%s\",strerror=\"%s\"*\\\n",
++ outfile, strerror(errno));
++ return -1;
++ }
+ }
+
+- f = fdopen(fd, "a");
+- if (!f) {
+- printf("Error: Cannot fdopen %s for write: %s\n",
+- outfile, strerror(errno));
+- close(fd);
++ if (fdatasync(fd)) {
++ fprintf(stderr, "fdatasync \\*outfile=\"%s\",strerror=\"%s\"*\\\n", outfile, strerror(errno));
+ return -1;
+ }
+
+- written = fwrite(ptr, size * nmemb, 1, f);
+-
+- fflush(f);
+- fclose(f);
++ return size*nmemb;
++}
+
+- if (written != 1) {
+- return -1;
++CURLcode swupd_download_file_complete(CURLcode curl_ret, struct file *file)
++{
++ if (file->fd_valid) {
++ if (close(file->fd)) {
++ fprintf(stderr, "Cannot close file after write \\*outfile=\"%s\",strerror=\"%s\"*\\\n",
++ file->staging, strerror(errno));
++ if (curl_ret == CURLE_OK) {
++ curl_ret = CURLE_WRITE_ERROR;
++ }
++ }
++ file->fd_valid = 0;
+ }
+-
+- return size * nmemb;
++ return curl_ret;
+ }
+
+ /* Download a single file SYNCHRONOUSLY
+@@ -281,6 +303,9 @@ int swupd_curl_get_file(const char *url, char *filename, struct file *file,
+ }
+
+ exit:
++ if (local) {
++ curl_ret = swupd_download_file_complete(curl_ret, local);
++ }
+ if (curl_ret == CURLE_OK) {
+ /* curl command succeeded, download might've failed, let our caller handle */
+ switch (ret) {
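
The hunk above makes the synchronous download path run the new completion step on every exit. How the callback pair is wired to a curl easy handle is not shown in this patch; roughly, and assuming only standard libcurl options plus the declarations added to include/swupd.h above, it looks like this sketch (the real setup in src/curl.c differs in detail):

    #include <curl/curl.h>
    #include "swupd.h"

    /* Sketch: fetch one URL into file->staging, then run the completion
     * helper so that a failed close() is reported as CURLE_WRITE_ERROR. */
    static CURLcode fetch_to_staging(CURL *curl, const char *url, struct file *file)
    {
        CURLcode ret;

        curl_easy_setopt(curl, CURLOPT_URL, url);
        /* Every arriving chunk is handed to the write callback added above;
         * the struct file carries the staging path and the open descriptor. */
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, swupd_download_file);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, file);

        ret = curl_easy_perform(curl);

        /* Always close the staging file, even on failure, and fold any
         * close() error into the returned code. */
        return swupd_download_file_complete(ret, file);
    }
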
+diff --git a/src/download.c b/src/download.c
+index 6d81d81..c4a7a07 100644
+--- a/src/download.c
++++ b/src/download.c
+@@ -164,6 +164,7 @@ static void free_curl_list_data(void *data)
+ {
+ struct file *file = (struct file *)data;
+ CURL *curl = file->curl;
++ (void) swupd_download_file_complete(CURLE_OK, file);
+ if (curl != NULL) {
+ curl_multi_remove_handle(mcurl, curl);
+ curl_easy_cleanup(curl);
+@@ -368,9 +369,14 @@ static int perform_curl_io_and_complete(int *left)
+ continue;
+ }
+
++ /* Get error code from easy handle and augment it if
++ * completing the download encounters further problems. */
++ curl_ret = msg->data.result;
++ curl_ret = swupd_download_file_complete(curl_ret, file);
++
+ /* The easy handle may have an error set, even if the server returns
+ * HTTP 200, so retry the download for this case. */
+- if (ret == 200 && msg->data.result != CURLE_OK) {
++ if (ret == 200 && curl_ret != CURLE_OK) {
+ printf("Error for %s download: %s\n", file->hash,
+ curl_easy_strerror(msg->data.result));
+ failed = list_prepend_data(failed, file);
+--
+2.1.4
+
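
For the parallel path touched in src/download.c, the same completion helper runs once the multi interface reports a transfer as done, and its return value replaces msg->data.result in the retry decision. Below is a compressed sketch of that pattern, assuming each easy handle has its struct file attached via CURLOPT_PRIVATE (an assumption about the surrounding download.c code, which this patch does not show):

    #include <stdio.h>
    #include <curl/curl.h>
    #include "swupd.h"

    /* Sketch: drain completion messages from the multi handle and let the
     * completion helper augment each transfer's result code. */
    static void reap_finished(CURLM *mcurl)
    {
        CURLMsg *msg;
        int queued;

        while ((msg = curl_multi_info_read(mcurl, &queued)) != NULL) {
            char *priv = NULL;
            struct file *file;
            CURLcode result;

            if (msg->msg != CURLMSG_DONE)
                continue;

            /* Assumed: the per-transfer struct file was stored earlier with
             * curl_easy_setopt(handle, CURLOPT_PRIVATE, file). */
            curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, &priv);
            file = (struct file *)priv;

            /* Close the staging file; a close() failure turns CURLE_OK
             * into CURLE_WRITE_ERROR, so the download gets retried. */
            result = swupd_download_file_complete(msg->data.result, file);
            if (result != CURLE_OK)
                fprintf(stderr, "download of %s failed: %s\n",
                        file->staging, curl_easy_strerror(result));

            curl_multi_remove_handle(mcurl, msg->easy_handle);
            curl_easy_cleanup(msg->easy_handle);
        }
    }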