aboutsummaryrefslogtreecommitdiffstats
path: root/recipes-core/swupd-client/swupd-client/0001-downloads-minimize-syscalls-to-improve-performance.patch
blob: 59875b7f9fdf8a1dc9e4b9298cbbee4960a65c8b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
From 9bc713b7ed0dba91304c5d7ed4905f5924ad8e42 Mon Sep 17 00:00:00 2001
From: Patrick Ohly <patrick.ohly@intel.com>
Date: Thu, 14 Apr 2016 11:03:31 +0200
Subject: [PATCH 1/3] downloads: minimize syscalls to improve performance

The previous approach was to open/fdopen/fclose the file for each
chunk that gets passed from curl. This incurrs a huge performance hit
when close() triggers a hashing of the file content on systems where
integrity protection via IMA is enabled.

Now the file is opened only once and kept open until the download is
complete. In addition, the unnecessary usage of C file IO is avoided.

The semantics are changed as little as possible:
- file gets created only after the first chunk of data arrived
- file descriptors do not leak to child processes (O_CLOEXEC)
- data gets appended to existing files (via O_APPEND, used
  to keep the code simple and avoid an additional lseek)
- data gets flushed explicitly for each chunk (via fdatasync(),
  which somewhat approximates the effect that an explicit
  close() may have had)

As an additional improvement, failures during close() are checked. To
keep error handling as much as before, the completion function which has
the close() takes the current curl error code and replaces it if it
encounters a write error.

[v2 of the patch with fixes by Dmitry Rozhkov, see https://github.com/pohly/swupd-client/pull/1]

Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
---
 include/swupd.h |  3 +++
 src/curl.c      | 63 ++++++++++++++++++++++++++++++++++++++++-----------------
 src/download.c  |  8 +++++++-
 3 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/include/swupd.h b/include/swupd.h
index 14e65ab..3bac8b2 100644
--- a/include/swupd.h
+++ b/include/swupd.h
@@ -89,6 +89,7 @@ struct file {
 	int last_change;
 	struct update_stat stat;
 
+	unsigned int fd_valid : 1;
 	unsigned int is_dir : 1;
 	unsigned int is_file : 1;
 	unsigned int is_link : 1;
@@ -109,6 +110,7 @@ struct file {
 
 	char *staging; /* output name used during download & staging */
 	CURL *curl;    /* curl handle if downloading */
+	int fd;        /* file written into during downloading, unset when fd_valid is false */
 };
 
 extern bool download_only;
@@ -199,6 +201,7 @@ extern void swupd_curl_set_current_version(int v);
 extern void swupd_curl_set_requested_version(int v);
 extern double swupd_query_url_content_size(char *url);
 extern size_t swupd_download_file(void *ptr, size_t size, size_t nmemb, void *userdata);
+extern CURLcode swupd_download_file_complete(CURLcode curl_ret, struct file *file);
 extern int swupd_curl_get_file(const char *url, char *filename, struct file *file,
 			       struct version_container *tmp_version, bool pack);
 #define SWUPD_CURL_LOW_SPEED_LIMIT 1
diff --git a/src/curl.c b/src/curl.c
index b14193b..cab1ef2 100644
--- a/src/curl.c
+++ b/src/curl.c
@@ -165,35 +165,57 @@ size_t swupd_download_file(void *ptr, size_t size, size_t nmemb, void *userdata)
 	const char *outfile;
 	int fd;
 	FILE *f;
-	size_t written;
+	size_t written, remaining;
 
 	outfile = file->staging;
+	if (file->fd_valid) {
+		fd = file->fd;
+	} else {
+		fd = open(outfile, O_CREAT | O_RDWR | O_CLOEXEC | O_APPEND, 00600);
+		if (fd < 0) {
+			fprintf(stderr, "Cannot open file for write \\*outfile=\"%s\",strerror=\"%s\"*\\\n",
+				outfile, strerror(errno));
+			return -1;
+		}
+		file->fd = fd;
+		file->fd_valid = 1;
+	}
 
-	fd = open(outfile, O_CREAT | O_RDWR, 00600);
-	if (fd < 0) {
-		printf("Error: Cannot open %s for write: %s\n",
-		       outfile, strerror(errno));
-		return -1;
+	/* handle short writes with repeated write() calls */
+	for (remaining = size * nmemb; remaining; remaining -= written) {
+		written = write(fd, ptr, size*nmemb);
+		if (written < 0) {
+			if (errno == EINTR) {
+				written = 0;
+				continue;
+			}
+			fprintf(stderr, "write error \\*outfile=\"%s\",strerror=\"%s\"*\\\n",
+				outfile, strerror(errno));
+			return -1;
+		}
 	}
 
-	f = fdopen(fd, "a");
-	if (!f) {
-		printf("Error: Cannot fdopen %s for write: %s\n",
-		       outfile, strerror(errno));
-		close(fd);
+	if (fdatasync(fd)) {
+		fprintf(stderr, "fdatasync \\*outfile=\"%s\",strerror=\"%s\"*\\\n", outfile, strerror(errno));
 		return -1;
 	}
 
-	written = fwrite(ptr, size * nmemb, 1, f);
-
-	fflush(f);
-	fclose(f);
+	return size*nmemb;
+}
 
-	if (written != 1) {
-		return -1;
+CURLcode swupd_download_file_complete(CURLcode curl_ret, struct file *file)
+{
+	if (file->fd_valid) {
+		if (close(file->fd)) {
+			fprintf(stderr, "Cannot close file after write \\*outfile=\"%s\",strerror=\"%s\"*\\\n",
+				file->staging, strerror(errno));
+			if (curl_ret == CURLE_OK) {
+				curl_ret = CURLE_WRITE_ERROR;
+			}
+		}
+		file->fd_valid = 0;
 	}
-
-	return size * nmemb;
+	return curl_ret;
 }
 
 /* Download a single file SYNCHRONOUSLY
@@ -281,6 +303,9 @@ int swupd_curl_get_file(const char *url, char *filename, struct file *file,
 	}
 
 exit:
+	if (local) {
+		curl_ret = swupd_download_file_complete(curl_ret, local);
+	}
 	if (curl_ret == CURLE_OK) {
 		/* curl command succeeded, download might've failed, let our caller handle */
 		switch (ret) {
diff --git a/src/download.c b/src/download.c
index 6d81d81..c4a7a07 100644
--- a/src/download.c
+++ b/src/download.c
@@ -164,6 +164,7 @@ static void free_curl_list_data(void *data)
 {
 	struct file *file = (struct file *)data;
 	CURL *curl = file->curl;
+	(void) swupd_download_file_complete(CURLE_OK, file);
 	if (curl != NULL) {
 		curl_multi_remove_handle(mcurl, curl);
 		curl_easy_cleanup(curl);
@@ -368,9 +369,14 @@ static int perform_curl_io_and_complete(int *left)
 			continue;
 		}
 
+		/* Get error code from easy handle and augment it if
+		 * completing the download encounters further problems. */
+		curl_ret = msg->data.result;
+		curl_ret = swupd_download_file_complete(curl_ret, file);
+
 		/* The easy handle may have an error set, even if the server returns
 		 * HTTP 200, so retry the download for this case. */
-		if (ret == 200 && msg->data.result != CURLE_OK) {
+		if (ret == 200 && curl_ret != CURLE_OK) {
 			printf("Error for %s download: %s\n", file->hash,
 			       curl_easy_strerror(msg->data.result));
 			failed = list_prepend_data(failed, file);
-- 
2.1.4