author    bigfoot547 <bigfoot@figboot.dev>  2026-01-12 16:30:37 -0600
committer bigfoot547 <bigfoot@figboot.dev>  2026-01-12 16:30:37 -0600
commit    2016dceaa9cfc65ee80ee7e433331390f4263744 (patch)
tree      d0c97b2ab5fc1933298a050f167b6a68fff08c89 /lib/net.c
parent    add support for verified files (diff)
download jobs
Diffstat (limited to 'lib/net.c')
-rw-r--r--  lib/net.c  327
1 file changed, 327 insertions, 0 deletions
diff --git a/lib/net.c b/lib/net.c
index 99c7deb..f400974 100644
--- a/lib/net.c
+++ b/lib/net.c
@@ -492,3 +492,330 @@ int vl_net_ensure_verified(const char *url, const char *target_path, unsigned fl
     return ret;
 }
+struct dl_all_ctx {
+    struct write_ctx wrctx;
+    struct vl_download_job *cur_job;
+    char errbuf[CURL_ERROR_SIZE];
+};
+
+static int setup_easy(CURLM *multi, CURL *easy, struct vl_download_job *job)
+{
+    int ret = NET_EUNSPEC;
+    CURLcode ecode;
+    CURLMcode mcode;
+
+    struct dl_all_ctx *dlctx;
+
+    if ((ecode = curl_easy_getinfo(easy, CURLINFO_PRIVATE, &dlctx)) != CURLE_OK || !dlctx) {
+        vl_error("Bad curl easy handle! curl_easy_getinfo(CURLINFO_PRIVATE): %s", curl_easy_strerror(ecode));
+        abort(); /* weird! should never happen */
+    }
+
+#define C(_ex) CHECK(_ex, #_ex, ecode, dlctx->errbuf, cleanup, ret)
+
+    C(curl_easy_setopt(easy, CURLOPT_URL, job->url));
+
+#undef C
+
+    if (!(dlctx->wrctx.ofile = fopen(job->opath, "wb"))) {
+        vl_warn("Failed to open output file %s: %s", job->opath, strerror(errno));
+        ret = NET_EIO;
+        goto cleanup;
+    }
+
+    dlctx->wrctx.opath = NULL;
+    dlctx->wrctx.total_read = 0;
+    vl_sha1_init(&dlctx->wrctx.sha1_state);
+
+    dlctx->cur_job = job;
+    job->status = STATUS_RUNNING;
+
+    if ((mcode = curl_multi_add_handle(multi, easy)) != CURLM_OK) {
+        vl_warn("Failed to add easy handle to multi (for %s): %s", job->url, curl_multi_strerror(mcode));
+        goto cleanup;
+    }
+
+    ret = NET_OK;
+
+cleanup:
+    if (ret != NET_OK && dlctx->wrctx.ofile) {
+        fclose(dlctx->wrctx.ofile);
+        dlctx->wrctx.ofile = NULL;
+
+        if (remove(job->opath) < 0) {
+            vl_debug("...also failed to delete touched file %s: %s", job->opath, strerror(errno));
+        } else {
+            vl_trace("removed touched file %s", job->opath);
+        }
+    }
+
+    return ret;
+}
+
+static int xfer_complete(CURLM *multi, CURL *easy, CURLcode ecode)
+{
+    struct dl_all_ctx *ctx = NULL;
+    int ret = NET_EUNSPEC;
+    long statuscode;
+    CURLcode myecode;
+    CURLMcode mcode;
+
+    if ((myecode = curl_easy_getinfo(easy, CURLINFO_PRIVATE, &ctx)) != CURLE_OK || !ctx) {
+        vl_error("Failed to get context information: curl_easy_getinfo(CURLINFO_PRIVATE): %s", curl_easy_strerror(myecode));
+        abort(); /* weird! should never happen */
+    }
+
+    vl_trace("Handling completed transfer %s", ctx->cur_job->url);
+
+    if ((mcode = curl_multi_remove_handle(multi, easy)) != CURLM_OK) {
+        vl_warn("Failed to remove easy handle from multi (for %s): %s", ctx->cur_job->url, curl_multi_strerror(mcode));
+        goto cleanup;
+    }
+
+    if (ecode != CURLE_OK) {
+        ctx->cur_job->status = STATUS_ERROR;
+        ret = translate_curlcode(ecode);
+        goto cleanup;
+    }
+
+    /* transfer completed successfully, make sure everything else is right also. */
+
+#define C(_ex) CHECK(_ex, #_ex, myecode, ctx->errbuf, cleanup, ret)
+
+    C(curl_easy_getinfo(easy, CURLINFO_RESPONSE_CODE, &statuscode));
+
+    if (statuscode / 100 != 2) {
+        /* should never happen */
+        vl_warn("Bad HTTP response code from %s: %ld", ctx->cur_job->url, statuscode);
+        ret = NET_ESTATUS;
+        goto cleanup;
+    }
+
+    vl_trace("Got %ld OK from %s", statuscode, ctx->cur_job->url);
+
+    /* compare integrity info */
+
+    if (ctx->cur_job->verify_flags & VERIFY_SIZE) {
+        if (ctx->cur_job->expect_len != ctx->wrctx.total_read) {
+            vl_warn("Bad integrity on downloaded file %s: size mismatch: expected %zu bytes, got %zu bytes", ctx->cur_job->url, ctx->cur_job->expect_len, ctx->wrctx.total_read);
+            ret = NET_EINTEGRITY;
+            ctx->cur_job->status = STATUS_INTEGRITY;
+            goto cleanup;
+        }
+
+        vl_trace("size matches %s: %zu", ctx->cur_job->url, ctx->cur_job->expect_len);
+    }
+
+    if (ctx->cur_job->verify_flags & VERIFY_SHA1) {
+        vl_sha1 got_hash;
+        vl_sha1_finalize(&ctx->wrctx.sha1_state, got_hash);
+
+        char exp_hex[VL_SHA1_DIGEST_HEX_STRLEN + 1];
+        char got_hex[VL_SHA1_DIGEST_HEX_STRLEN + 1];
+
+        exp_hex[VL_SHA1_DIGEST_HEX_STRLEN] = '\0';
+        got_hex[VL_SHA1_DIGEST_HEX_STRLEN] = '\0';
+
+        vl_sha1_encode(ctx->cur_job->expect_hash, exp_hex);
+        vl_sha1_encode(got_hash, got_hex);
+
+        if (memcmp(ctx->cur_job->expect_hash, got_hash, sizeof(vl_sha1))) {
+            vl_warn("Bad integrity on downloaded file %s: sha1 mismatch: expected %s, got %s", ctx->cur_job->url, exp_hex, got_hex);
+            ret = NET_EINTEGRITY;
+            ctx->cur_job->status = STATUS_INTEGRITY;
+            goto cleanup;
+        }
+
+        vl_trace("sha1 matches %s: %s", ctx->cur_job->url, exp_hex);
+    }
+
+#undef C
+
+    ctx->cur_job->status = STATUS_COMPLETE;
+    ret = NET_OK;
+
+cleanup:
+    if (ctx->wrctx.ofile) {
+        fclose(ctx->wrctx.ofile);
+        ctx->wrctx.ofile = NULL;
+    }
+
+    if (ret != NET_OK) {
+        if (remove(ctx->cur_job->opath) < 0) {
+            vl_debug("...and failed to clean up after failed job: remove(%s): %s", ctx->cur_job->opath, strerror(errno));
+        } else {
+            vl_trace("removed touched file (failed job): %s", ctx->cur_job->opath);
+        }
+    }
+
+    ctx->cur_job = NULL;
+
+    return ret;
+}
+
+int vl_net_download_all(struct vl_download_job *jobs, size_t njobs, size_t simult)
+{
+    vl_arena *arena = NULL;
+    int ret = NET_EUNSPEC;
+    int tempret;
+    CURLM *multi = NULL;
+    CURL **easy_pool = NULL;
+    struct dl_all_ctx *dlcontexts = NULL;
+    size_t next_job = 0;
+    size_t completed_jobs = 0;
+    CURLcode ecode;
+
+    CURLMcode mcode;
+    int nrunning = 0;
+    CURLMsg *mmsg;
+    int nmsg = 0;
+
+    /* sanity check for simultaneous transfer count */
+    if (simult < 8) simult = 8;
+    if (simult > njobs) simult = njobs;
+
+    arena = vl_arena_new((size_t)1 << 14);
+    if (!arena) {
+        vl_warn("Error allocating arena for download job!");
+        goto cleanup;
+    }
+
+    /* array of easy handles */
+    easy_pool = vl_arena_push(arena, simult * sizeof(CURL *));
+    memset(easy_pool, 0, simult * sizeof(CURL *)); /* note: this assumes a null pointer is represented as all-zero bits */
+
+    /* context info for each easy handle */
+    dlcontexts = vl_arena_push(arena, simult * sizeof(struct dl_all_ctx));
+    memset(dlcontexts, 0, simult * sizeof(struct dl_all_ctx));
+
+    multi = curl_multi_init();
+    if (!multi) {
+        vl_warn("Error creating multi handle for download job");
+        goto cleanup;
+    }
+
+    /* initialize each easy handle with default info */
+    for (size_t i = 0; i < simult; ++i) {
+        easy_pool[i] = curl_easy_init();
+        if (!easy_pool[i]) {
+            vl_warn("Failed to create easy handle %zu", i);
+            goto cleanup;
+        }
+
+        if ((ecode = curl_easy_setopt(easy_pool[i], CURLOPT_ERRORBUFFER, dlcontexts[i].errbuf)) != CURLE_OK) {
+            vl_warn("Failed to set error buffer on easy handle %zu: %s", i, curl_easy_strerror(ecode));
+            goto cleanup;
+        }
+
+#define C(_ex) CHECK(_ex, #_ex, ecode, dlcontexts[i].errbuf, cleanup, ret)
+
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_USERAGENT, VL_USER_AGENT));
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_HTTPGET, 1L));
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_FOLLOWLOCATION, CURLFOLLOW_ALL));
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_TIMEOUT, 60L));
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_FAILONERROR, 1L));
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_WRITEFUNCTION, &handle_write));
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_WRITEDATA, &dlcontexts[i].wrctx));
+        C(curl_easy_setopt(easy_pool[i], CURLOPT_PRIVATE, dlcontexts + i));
+
+#undef C
+    }
+
+ vl_trace("Setting off %zu initial jobs.", simult);
+ for (size_t i = 0; i < simult; ++i) {
+ if ((tempret = setup_easy(multi, easy_pool[i], jobs + i)) != NET_OK) {
+ vl_trace("Failed to set up initial job %zu", i);
+ ret = tempret;
+ goto cleanup;
+ }
+ }
+
+ next_job = simult;
+
+ do {
+ vl_trace("Starting download loop.");
+ mcode = curl_multi_perform(multi, &nrunning);
+ if (mcode != CURLM_OK) {
+ vl_warn("Error in CURL transfer: curl_multi_perform: %s", curl_multi_strerror(mcode));
+ goto cleanup;
+ }
+
+ /* handle complete transfers */
+ do {
+ mmsg = curl_multi_info_read(multi, &nmsg);
+ if (!mmsg) break;
+
+ if (mmsg->msg == CURLMSG_DONE) {
+ /* a transfer has completed */
+ ecode = mmsg->data.result;
+ if ((tempret = xfer_complete(multi, mmsg->easy_handle, ecode)) != NET_OK) {
+ ret = tempret;
+ goto cleanup;
+ }
+
+ ++completed_jobs;
+
+ /* set up the next transfer */
+ if (next_job < njobs) {
+ if ((tempret = setup_easy(multi, mmsg->easy_handle, jobs + next_job)) != NET_OK) {
+ vl_trace("failed to set up job %zu", next_job);
+ ret = tempret;
+ goto cleanup;
+ }
+
+ vl_trace("Set up job %zu", next_job);
+ ++next_job;
+ }
+ }
+ } while (mmsg);
+
+ vl_trace("Download loop complete: %d running, next job: %zu, njobs: %zu, completed: %zu", nrunning, next_job, njobs, completed_jobs);
+
+ if (completed_jobs >= njobs) {
+ vl_trace("Breaking out early %zu >= %zu.", completed_jobs, njobs);
+ break;
+ }
+
+ mcode = curl_multi_poll(multi, NULL, 0, 1000, NULL);
+ if (mcode != CURLM_OK) {
+ vl_warn("Error in CURL transfer: curl_multi_poll: %s", curl_multi_strerror(mcode));
+ goto cleanup;
+ }
+ } while (completed_jobs < njobs);
+
+ ret = NET_OK;
+
+cleanup:
+    if (easy_pool) {
+        for (size_t i = 0; i < simult; ++i) {
+            if (easy_pool[i]) {
+                if (multi) {
+                    /* best effort: ignore any error from this call */
+                    curl_multi_remove_handle(multi, easy_pool[i]);
+                }
+                curl_easy_cleanup(easy_pool[i]);
+            }
+        }
+    }
+
+    if (multi) {
+        curl_multi_cleanup(multi);
+    }
+
+    for (size_t i = 0; dlcontexts && i < simult; ++i) { /* dlcontexts may still be NULL if arena setup failed */
+        if (dlcontexts[i].cur_job && dlcontexts[i].wrctx.ofile) {
+            fclose(dlcontexts[i].wrctx.ofile);
+            if (remove(dlcontexts[i].cur_job->opath) < 0) {
+                vl_debug("...also failed to remove touched file %s: %s", dlcontexts[i].cur_job->opath, strerror(errno));
+            } else {
+                vl_trace("Removed touched file %s", dlcontexts[i].cur_job->opath);
+            }
+        }
+    }
+
+    if (arena) {
+        vl_arena_free(arena);
+    }
+
+    return ret;
+}
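
For reference, a minimal caller sketch for the new vl_net_download_all() follows. It is not part of the patch: the member names (url, opath, verify_flags, expect_len, status) and constants (VERIFY_SIZE, NET_OK) mirror their use in the diff above, while the header name "net.h" and the exact declaration of struct vl_download_job are assumptions.

/*
 * Caller sketch (not part of the patch).  "net.h" is an assumed header
 * exposing vl_net_download_all() and struct vl_download_job.
 */
#include <string.h>

#include "net.h"

static int fetch_assets(void)
{
    struct vl_download_job jobs[2];
    memset(jobs, 0, sizeof(jobs)); /* leave status and hash fields zeroed */

    jobs[0].url = "https://example.com/a.bin";
    jobs[0].opath = "/tmp/a.bin";
    jobs[0].verify_flags = VERIFY_SIZE; /* length check only */
    jobs[0].expect_len = 1048576;

    jobs[1].url = "https://example.com/b.bin";
    jobs[1].opath = "/tmp/b.bin";
    jobs[1].verify_flags = 0; /* no integrity check */

    /* simult is raised to at least 8 and capped at njobs inside the function */
    return vl_net_download_all(jobs, 2, 8);
}

Since vl_net_download_all() returns on the first transfer that fails, a failing job is marked STATUS_ERROR or STATUS_INTEGRITY before the function returns, while jobs that had not started yet keep whatever status they were initialized with.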